Merge tag 'drm/tegra/for-4.9-rc1' of git://anongit.freedesktop.org/tegra/linux into...
author Dave Airlie <airlied@redhat.com>
Fri, 30 Sep 2016 03:00:36 +0000 (13:00 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 30 Sep 2016 03:00:36 +0000 (13:00 +1000)
drm/tegra: Changes for v4.9-rc1

One bugfix that avoids overwriting the Y plane base address when
displaying buffers with one of the YUV/YVU formats.

* tag 'drm/tegra/for-4.9-rc1' of git://anongit.freedesktop.org/tegra/linux:
  drm/tegra: Fix window[0] base address corruption
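
As a rough illustration of the failure mode described above — a shared plane base address being overwritten while programming the chroma planes of a YUV/YVU buffer, so the Y plane base drifts on later frames — here is a minimal, self-contained userspace sketch in C. The struct, helper names, and offsets are hypothetical stand-ins for the bug class; this is not the actual drivers/gpu/drm/tegra/dc.c patch.

/*
 * Minimal model of the bug class: a window carries up to three plane
 * base addresses (Y, U, V). If the routine that applies a per-frame
 * offset mutates the shared base[] array in place, plane 0 (Y) is
 * corrupted for every frame after the first. The fix is to compute
 * the adjusted address into a local and leave base[] untouched.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct window {
	uint64_t base[3];	/* plane base addresses: Y, U, V */
	unsigned int planes;	/* 1 for RGB, 2-3 for YUV/YVU */
};

/* Buggy: applies the offset by mutating state kept across frames. */
static uint64_t program_plane_buggy(struct window *win, unsigned int i,
				    uint64_t offset)
{
	win->base[i] += offset;	/* clobbers the stored base address */
	return win->base[i];
}

/* Fixed: the adjusted address lives in a local; base[] is read-only. */
static uint64_t program_plane_fixed(const struct window *win, unsigned int i,
				    uint64_t offset)
{
	return win->base[i] + offset;
}

int main(void)
{
	struct window buggy = { .base = { 0x1000, 0x2000, 0x3000 }, .planes = 3 };
	struct window fixed = buggy;
	unsigned int frame, i;

	/* Program two frames; each plane base must stay constant. */
	for (frame = 0; frame < 2; frame++) {
		for (i = 0; i < buggy.planes; i++) {
			uint64_t b = program_plane_buggy(&buggy, i, 0x10);
			uint64_t f = program_plane_fixed(&fixed, i, 0x10);

			printf("frame %u plane %u: buggy %#jx fixed %#jx\n",
			       frame, i, (uintmax_t)b, (uintmax_t)f);
		}
	}

	/* The buggy Y base has drifted to 0x1020; the fixed one has not. */
	assert(buggy.base[0] == 0x1020);
	assert(fixed.base[0] == 0x1000);
	return 0;
}

Built with any C99 compiler, the output shows the buggy variant's Y plane address accumulating the offset across frames while the fixed variant reprograms the same address every time — the invariant the one-line fix in drivers/gpu/drm/tegra/dc.c restores.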

2114 files changed:
.mailmap
Documentation/ABI/stable/sysfs-devices
Documentation/PCI/MSI-HOWTO.txt
Documentation/PCI/pci.txt
Documentation/arm/CCN.txt
Documentation/arm64/silicon-errata.txt
Documentation/block/queue-sysfs.txt
Documentation/conf.py
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/devicetree/bindings/display/bridge/tda998x.txt
Documentation/devicetree/bindings/display/msm/hdmi.txt
Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
Documentation/devicetree/bindings/mmc/sdhci-st.txt
Documentation/devicetree/bindings/serial/8250.txt
Documentation/devicetree/bindings/sound/omap-mcpdm.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/filesystems/overlayfs.txt
Documentation/gpu/drm-internals.rst
Documentation/gpu/drm-kms-helpers.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/drm-uapi.rst
Documentation/gpu/i915.rst
Documentation/gpu/index.rst
Documentation/gpu/kms-properties.csv
Documentation/gpu/vgaarbiter.rst [new file with mode: 0644]
Documentation/hwmon/ftsteutates
Documentation/i2c/slave-interface
Documentation/kernel-documentation.rst
Documentation/kernel-parameters.txt
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
Documentation/media/uapi/cec/cec-ioc-dqevent.rst
Documentation/networking/dsa/dsa.txt
Documentation/networking/rxrpc.txt
Documentation/power/basic-pm-debugging.txt
Documentation/power/interface.txt
Documentation/powerpc/transactional_memory.txt
Documentation/rapidio/mport_cdev.txt
Documentation/sphinx-static/theme_overrides.css
Documentation/sync_file.txt
Documentation/vgaarbiter.txt [deleted file]
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/uaccess.h
arch/arc/include/asm/entry.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/elf.h
arch/arc/kernel/arcksyms.c
arch/arc/kernel/process.c
arch/arc/kernel/setup.c
arch/arc/mm/cache.c
arch/arc/mm/highmem.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-boneblack.dts
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am335x-phycore-som.dtsi
arch/arm/boot/dts/arm-realview-pbx-a9.dts
arch/arm/boot/dts/armada-388-clearfog.dts
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/exynos5410-odroidxu.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/integratorap.dts
arch/arm/boot/dts/integratorcp.dts
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/kirkwood-ib62x0.dts
arch/arm/boot/dts/kirkwood-openrd.dtsi
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap3-overo-base.dtsi
arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/stih410.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/tegra114-dalmore.dts
arch/arm/boot/dts/tegra114-roth.dts
arch/arm/boot/dts/tegra114-tn7.dts
arch/arm/common/locomo.c
arch/arm/common/sa1111.c
arch/arm/configs/aspeed_g4_defconfig
arch/arm/configs/aspeed_g5_defconfig
arch/arm/configs/keystone_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/aes-ce-glue.c
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/hyp-stub.S
arch/arm/kernel/sys_oabi-compat.c
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-clps711x/Kconfig
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-mvebu/Makefile
arch/arm/mach-omap2/cm33xx.c
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-oxnas/Kconfig
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/xcep.c
arch/arm/mach-realview/Makefile
arch/arm/mach-realview/core.c
arch/arm/mach-s5pv210/Makefile
arch/arm/mach-sa1100/clock.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mach-sa1100/pleb.c
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
arch/arm64/boot/dts/broadcom/bcm2837.dtsi
arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm283x.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/ns2.dtsi
arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
arch/arm64/boot/dts/exynos/exynos7-espresso.dts
arch/arm64/boot/dts/exynos/exynos7.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-glue.c
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/kprobes.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smp.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/numa.c
arch/arm64/mm/proc.S
arch/avr32/include/asm/uaccess.h
arch/avr32/kernel/avr32_ksyms.c
arch/avr32/lib/copy_user.S
arch/blackfin/include/asm/uaccess.h
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/cris/include/asm/uaccess.h
arch/frv/include/asm/uaccess.h
arch/h8300/include/asm/io.h
arch/hexagon/include/asm/uaccess.h
arch/ia64/Kconfig
arch/ia64/include/asm/uaccess.h
arch/m32r/include/asm/uaccess.h
arch/m68k/kernel/signal.c
arch/metag/include/asm/uaccess.h
arch/metag/mm/init.c
arch/microblaze/include/asm/uaccess.h
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/ath79/clock.c
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
arch/mips/include/asm/page.h
arch/mips/include/asm/uaccess.h
arch/mips/kernel/mips-r2-to-r6-emul.c
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/kernel/uprobes.c
arch/mips/kernel/vdso.c
arch/mips/kvm/emulate.c
arch/mips/kvm/mmu.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/c-r4k.c
arch/mips/mm/init.c
arch/mn10300/include/asm/uaccess.h
arch/mn10300/lib/usercopy.c
arch/nios2/include/asm/uaccess.h
arch/openrisc/include/asm/uaccess.h
arch/parisc/Kconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/errno.h
arch/parisc/kernel/processor.c
arch/parisc/kernel/time.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/crypto/crc32c-vpmsum_glue.c
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/cpuidle.h
arch/powerpc/include/asm/cputhreads.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/hmi.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/hmi.c [deleted file]
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/kvm/Makefile
arch/powerpc/kvm/book3s_hv_hmi.c [new file with mode: 0644]
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/lib/checksum_32.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/slb_low.S
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/embedded6xx/holly.c
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
arch/powerpc/platforms/pasemi/iommu.c
arch/powerpc/platforms/powernv/opal-dump.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-irqchip.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/pci.c
arch/powerpc/platforms/pseries/pci_dlpar.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/cpm1.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/xics/Kconfig
arch/powerpc/sysdev/xics/icp-opal.c
arch/powerpc/sysdev/xics/ics-opal.c
arch/powerpc/sysdev/xics/ics-rtas.c
arch/powerpc/sysdev/xics/xics-common.c
arch/s390/Kconfig
arch/s390/boot/compressed/head.S
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/crypto/crc32-vx.c
arch/s390/defconfig
arch/s390/include/asm/uaccess.h
arch/s390/kernel/head.S
arch/s390/kernel/setup.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/s390/lib/string.c
arch/s390/lib/uaccess.c
arch/s390/mm/pageattr.c
arch/score/include/asm/uaccess.h
arch/sh/include/asm/atomic-llsc.h
arch/sh/include/asm/uaccess.h
arch/sh/include/asm/uaccess_64.h
arch/sparc/Kconfig
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/tile/Kconfig
arch/tile/include/asm/uaccess.h
arch/um/include/asm/common.lds.S
arch/um/kernel/skas/syscall.c
arch/unicore32/include/asm/mmu_context.h
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/configs/tiny.config
arch/x86/crypto/sha256-mb/sha256_mb.c
arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
arch/x86/crypto/sha512-mb/sha512_mb.c
arch/x86/entry/Makefile
arch/x86/entry/entry_64.S
arch/x86/events/amd/core.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/bts.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cqm.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/init.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/realmode.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/uv/bios.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/pmu_amd.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/hweight.S
arch/x86/lib/kaslr.c
arch/x86/mm/ident_map.c
arch/x86/mm/init.c
arch/x86/mm/kaslr.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/pci/fixup.c
arch/x86/pci/vmd.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/uv/bios_uv.c
arch/x86/power/hibernate_64.c
arch/x86/realmode/init.c
arch/x86/um/ptrace_32.c
arch/x86/um/ptrace_64.c
arch/x86/xen/enlighten.c
block/bio.c
block/blk-core.c
block/blk-merge.c
block/blk-mq.c
block/blk-throttle.c
block/elevator.c
crypto/Kconfig
crypto/blkcipher.c
crypto/cryptd.c
crypto/echainiv.c
crypto/rsa-pkcs1pad.c
crypto/sha3_generic.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/mce.c
drivers/acpi/scan.c
drivers/ata/libahci.c
drivers/ata/pata_ninja32.c
drivers/base/power/runtime.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/block/floppy.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/bus/vexpress-config.c
drivers/char/agp/intel-gtt.c
drivers/char/hw_random/Kconfig
drivers/char/tpm/tpm2-cmd.c
drivers/char/virtio_console.c
drivers/clk/renesas/r8a7795-cpg-mssr.c
drivers/clk/rockchip/clk-rk3399.c
drivers/clk/sunxi-ng/ccu-sun8i-h3.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/sunxi-ng/ccu_nk.c
drivers/clk/sunxi/clk-a10-pll2.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/tegra/clk-tegra114.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/bcm_kona_timer.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/pxa_timer.c
drivers/clocksource/sun4i_timer.c
drivers/clocksource/time-armada-370-xp.c
drivers/clocksource/time-pistachio.c
drivers/clocksource/timer-atmel-pit.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/powernv-cpufreq.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/vmx/aes_xts.c
drivers/dax/dax.c
drivers/dax/pmem.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/fence-array.c
drivers/dma-buf/reservation.c
drivers/dma-buf/sync_file.c
drivers/dma/at_xdmac.c
drivers/dma/fsl_raid.c
drivers/dma/img-mdc-dma.c
drivers/dma/pxa_dma.c
drivers/dma/sh/usb-dmac.c
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/sb_edac.c
drivers/edac/skx_edac.c [new file with mode: 0644]
drivers/firmware/arm_scpi.c
drivers/firmware/dmi-id.c
drivers/firmware/efi/capsule-loader.c
drivers/firmware/efi/capsule.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/random.c
drivers/gpio/Kconfig
drivers/gpio/gpio-max730x.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-sa1100.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Kconfig
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/ObjectID.h
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/cz_smc.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
drivers/gpu/drm/amd/amdgpu/dce_virtual.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_virtual.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/fiji_dpm.c [deleted file]
drivers/gpu/drm/amd/amdgpu/fiji_smc.c [deleted file]
drivers/gpu/drm/amd/amdgpu/fiji_smum.h [deleted file]
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c [deleted file]
drivers/gpu/drm/amd/amdgpu/iceland_smc.c [deleted file]
drivers/gpu/drm/amd/amdgpu/iceland_smum.h [deleted file]
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/r600_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dma.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dma.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_ih.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_ih.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sislands_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c [deleted file]
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/tonga_smc.c [deleted file]
drivers/gpu/drm/amd/amdgpu/tonga_smum.h [deleted file]
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdgpu/vid.h
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/si/sid.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/cgs_common.h [changed mode: 0644->0755]
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h [deleted file]
drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
drivers/gpu/drm/amd/powerplay/inc/power_state.h
drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
drivers/gpu/drm/amd/powerplay/inc/smu71.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/inc/smu7_common.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/Makefile
drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c [changed mode: 0644->0755]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_drv.h
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_fbdev.c
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/adv7511/adv7533.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
drivers/gpu/drm/bridge/dw-hdmi.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_blend.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_color_mgmt.c [new file with mode: 0644]
drivers/gpu/drm/drm_connector.c [new file with mode: 0644]
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_crtc_helper_internal.h [new file with mode: 0644]
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_dma.c
drivers/gpu/drm/drm_dp_aux_dev.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_encoder.c [new file with mode: 0644]
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_fourcc.c
drivers/gpu/drm/drm_framebuffer.c [new file with mode: 0644]
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_global.c
drivers/gpu/drm/drm_hashtab.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_kms_helper_common.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_mipi_dsi.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_object.c [new file with mode: 0644]
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_modeset_helper.c [new file with mode: 0644]
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane.c [new file with mode: 0644]
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_property.c [new file with mode: 0644]
drivers/gpu/drm/drm_rect.c
drivers/gpu/drm/drm_scatter.c
drivers/gpu/drm/drm_simple_kms_helper.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vma_manager.c
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h [deleted file]
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
drivers/gpu/drm/etnaviv/state_hi.xml.h
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/fsl-dcu/fsl_tcon.c
drivers/gpu/drm/gma500/accel_2d.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/opregion.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_modes.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/i2c/Kconfig
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_batch_pool.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_debug.c [deleted file]
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_render_state.h
drivers/gpu/drm/i915/i915_gem_request.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_request.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_memcpy.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_mm.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sw_fence.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_sw_fence.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_frontbuffer.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_renderstate.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.h
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_drm_plane.h
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
drivers/gpu/drm/omapdrm/dss/dss-of.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-jdi-lt070me05000.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rockchip/Makefile
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_psr.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.h
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/savage/savage_state.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/Makefile
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sti/sti_compositor.h
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c [deleted file]
drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h [deleted file]
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_mixer.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/sti/sti_vid.c
drivers/gpu/drm/sti/sti_vtac.c
drivers/gpu/drm/sti/sti_vtg.c
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_backend.h
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_framebuffer.c
drivers/gpu/drm/sun4i/sun4i_layer.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_drc.c [new file with mode: 0644]
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tilcdc/Makefile
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_drv.h
drivers/gpu/drm/tilcdc/tilcdc_external.c
drivers/gpu/drm/tilcdc/tilcdc_external.h
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tilcdc/tilcdc_plane.c [new file with mode: 0644]
drivers/gpu/drm/tilcdc/tilcdc_regs.h
drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_validate_shaders.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drm_bus.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/vmwgfx/Kconfig
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/ipu-v3/Makefile
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/gpu/ipu-v3/ipu-csi.c
drivers/gpu/ipu-v3/ipu-dmfc.c
drivers/gpu/ipu-v3/ipu-ic.c
drivers/gpu/ipu-v3/ipu-image-convert.c [new file with mode: 0644]
drivers/gpu/ipu-v3/ipu-prv.h
drivers/gpu/ipu-v3/ipu-vdi.c [new file with mode: 0644]
drivers/gpu/vga/vgaarb.c
drivers/hwmon/it87.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-bcm-kona.c
drivers/i2c/busses/i2c-brcmstb.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-cros-ec-tunnel.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-meson.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/iio/accel/Kconfig
drivers/iio/accel/bma220_spi.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/kxsd9.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad799x.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/rockchip_saradc.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
drivers/iio/dac/stx104.c
drivers/iio/humidity/Kconfig
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/light/Kconfig
drivers/iio/pressure/bmp280-core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/multicast.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip.h
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/pio_copy.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/qsfp.h
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mcg.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rxe/rxe.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_net.h
drivers/infiniband/sw/rxe/rxe_recv.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/rmi4/rmi_driver.c
drivers/input/serio/i8042.c
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/silead.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/dma-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/mtk_iommu.h
drivers/irqchip/irq-atmel-aic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/macintosh/ams/ams-i2c.c
drivers/macintosh/windfarm_pm112.c
drivers/macintosh/windfarm_pm72.c
drivers/macintosh/windfarm_rm31.c
drivers/mailbox/Kconfig
drivers/mailbox/bcm-pdc-mailbox.c
drivers/md/bcache/super.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-flakey.c
drivers/md/dm-log-writes.c
drivers/md/dm-log.c
drivers/md/dm-raid.c
drivers/md/dm-round-robin.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/media/cec-edid.c
drivers/media/pci/cx23885/cx23885-417.c
drivers/media/pci/saa7134/saa7134-dvb.c
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/Kconfig
drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
drivers/media/platform/rcar-fcp.c
drivers/memory/omap-gpmc.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/bh1780gli.c [deleted file]
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/misc/lkdtm_rodata.c
drivers/misc/lkdtm_usercopy.c
drivers/misc/mei/hw-me.c
drivers/misc/mei/pci-me.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-st.c
drivers/mtd/nand/mtk_ecc.c
drivers/mtd/nand/mtk_nand.c
drivers/mtd/nand/mxc_nand.c
drivers/net/bonding/bond_main.c
drivers/net/can/flexcan.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/dsa/b53/b53_regs.h
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/alx/reg.h
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nic_reg.h
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_client.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/port.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/mdio-xgene.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/tun.c
drivers/net/usb/kaweth.c
drivers/net/usb/r8152.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/marvell/mwifiex/11n_aggr.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/xen-netback/xenbus.c
drivers/nvdimm/btt.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/bus.c
drivers/nvdimm/nd.h
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/Kconfig
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/of/base.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/platform.c
drivers/pci/host-bridge.c
drivers/pci/msi.c
drivers/pci/quirks.c
drivers/pci/remove.c
drivers/pcmcia/ds.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_base.h
drivers/pcmcia/sa1111_badge4.c
drivers/pcmcia/sa1111_generic.c
drivers/pcmcia/sa1111_jornada720.c
drivers/pcmcia/sa1111_lubbock.c
drivers/pcmcia/sa1111_neponset.c
drivers/pcmcia/sa11xx_base.c
drivers/pcmcia/soc_common.c
drivers/perf/arm_pmu.c
drivers/phy/phy-brcm-sata.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-sun9i-usb.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
drivers/platform/olpc/olpc-ec.c
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/power/max17042_battery.c
drivers/power/reset/Kconfig
drivers/power/reset/hisi-reboot.c
drivers/power/tps65217_charger.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/rio_cm.c
drivers/regulator/max14577-regulator.c
drivers/regulator/max77693-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_eckd.c
drivers/s390/cio/device.c
drivers/s390/cio/device_status.c
drivers/s390/cio/io_sch.h
drivers/s390/cio/qdio_main.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/s390/virtio/Makefile
drivers/s390/virtio/kvm_virtio.c
drivers/scsi/aacraid/commctrl.c
drivers/scsi/constants.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/ipr.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/ses.c
drivers/scsi/wd719x.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-pxa2xx-pci.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi.c
drivers/staging/android/sync_debug.c
drivers/staging/comedi/drivers/adv_pci1760.c
drivers/staging/comedi/drivers/comedi_test.c
drivers/staging/comedi/drivers/daqboard2000.c
drivers/staging/comedi/drivers/dt2811.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/lustre/lustre/llite/namei.c
drivers/staging/media/cec/TODO
drivers/staging/media/cec/cec-adap.c
drivers/staging/media/cec/cec-api.c
drivers/staging/media/cec/cec-core.c
drivers/staging/media/pulse8-cec/pulse8-cec.c
drivers/staging/wilc1000/host_interface.c
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/thermal/clock_cooling.c
drivers/thermal/cpu_cooling.c
drivers/thermal/fair_share.c
drivers/thermal/gov_bang_bang.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/int3406_thermal.c
drivers/thermal/intel_pch_thermal.c
drivers/thermal/intel_powerclamp.c
drivers/thermal/power_allocator.c
drivers/thermal/rcar_thermal.c
drivers/thermal/step_wise.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_hwmon.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_mid.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/rndis.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/function/uvc_configfs.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/misc/ftdi-elan.c
drivers/usb/misc/usbtest.c
drivers/usb/musb/Kconfig
drivers/usb/musb/musb_virthub.c
drivers/usb/phy/phy-generic.c
drivers/usb/phy/phy-omap-otg.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/serial/usb-serial.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vsock.c
drivers/virtio/virtio_ring.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/afs/cmservice.c
fs/afs/fsclient.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/afs/vlclient.c
fs/aio.c
fs/autofs4/expire.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/ctree.h
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/mds_client.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/configfs/file.c
fs/crypto/policy.c
fs/devpts/inode.c
fs/dlm/debug_fs.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/node.c
fs/f2fs/super.c
fs/fs-writeback.c
fs/fuse/file.c
fs/ioctl.c
fs/iomap.c
fs/kernfs/file.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayout.h
fs/nfs/blocklayout/extent_tree.c
fs/nfs/callback.c
fs/nfs/callback_proc.c
fs/nfs/client.c
fs/nfs/file.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfs/super.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/group.c
fs/notify/notification.c
fs/ocfs2/alloc.c
fs/ocfs2/cluster/tcp_internal.h
fs/ocfs2/dlm/dlmconvert.c
fs/ocfs2/file.c
fs/ocfs2/suballoc.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/pipe.c
fs/proc/base.c
fs/proc/kcore.c
fs/proc/meminfo.c
fs/proc/task_mmu.c
fs/ramfs/file-mmu.c
fs/seq_file.c
fs/sysfs/file.c
fs/ubifs/tnc_commit.c
fs/ubifs/xattr.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_defer.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_rmap_btree.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
include/asm-generic/qrwlock.h
include/asm-generic/uaccess.h
include/drm/bridge/analogix_dp.h
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_blend.h [new file with mode: 0644]
include/drm/drm_bridge.h [new file with mode: 0644]
include/drm/drm_color_mgmt.h [new file with mode: 0644]
include/drm/drm_connector.h [new file with mode: 0644]
include/drm/drm_core.h [deleted file]
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_aux_dev.h [deleted file]
include/drm/drm_dp_helper.h
include/drm/drm_edid.h
include/drm/drm_encoder.h [new file with mode: 0644]
include/drm/drm_fb_helper.h
include/drm/drm_fourcc.h
include/drm/drm_framebuffer.h [new file with mode: 0644]
include/drm/drm_gem.h
include/drm/drm_mipi_dsi.h
include/drm/drm_mm.h
include/drm/drm_mode_object.h [new file with mode: 0644]
include/drm/drm_modes.h
include/drm/drm_modeset_helper.h [new file with mode: 0644]
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_plane.h [new file with mode: 0644]
include/drm/drm_plane_helper.h
include/drm/drm_property.h [new file with mode: 0644]
include/drm/drm_simple_kms_helper.h
include/drm/drm_vma_manager.h
include/drm/i2c/tda998x.h
include/drm/i915_drm.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_memory.h
include/drm/ttm/ttm_placement.h
include/dt-bindings/display/tda998x.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/bvec.h
include/linux/cec-funcs.h
include/linux/cec.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/cpuhotplug.h
include/linux/efi.h
include/linux/fence-array.h
include/linux/fence.h
include/linux/fs.h
include/linux/fscrypto.h
include/linux/fsnotify_backend.h
include/linux/iio/sw_trigger.h
include/linux/io-mapping.h
include/linux/iomap.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/kvm_host.h
include/linux/mempolicy.h
include/linux/mfd/da8xx-cfgchip.h [new file with mode: 0644]
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/msi.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/nvme.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/printk.h
include/linux/qed/qed_if.h
include/linux/sctp.h
include/linux/serial_8250.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/smc91x.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/xprt.h
include/linux/sync_file.h
include/linux/sysctl.h
include/linux/thread_info.h
include/linux/uaccess.h
include/linux/uio.h
include/linux/vgaarb.h
include/media/cec.h
include/net/act_api.h
include/net/af_rxrpc.h
include/net/af_unix.h
include/net/cfg80211.h
include/net/gre.h
include/net/inet_ecn.h
include/net/ip_fib.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/netfilter/nft_meta.h
include/net/netfilter/nft_reject.h
include/net/pkt_cls.h
include/net/sctp/sm.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
include/rdma/ib_verbs.h
include/scsi/scsi_transport_sas.h
include/trace/events/timer.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm.h
include/uapi/drm/drm_mode.h
include/uapi/drm/i915_drm.h
include/uapi/drm/msm_drm.h
include/uapi/linux/atm_zatm.h
include/uapi/linux/bpf.h
include/uapi/linux/if_pppol2tp.h
include/uapi/linux/if_pppox.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/ipx.h
include/uapi/linux/libc-compat.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/openvswitch.h
include/uapi/linux/sctp.h
include/uapi/linux/sync_file.h
include/uapi/linux/virtio_vsock.h
include/uapi/misc/cxl.h
include/video/imx-ipu-image-convert.h [new file with mode: 0644]
include/video/imx-ipu-v3.h
include/xen/xen-ops.h
init/Kconfig
kernel/audit_watch.c
kernel/bpf/hashtab.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/configs/tiny.config
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/affinity.c
kernel/irq/chip.c
kernel/irq/manage.c
kernel/irq/msi.c
kernel/kexec_file.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/qspinlock_stat.h
kernel/memremap.c
kernel/power/hibernate.c
kernel/power/qos.c
kernel/power/snapshot.c
kernel/printk/braille.c
kernel/printk/internal.h
kernel/printk/nmi.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/seccomp.c
kernel/sysctl.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timekeeping_debug.c
kernel/time/timer.c
kernel/trace/blktrace.c
kernel/trace/trace.c
lib/Kconfig.debug
lib/Makefile
lib/iov_iter.c
lib/radix-tree.c
lib/rhashtable.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/test_hash.c
lib/test_rhashtable.c
lib/usercopy.c [deleted file]
mm/Kconfig
mm/Makefile
mm/debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/quarantine.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_io.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slub.c
mm/swapfile.c
mm/usercopy.c [new file with mode: 0644]
mm/vmscan.c
net/8021q/vlan.c
net/9p/trans_virtio.c
net/batman-adv/bat_v_elp.c
net/batman-adv/routing.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_request.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bridge/br_fdb.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_meta_bridge.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/string_table.c
net/core/dev.c
net/core/filter.c
net/core/flow_dissector.c
net/core/sock.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nft_chain_route_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/calipso.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/netfilter/nft_chain_route_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/ping.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udplite.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_tunnel.c
net/irda/af_irda.c
net/irda/iriap.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ppp.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/driver-ops.h
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_netdev.c
net/netfilter/nf_tables_trace.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_nfacct.c
net/openvswitch/conntrack.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-vxlan.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/input.c
net/rxrpc/recvmsg.c
net/rxrpc/skbuff.c
net/sched/act_api.c
net/sched/act_ife.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/sch_generic.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/output.c
net/sctp/proc.c
net/sctp/sctp_diag.c
net/sctp/ulpevent.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/monitor.c
net/tipc/name_distr.c
net/tipc/socket.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/vmw_vsock/virtio_transport.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/wext-core.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/bpf/bpf_helpers.h
samples/bpf/test_cgrp2_tc_kern.c
samples/bpf/test_maps.c
scripts/Kbuild.include
scripts/Makefile.gcc-plugins
scripts/checkpatch.pl
scripts/faddr2line [new file with mode: 0755]
scripts/gcc-plugin.sh
scripts/gcc-plugins/Makefile
scripts/get_maintainer.pl
scripts/package/builddeb
scripts/tags.sh
security/Kconfig
security/keys/encrypted-keys/encrypted.c
sound/core/rawmidi.c
sound/core/timer.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_hwdep.c
sound/firewire/fireworks/fireworks_proc.c
sound/firewire/fireworks/fireworks_transaction.c
sound/firewire/tascam/tascam-hwdep.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/codecs/da7213.c
sound/soc/codecs/max98371.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/wm2000.c
sound/soc/generic/Makefile
sound/soc/generic/simple-card-utils.c
sound/soc/intel/skylake/skl-sst-utils.c
sound/soc/intel/skylake/skl.c
sound/soc/omap/omap-abe-twl6040.c
sound/soc/omap/omap-mcpdm.c
sound/soc/samsung/s3c24xx_uda134x.c
sound/soc/sh/rcar/src.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/line6/pcm.c
sound/usb/line6/pod.c
sound/usb/quirks.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/sie.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/required-features.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/gpio/gpio-event-mon.c
tools/iio/iio_generic_buffer.c
tools/include/linux/string.h
tools/include/uapi/linux/bpf.h
tools/lguest/lguest.c
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-script.txt
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-mem.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/util/evsel.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/jitdump.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-file.c
tools/perf/util/probe-finder.c
tools/perf/util/sort.c
tools/perf/util/symbol-elf.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/testing/nvdimm/test/nfit.c
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/multiorder.c
tools/testing/selftests/powerpc/Makefile
tools/virtio/linux/dma-mapping.h
tools/virtio/linux/kernel.h
tools/virtio/linux/slab.h
tools/virtio/linux/virtio.h
tools/virtio/linux/virtio_config.h
tools/virtio/ringtest/ptr_ring.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h
virt/kvm/kvm_main.c

index 2a91c14c80bf5315730eadceb7896bcb9d681aaf..de22daefd9daecbf956d9e874aa5664355416597 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -158,6 +159,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 43f78b88da28beaa556b3bba509f1ac97fa44c16..df449d79b563ebe850f1eeb28b0e2e91d517c430 100644 (file)
@@ -1,7 +1,7 @@
 # Note: This documents additional properties of any device beyond what
 # is documented in Documentation/sysfs-rules.txt
 
-What:          /sys/devices/*/of_path
+What:          /sys/devices/*/of_node
 Date:          February 2015
 Contact:       Device Tree mailing list <devicetree@vger.kernel.org>
 Description:
index c55df2911136c90d37944616cfeebaeca4954a96..cd9c9f6a7cd9e94de08cc7b4f97c232ff040cd38 100644 (file)
@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt.  If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES);
        if (nvec < 0)
                goto out_err;
 
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-       ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+       ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                goto out_err;
 
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device.  It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-       ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (nvec < 0)
                goto out_err;
 
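Putting the pieces together, a minimal sketch of a driver's interrupt setup
using the interface described above might look as follows (the my_* names and
the MY_MAX_VECS bound are hypothetical):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    #define MY_MAX_VECS 8                   /* hypothetical upper bound */

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int my_setup_irqs(struct pci_dev *pdev)
    {
            int nvec, i, ret;

            /* Accept MSI-X, MSI or a legacy line; spread across CPUs. */
            nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS,
                                         PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
            if (nvec < 0)
                    return nvec;

            for (i = 0; i < nvec; i++) {
                    /* pci_irq_vector() maps a vector index to a Linux IRQ. */
                    ret = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
                                      0, "my_device", pdev);
                    if (ret)
                            goto err;
            }
            return 0;

    err:
            while (--i >= 0)
                    free_irq(pci_irq_vector(pdev, i), pdev);
            pci_free_irq_vectors(pdev);
            return ret;
    }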
index 123881f62219d444303b047154b25d39363fb9f8..77f49dc5be23585fe671e8058ec9d73cec1f04ff 100644 (file)
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry.  Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:
 
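A minimal sketch of such an ID table (the my_ prefix and the device ID are
placeholders):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id my_pci_tbl[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) }, /* placeholder ID */
            { }     /* all-zero terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, my_pci_tbl);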
index ffca443a19b4265993493141328b2bd58fdd91ae..15cdb7bc57c3672fa92acfabdbf2de72834d4207 100644 (file)
@@ -18,13 +18,17 @@ and config2 fields of the perf_event_attr structure. The "events"
 directory provides configuration templates for all documented
 events, that can be used with perf tool. For example "xp_valid_flit"
 is an equivalent of "type=0x8,event=0x4". Other parameters must be
-explicitly specified. For events originating from device, "node"
-defines its index. All crosspoint events require "xp" (index),
-"port" (device port number) and "vc" (virtual channel ID) and
-"dir" (direction). Watchpoints (special "event" value 0xfe) also
-require comparator values ("cmp_l" and "cmp_h") and "mask", being
-index of the comparator mask.
+explicitly specified.
 
+For events originating from a device, "node" defines its index.
+
+Crosspoint PMU events require "xp" (index), "bus" (bus number)
+and "vc" (virtual channel ID).
+
+Crosspoint watchpoint-based events (special "event" value 0xfe)
+require "xp" and "vc" as above plus "port" (device port index),
+"dir" (transmit/receive direction), comparator values ("cmp_l"
+and "cmp_h") and "mask", the index of the comparator mask.
 Masks are defined separately from the event description
 (due to limited number of the config values) in the "cmp_mask"
 directory, with first 8 configurable by user and additional
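The event templates can be read straight out of sysfs; a small hedged sketch
(the "ccn" PMU directory name is an assumption here):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/bus/event_source/devices/ccn/events/xp_valid_flit", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("xp_valid_flit: %s", buf); /* e.g. "type=0x8,event=0x4" */
            fclose(f);
            return 0;
    }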
index 4da60b463995464a71c04245e7f90e7ced4f51d2..ccc60324e7388eb5756de1782174246708715c36 100644 (file)
@@ -53,6 +53,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
 | ARM            | Cortex-A57      | #852523         | N/A                     |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+| ARM            | Cortex-A72      | #853709         | N/A                     |
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
index d515d58962b9df66b39bdcf0fc6f981090ed4630..2a3904030dea5d287f6e1dc2f8f93461707ae018 100644 (file)
@@ -14,6 +14,12 @@ add_random (RW)
 This file allows to turn off the disk entropy contribution. Default
 value of this file is '1'(on).
 
+dax (RO)
+--------
+This file indicates whether the device supports Direct Access (DAX),
+used by CPU-addressable storage to bypass the pagecache.  It shows '1'
+if true, '0' if not.
+
 discard_granularity (RO)
 -----------------------
 This shows the size of internal allocation of the device in bytes, if
@@ -46,6 +52,12 @@ hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
 
+io_poll (RW)
+------------
+When read, this file shows the total number of block IO polls and how
+many returned success.  Writing '0' to this file will disable polling
+for this device.  Writing any non-zero value will enable this feature.
+
 iostats (RW)
 -------------
 This file is used to control (on/off) the iostats accounting of the
@@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
 setting from "write back" to "write through", since that will also
 eliminate cache flushes issued by the kernel.
 
+write_same_max_bytes (RO)
+-------------------------
+This is the number of bytes the device can write in a single write-same
+command.  A value of '0' means write-same is not supported by this
+device.
+
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
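For reference, a small userspace sketch that reads the new attributes (the
device name "sda" is a placeholder):

    #include <stdio.h>

    static void read_attr(const char *attr)
    {
            char path[128], buf[32];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
            f = fopen(path, "r");
            if (!f)
                    return;
            if (fgets(buf, sizeof(buf), f))
                    printf("%s = %s", attr, buf);
            fclose(f);
    }

    int main(void)
    {
            read_attr("dax");                  /* RO: 1 if DAX is supported */
            read_attr("io_poll");              /* RW: write 0 to disable polling */
            read_attr("write_same_max_bytes"); /* RO: 0 if write-same unsupported */
            return 0;
    }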
index 96b7aa66c89ca68c5f891d6fb3f2744dd80c6ca7..106ae9c740b99307abcffee492e0eace15c4f295 100644 (file)
@@ -131,7 +131,7 @@ pygments_style = 'sphinx'
 todo_include_todos = False
 
 primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'
 
 # -- Options for HTML output ----------------------------------------------
 
index fc647492e940539be1f37b02335ac1bbc7829820..8d9773f23550cbc4e8d97ebecb79d55a161e6d58 100644 (file)
@@ -103,7 +103,7 @@ Config Main Menu
        Power management options (ACPI, APM)  --->
                CPU Frequency scaling  --->
                        [*] CPU Frequency scaling
-                       <*>   CPU frequency translation statistics 
+                       [*]   CPU frequency translation statistics
                        [*]     CPU frequency translation statistics details
 
 
index e178e6b9f9ee1ace419d5c9ccd4bf27a64799116..24cc2466185a490067157b0b923a1fde69432649 100644 (file)
@@ -21,8 +21,19 @@ Optional properties:
   - video-ports: 24 bits value which defines how the video controller
        output is wired to the TDA998x input - default: <0x230145>
 
+  - audio-ports: array of 8-bit values, two values per DAI[1].
+       The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S[2].
+       The second value defines the tda998x AP_ENA reg content when the DAI
+       in question is used. The implementation allows one or two DAIs. If two
+       DAIs are defined, they must be of different types.
+
+[1] Documentation/sound/alsa/soc/DAI.txt
+[2] include/dt-bindings/display/tda998x.h
+
 Example:
 
+#include <dt-bindings/display/tda998x.h>
+
        tda998x: hdmi-encoder {
                compatible = "nxp,tda998x";
                reg = <0x70>;
@@ -30,4 +41,11 @@ Example:
                interrupts = <27 2>;            /* falling edge */
                pinctrl-0 = <&pmx_camera>;
                pinctrl-names = "default";
+               video-ports = <0x230145>;
+
+               #sound-dai-cells = <2>;
+                            /* DAI-format      AP_ENA reg value */
+               audio-ports = < TDA998x_SPDIF   0x04
+                               TDA998x_I2S     0x03>;
+
        };
index b63f614e0c045fd9e28eff611cabab6cc4f8fb16..2ad578984fcf8caea3592030b9417f85c00526c0 100644 (file)
@@ -14,17 +14,16 @@ Required properties:
 - power-domains: Should be <&mmcc MDSS_GDSC>.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
-- qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin
-- qcom,hdmi-tx-ddc-data-gpio: ddc data pin
-- qcom,hdmi-tx-hpd-gpio: hpd pin
 - core-vdda-supply: phandle to supply regulator
 - hdmi-mux-supply: phandle to mux regulator
 - phys: the phandle for the HDMI PHY device
 - phy-names: the name of the corresponding PHY device
 
 Optional properties:
-- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
-- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
+- hpd-gpios: hpd pin
+- qcom,hdmi-tx-mux-en-gpios: hdmi mux enable pin
+- qcom,hdmi-tx-mux-sel-gpios: hdmi mux select pin
+- qcom,hdmi-tx-mux-lpm-gpios: hdmi mux lpm pin
 - power-domains: reference to the power domain(s), if available.
 - pinctrl-names: the pin control state names; should contain "default"
 - pinctrl-0: the default pinctrl state (active)
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
new file mode 100644 (file)
index 0000000..9e75904
--- /dev/null
@@ -0,0 +1,7 @@
+Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
+
+Required properties:
+- compatible: should be "innolux,g101ice-l01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt
new file mode 100644 (file)
index 0000000..4989c91
--- /dev/null
@@ -0,0 +1,31 @@
+JDI model LT070ME05000 1200x1920 7" DSI Panel
+
+Required properties:
+- compatible: should be "jdi,lt070me05000"
+- vddp-supply: phandle of the regulator that provides the supply voltage
+  Power IC supply (3-5V)
+- iovcc-supply: phandle of the regulator that provides the supply voltage
+  IOVCC, power supply for LCM (1.8V)
+- enable-gpios: phandle of gpio for enable line
+  LED_EN, LED backlight enable, High active
+- reset-gpios: phandle of gpio for reset line
+  This should be 8mA; the gpio can be configured using mux, pinctrl, pinctrl-names
+  XRES, Reset, Low active
+- dcdc-en-gpios: phandle of the gpio for power ic line
+  Power IC supply enable, High active
+
+Example:
+
+       dsi0: qcom,mdss_dsi@4700000 {
+               panel@0 {
+                       compatible = "jdi,lt070me05000";
+                       reg = <0>;
+
+                       vddp-supply = <&pm8921_l17>;
+                       iovcc-supply = <&pm8921_lvs7>;
+
+                       enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+                       reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
+                       dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
+               };
+       };
index 5489b59e3d415e0ff292d13b824aa07a1f766d2a..9eb3f0a2a078b4e1f62d71daf123cf7d8363b683 100644 (file)
@@ -6,8 +6,10 @@ buffer to an external LCD interface.
 
 Required properties:
 - compatible: value should be one of the following
-               "rockchip,rk3288-vop";
                "rockchip,rk3036-vop";
+               "rockchip,rk3288-vop";
+               "rockchip,rk3399-vop-big";
+               "rockchip,rk3399-vop-lit";
 
 - interrupts: should contain a list of all VOP IP block interrupts in the
                 order: VSYNC, LCD_SYSTEM. The interrupt specifier
index df8f4aeefe4cd15286c5d50c158d99b0a87c3219..b95696d748c759ceb38c1b14be1b7990bb744c1e 100644 (file)
@@ -26,13 +26,14 @@ TCON
 The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
 
 Required properties:
- - compatible: value should be "allwinner,sun5i-a13-tcon".
+ - compatible: value must be either:
+   * allwinner,sun5i-a13-tcon
+   * allwinner,sun8i-a33-tcon
  - reg: base address and size of memory-mapped region
  - interrupts: interrupt associated to this IP
  - clocks: phandles to the clocks feeding the TCON. Three are needed:
    - 'ahb': the interface clocks
    - 'tcon-ch0': The clock driving the TCON channel 0
-   - 'tcon-ch1': The clock driving the TCON channel 1
  - resets: phandles to the reset controllers driving the encoder
    - "lcd": the reset line for the TCON channel 0
 
@@ -49,6 +50,33 @@ Required properties:
   second the block connected to the TCON channel 1 (usually the TV
   encoder)
 
+On the A13, there is one more clock required:
+   - 'tcon-ch1': The clock driving the TCON channel 1
+
+DRC
+---
+
+The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs
+(A31, A23, A33), allows dynamic adjustment of pixel
+brightness/contrast based on histogram measurements for LCD content
+adaptive backlight control.
+
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun8i-a33-drc
+  - reg: base address and size of the memory-mapped region.
+  - interrupts: interrupt associated to this IP
+  - clocks: phandles to the clocks feeding the DRC
+    * ahb: the DRC interface clock
+    * mod: the DRC module clock
+    * ram: the DRC DRAM clock
+  - clock-names: the clock names mentioned above
+  - resets: phandles to the reset line driving the DRC
+
+  - ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. The
+    first port should be the input endpoints, the second one the outputs
 
 Display Engine Backend
 ----------------------
@@ -59,6 +87,7 @@ system.
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-backend
+    * allwinner,sun8i-a33-display-backend
   - reg: base address and size of the memory-mapped region.
   - clocks: phandles to the clocks feeding the frontend and backend
     * ahb: the backend interface clock
@@ -71,6 +100,14 @@ Required properties:
   Documentation/devicetree/bindings/media/video-interfaces.txt. The
   first port should be the input endpoints, the second one the output
 
+On the A33, some additional properties are required:
+  - reg needs to have an additional region corresponding to the SAT
+  - reg-names need to be set, with "be" and "sat"
+  - clocks and clock-names need to have a phandle to the SAT bus
+    clocks, whose name will be "sat"
+  - resets and reset-names need to have a phandle to the SAT bus
+    resets, whose name will be "sat"
+
 Display Engine Frontend
 -----------------------
 
@@ -80,6 +117,7 @@ deinterlacing and color space conversion.
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-frontend
+    * allwinner,sun8i-a33-display-frontend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
   - clocks: phandles to the clocks feeding the frontend and backend
@@ -104,6 +142,7 @@ extra node.
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-engine
+    * allwinner,sun8i-a33-display-engine
 
   - allwinner,pipelines: list of phandle to the display engine
     frontends available.
index 2136ee81e061b8d3ab492d5dd303a467d44a4595..a83abd79c55cbccada48ff1744e9adae5500868e 100644 (file)
@@ -17,6 +17,18 @@ Optional properties:
    the lcd controller.
  - max-pixelclock: The maximum pixel clock that can be supported
    by the lcd controller in KHz.
+ - blue-and-red-wiring: Recognized values "straight" or "crossed".
+   This property deals with the LCDC revision 2 (found on AM335x)
+   color errata [1].
+    - "straight" indicates normal wiring that supports RGB565,
+      BGR888, and XBGR8888 color formats.
+    - "crossed" indicates wiring that has blue and red wires
+      crossed. This setup supports BGR565, RGB888 and XRGB8888
+      formats.
+    - If the property is not present or its value is not recognized
+      the legacy mode is assumed. This configuration supports RGB565,
+      RGB888 and XRGB8888 formats. However, depending on wiring, the red
+      and blue colors are swapped in either 16 or 24-bit color modes.
 
 Optional nodes:
 
@@ -24,6 +36,18 @@ Optional nodes:
    binding follows Documentation/devicetree/bindings/graph.txt and
    suppors a single port with a single endpoint.
 
+ - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
+   Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
+   tfp410 DVI encoder or lcd panel to lcdc
+
+[1] There is an errata about AM335x color wiring. For 16-bit color mode
+    the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]),
+    but for 24 bit color modes the wiring of blue and red components is
+    crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is
+    for Blue[3:7]. For more details see section 3.1.1 in AM335x
+    Silicon Errata:
+    http://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360
+
 Example:
 
        fb: fb@4830e000 {
@@ -33,6 +57,8 @@ Example:
                interrupts = <36>;
                ti,hwmods = "lcdc";
 
+               blue-and-red-wiring = "crossed";
+
                port {
                        lcdc_0: endpoint@0 {
                                remote-endpoint = <&hdmi_0>;
index bf99e2f24788001557194d56d92224fe8411a1e8..205593f56fe759706ea6fd321c08e66059ebd489 100644 (file)
@@ -16,6 +16,11 @@ Required properties:
 - vref-supply: The regulator supply ADC reference voltage.
 - #io-channel-cells: Should be 1, see ../iio-bindings.txt
 
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if support for
+         this option is needed. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
 Example:
        saradc: saradc@2006c000 {
                compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                #io-channel-cells = <1>;
                vref-supply = <&vcc18>;
        };
index 1112e0d794e172c43bb09ff866742d0a7fa314b5..820fee4b77b601e7f4c5521e854a06d2dd78408b 100644 (file)
@@ -13,6 +13,7 @@ Required properties:
 - touchscreen-size-y     : See touchscreen.txt
 
 Optional properties:
+- firmware-name                  : File basename (string) for board specific firmware
 - touchscreen-inverted-x  : See touchscreen.txt
 - touchscreen-inverted-y  : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt
index 88faa91125bf5c92e76afcb6f0c9cdfc01031827..3cd4c43a32601957038eea8595c1e5d42651f94d 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
                        subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
                        family).
 
-- clock-names:         Should be "mmc".
+- clock-names:         Should be "mmc" and "icn".  (NB: The latter is not compulsory)
                        See: Documentation/devicetree/bindings/resource-names.txt
 - clocks:              Phandle to the clock.
                        See: Documentation/devicetree/bindings/clock/clock-bindings.txt
index f5561ac7e17ed358ce7e6a00c7c0f8913f3464c6..936ab5b87324ce7cf022320a777342d2507dfd19 100644 (file)
@@ -42,9 +42,6 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
-  line respectively. It will use specified GPIO instead of the peripheral
-  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
                interrupts = <10>;
                reg-shift = <2>;
        };
-
-Example for OMAP UART using GPIO-based modem control signals:
-
-       uart4: serial@49042000 {
-               compatible = "ti,omap3-uart";
-               reg = <0x49042000 0x400>;
-               interrupts = <80>;
-               ti,hwmods = "uart4";
-               clock-frequency = <48000000>;
-               cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-               rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
-               dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
-               dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
-               dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
-               rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-       };
index 6f6c2f8e908db15ff37567413c20b19a5ed68d17..0741dff048dd39ba3336f67c6186918c306a563f 100644 (file)
@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks:  phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
 
 Example:
 
@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
        interrupt-parent = <&gic>;
        ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-       clocks = <&twl6040>;
-       clock-names = "pdmclk";
-       status = "okay";
-};
index 41b817f7b6706515025b9a6e4af05304a62d5c25..88b6ea1ad2903cadd73650ff6dbfa0eb54aac39f 100644 (file)
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells:      Used to provide cooling device specific information
   Type: unsigned       while referring to it. Must be at least 2, in order
-  Size: one cell       to specify minimum and maximum cooling state used
+  Size: one cell       to specify minimum and maximum cooling state used
                        in the reference. The first cell is the minimum
                        cooling state requested and the second cell is
                        the maximum cooling state requested in the reference.
@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:                The cooling contribution to the thermal zone of the
   Type: unsigned       referred cooling device at the referred trip point.
-  Size: one cell       The contribution is a ratio of the sum
+  Size: one cell       The contribution is a ratio of the sum
                        of all cooling contributions within a thermal zone.
 
 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
@@ -145,7 +145,7 @@ Required properties:
   Size: one cell
 
 - thermal-sensors:     A list of thermal sensor phandles and sensor specifier
-  Type: list of        used while monitoring the thermal zone.
+  Type: list of                used while monitoring the thermal zone.
   phandles + sensor
   specifier
 
@@ -473,7 +473,7 @@ thermal-zones {
                                  <&adc>;       /* pcb north */
 
                /* hotspot = 100 * bandgap - 120 * adc + 484 */
-               coefficients =          <100    -120    484>;
+               coefficients =          <100    -120    484>;
 
                trips {
                        ...
@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
         thermal-sensors =  <&adc>;
 
                /* hotspot = 1 * adc + 6000 */
-       coefficients =          <1      6000>;
+       coefficients =          <1      6000>;
 
 (d) - Board thermal
 
index d6259c7863165939074088908323500e2fcd0fe8..bcbf9710e4af1a1c9f46d9fa9526851ebf9dc944 100644 (file)
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
 moves it over to the old name.  The new file may be on a different
 filesystem, so both st_dev and st_ino of the file may change.
 
-Any open files referring to this inode will access the old data and
-metadata.  Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
 
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
 
 If a file with multiple hard links is copied up, then this will
 "break" the link.  Changes will not be propagated to other names
index 3bb26135971f06a3f9d3794a1d23efbaafec28fe..37284bcc77641a76d9b658fb30c50e2bde861542 100644 (file)
@@ -53,9 +53,12 @@ u32 driver_features;
 DRIVER_USE_AGP
     Driver uses AGP interface, the DRM core will manage AGP resources.
 
-DRIVER_REQUIRE_AGP
-    Driver needs AGP interface to function. AGP initialization failure
-    will become a fatal error.
+DRIVER_LEGACY
+    Denote a legacy driver using shadow attach. Don't use.
+
+DRIVER_KMS_LEGACY_CONTEXT
+    Used only by nouveau for backwards compatibility with existing userspace.
+    Don't use.
 
 DRIVER_PCI_DMA
     Driver is capable of PCI DMA, mapping of PCI DMA buffers to
index 0b302fedf1afcc3278b6eedc5dd14fb66a9aceab..bb4254d19cbb810c038922dac9dd7997cff02ea9 100644 (file)
@@ -2,38 +2,45 @@
 Mode Setting Helper Functions
 =============================
 
-The plane, CRTC, encoder and connector functions provided by the drivers
-implement the DRM API. They're called by the DRM core and ioctl handlers
-to handle device state changes and configuration request. As
-implementing those functions often requires logic not specific to
-drivers, mid-layer helper functions are available to avoid duplicating
-boilerplate code.
-
-The DRM core contains one mid-layer implementation. The mid-layer
-provides implementations of several plane, CRTC, encoder and connector
-functions (called from the top of the mid-layer) that pre-process
-requests and call lower-level functions provided by the driver (at the
-bottom of the mid-layer). For instance, the
-:c:func:`drm_crtc_helper_set_config()` function can be used to
-fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
-set_config field. When called, it will split the set_config operation
-in smaller, simpler operations and call the driver to handle them.
-
-To use the mid-layer, drivers call
-:c:func:`drm_crtc_helper_add()`,
-:c:func:`drm_encoder_helper_add()` and
-:c:func:`drm_connector_helper_add()` functions to install their
-mid-layer bottom operations handlers, and fill the :c:type:`struct
-drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
-drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
-drm_connector_funcs <drm_connector_funcs>` structures with
-pointers to the mid-layer top API functions. Installing the mid-layer
-bottom operation handlers is best done right after registering the
-corresponding KMS object.
-
-The mid-layer is not split between CRTC, encoder and connector
-operations. To use it, a driver must provide bottom functions for all of
-the three KMS entities.
+The DRM subsystem aims for a strong separation between core code and helper
+libraries. Core code takes care of general setup and teardown and decoding
+userspace requests to kernel internal objects. Everything else is handled by a
+large set of helper libraries, which can be combined freely to pick and choose
+for each driver what fits, and avoid shared code where special behaviour is
+needed.
+
+This distinction between core code and helpers is especially strong in the
+modesetting code, where there's a shared userspace ABI for all drivers. This is
+in contrast to the render side, where pretty much everything (with very few
+exceptions) can be considered optional helper code.
+
+There are a few areas these helpers can be grouped into:
+
+* Helpers to implement modesetting. The important ones here are the atomic
+  helpers. Old drivers still often use the legacy CRTC helpers. They both share
+  the same set of common helper vtables. For really simple drivers (anything
+  that would have been a great fit in the deprecated fbdev subsystem) there's
+  also the simple display pipe helpers.
+
+* There's a big pile of helpers for handling outputs. First the generic bridge
+  helpers for handling encoder and transcoder IP blocks. Second the panel helpers
+  for handling panel-related information and logic. Plus then a big set of
+  helpers for the various sink standards (DisplayPort, HDMI, MIPI DSI). Finally
+  there's also generic helpers for handling output probing, and for dealing with
+  EDIDs.
+
+* The last group of helpers concerns itself with the frontend side of a display
+  pipeline: Planes, handling rectangles for visibility checking and scissoring,
+  flip queues and assorted bits.
+
+Modeset Helper Reference for Common Vtables
+===========================================
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :internal:
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :doc: overview
 
 Atomic Modeset Helper Functions Reference
 =========================================
@@ -62,33 +69,27 @@ Atomic State Reset and Initialization
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    :export:
 
-Modeset Helper Reference for Common Vtables
-===========================================
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :internal:
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :doc: overview
-
 Legacy CRTC/Modeset Helper Functions Reference
 ==============================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
-   :export:
+   :doc: overview
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
-   :doc: overview
+   :export:
 
-Output Probing Helper Functions Reference
-=========================================
+Simple KMS Helper Reference
+===========================
 
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
-   :doc: output probing helper overview
+.. kernel-doc:: include/drm/drm_simple_kms_helper.h
+   :internal:
 
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
    :export:
 
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :doc: overview
+
 fbdev Helper Functions Reference
 ================================
 
@@ -110,6 +111,43 @@ Framebuffer CMA Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
    :export:
 
+Bridges
+=======
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: overview
+
+Default bridge callback sequence
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: bridge callbacks
+
+
+Bridge Helper Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_bridge.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :export:
+
+Panel Helper Reference
+======================
+
+.. kernel-doc:: include/drm/drm_panel.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :doc: drm panel
+
 Display Port Helper Functions Reference
 =======================================
 
@@ -158,9 +196,21 @@ MIPI DSI Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Output Probing Helper Functions Reference
+=========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :doc: output probing helper overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :export:
+
 EDID Helper Functions Reference
 ===============================
 
+.. kernel-doc:: include/drm/drm_edid.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_edid.c
    :export:
 
@@ -176,18 +226,6 @@ Rectangle Utilities Reference
 .. kernel-doc:: drivers/gpu/drm/drm_rect.c
    :export:
 
-Flip-work Helper Reference
-==========================
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :doc: flip utils
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
-   :export:
-
 HDMI Infoframes Helper Reference
 ================================
 
@@ -202,59 +240,40 @@ libraries and hence is also included here.
 .. kernel-doc:: drivers/video/hdmi.c
    :export:
 
-Plane Helper Reference
-======================
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
-   :doc: overview
+Flip-work Helper Reference
+==========================
 
-Tile group
-----------
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :doc: flip utils
 
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
-   :doc: Tile group
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :internal:
 
-Bridges
-=======
+.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
+   :export:
 
-Overview
---------
+Plane Helper Reference
+======================
 
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
    :doc: overview
 
-Default bridge callback sequence
---------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: bridge callbacks
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
    :export:
 
-Panel Helper Reference
-======================
-
-.. kernel-doc:: include/drm/drm_panel.h
-   :internal:
+Tile group
+==========
 
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :export:
+# FIXME: This should probably be moved into a property documentation section
 
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :doc: drm panel
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+   :doc: Tile group
 
-Simple KMS Helper Reference
-===========================
+Auxiliary Modeset Helpers
+=========================
 
-.. kernel-doc:: include/drm/drm_simple_kms_helper.h
-   :internal:
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
+   :doc: aux kms helpers
 
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
    :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
-   :doc: overview
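As a concrete illustration of the common helper vtables referenced above, a
minimal sketch of wiring a connector helper into a KMS object (assumes the
v4.9-era helper API; the my_* names are hypothetical):

    #include <drm/drm_crtc_helper.h>
    #include <drm/drm_edid.h>

    static int my_connector_get_modes(struct drm_connector *connector)
    {
            /* A real driver would read an EDID here; fall back to XGA. */
            return drm_add_modes_noedid(connector, 1024, 768);
    }

    static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
            .get_modes = my_connector_get_modes,
    };

    static void my_connector_setup(struct drm_connector *connector)
    {
            /* Install the helper vtable right after creating the object. */
            drm_connector_helper_add(connector, &my_connector_helper_funcs);
    }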
index 8dfa4b214b96f39449f17fac9bf085a9f26ddb84..53b872c105d24f0741e06f463ab2fdadbb0c01cd 100644 (file)
@@ -2,9 +2,6 @@
 Kernel Mode Setting (KMS)
 =========================
 
-Mode Setting
-============
-
 Drivers must initialize the mode setting core by calling
 :c:func:`drm_mode_config_init()` on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
@@ -18,60 +15,59 @@ be setup by initializing the following fields.
 -  struct drm_mode_config_funcs \*funcs;
    Mode setting functions.
 
-Display Modes Function Reference
---------------------------------
+Modeset Base Object Abstraction
+===============================
 
-.. kernel-doc:: include/drm/drm_modes.h
+.. kernel-doc:: include/drm/drm_mode_object.h
    :internal:
 
-.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+   :export:
+
+KMS Data Structures
+===================
+
+.. kernel-doc:: include/drm/drm_crtc.h
+   :internal:
+
+KMS API Functions
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :export:
 
 Atomic Mode Setting Function Reference
---------------------------------------
+======================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_atomic.c
    :export:
 
-.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+.. kernel-doc:: include/drm/drm_atomic.h
    :internal:
 
 Frame Buffer Abstraction
-------------------------
-
-Frame buffers are abstract memory objects that provide a source of
-pixels to scanout to a CRTC. Applications explicitly request the
-creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
-and receive an opaque handle that can be passed to the KMS CRTC control,
-plane configuration and page flip functions.
-
-Frame buffers rely on the underneath memory manager for low-level memory
-operations. When creating a frame buffer applications pass a memory
-handle (or a list of memory handles for multi-planar formats) through
-the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
-userspace buffer management interface this would be a GEM handle.
-Drivers are however free to use their own backing storage object
-handles, e.g. vmwgfx directly exposes special TTM handles to userspace
-and so expects TTM handles in the create ioctl and not GEM handles.
-
-The lifetime of a drm framebuffer is controlled with a reference count,
-drivers can grab additional references with
-:c:func:`drm_framebuffer_reference()`and drop them again with
-:c:func:`drm_framebuffer_unreference()`. For driver-private
-framebuffers for which the last reference is never dropped (e.g. for the
-fbdev framebuffer when the struct :c:type:`struct drm_framebuffer
-<drm_framebuffer>` is embedded into the fbdev helper struct)
-drivers can manually clean up a framebuffer at module unload time with
-:c:func:`drm_framebuffer_unregister_private()`.
+========================
+
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :doc: overview
+
+Frame Buffer Functions Reference
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_framebuffer.h
+   :internal:
 
 DRM Format Handling
--------------------
+===================
 
 .. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
    :export:
 
 Dumb Buffer Objects
--------------------
+===================
 
 The KMS API doesn't standardize backing storage object creation and
 leaves it to driver-specific ioctls. Furthermore actually creating a
@@ -114,14 +110,59 @@ Note that dumb objects may not be used for gpu acceleration, as has been
 attempted on some ARM embedded platforms. Such drivers really must have
 a hardware-specific ioctl to allocate suitable buffer objects.
 
-Output Polling
---------------
+Plane Abstraction
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :doc: overview
+
+Plane Functions Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_plane.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :export:
+
+Display Modes Function Reference
+================================
+
+.. kernel-doc:: include/drm/drm_modes.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+   :export:
 
-void (\*output_poll_changed)(struct drm_device \*dev);
-This operation notifies the driver that the status of one or more
-connectors has changed. Drivers that use the fb helper can just call the
-:c:func:`drm_fb_helper_hotplug_event()` function to handle this
-operation.
+Connector Abstraction
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: overview
+
+Connector Functions Reference
+-----------------------------
+
+.. kernel-doc:: include/drm/drm_connector.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :export:
+
+Encoder Abstraction
+===================
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :doc: overview
+
+Encoder Functions Reference
+---------------------------
+
+.. kernel-doc:: include/drm/drm_encoder.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :export:
 
 KMS Initialization and Cleanup
 ==============================
@@ -151,250 +192,6 @@ allocated and zeroed by the driver, possibly as part of a larger
 structure, and registered with a call to :c:func:`drm_crtc_init()`
 with a pointer to CRTC functions.
 
-Planes (:c:type:`struct drm_plane <drm_plane>`)
------------------------------------------------
-
-A plane represents an image source that can be blended with or overlayed
-on top of a CRTC during the scanout process. Planes are associated with
-a frame buffer to crop a portion of the image memory (source) and
-optionally scale it to a destination size. The result is then blended
-with or overlayed on top of a CRTC.
-
-The DRM core recognizes three types of planes:
-
--  DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
-   Primary planes are the planes operated upon by CRTC modesetting and
-   flipping operations described in the page_flip hook in
-   :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
--  DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
-   Cursor planes are the planes operated upon by the
-   DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
--  DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
-   planes. Some drivers refer to these types of planes as "sprites"
-   internally.
-
-For compatibility with legacy userspace, only overlay planes are made
-available to userspace by default. Userspace clients may set the
-DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
-that they wish to receive a universal plane list containing all plane
-types.
-
-Plane Initialization
-~~~~~~~~~~~~~~~~~~~~
-
-To create a plane, a KMS drivers allocates and zeroes an instances of
-:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
-larger structure) and registers it with a call to
-:c:func:`drm_universal_plane_init()`. The function takes a
-bitmask of the CRTCs that can be associated with the plane, a pointer to
-the plane functions, a list of format supported formats, and the type of
-plane (primary, cursor, or overlay) being initialized.
-
-Cursor and overlay planes are optional. All drivers should provide one
-primary plane per CRTC (although this requirement may change in the
-future); drivers that do not wish to provide special handling for
-primary planes may make use of the helper functions described in ? to
-create and register a primary plane with standard capabilities.
-
-Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
------------------------------------------------------
-
-An encoder takes pixel data from a CRTC and converts it to a format
-suitable for any attached connectors. On some devices, it may be
-possible to have a CRTC send data to more than one encoder. In that
-case, both encoders would receive data from the same scanout buffer,
-resulting in a "cloned" display configuration across the connectors
-attached to each encoder.
-
-Encoder Initialization
-~~~~~~~~~~~~~~~~~~~~~~
-
-As for CRTCs, a KMS driver must create, initialize and register at least
-one :c:type:`struct drm_encoder <drm_encoder>` instance. The
-instance is allocated and zeroed by the driver, possibly as part of a
-larger structure.
-
-Drivers must initialize the :c:type:`struct drm_encoder
-<drm_encoder>` possible_crtcs and possible_clones fields before
-registering the encoder. Both fields are bitmasks of respectively the
-CRTCs that the encoder can be connected to, and sibling encoders
-candidate for cloning.
-
-After being initialized, the encoder must be registered with a call to
-:c:func:`drm_encoder_init()`. The function takes a pointer to the
-encoder functions and an encoder type. Supported types are
-
--  DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
--  DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
--  DRM_MODE_ENCODER_LVDS for display panels
--  DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
-   Component, SCART)
--  DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
-
-Encoders must be attached to a CRTC to be used. DRM drivers leave
-encoders unattached at initialization time. Applications (or the fbdev
-compatibility layer when implemented) are responsible for attaching the
-encoders they want to use to a CRTC.
-
-Connectors (:c:type:`struct drm_connector <drm_connector>`)
------------------------------------------------------------
-
-A connector is the final destination for pixel data on a device, and
-usually connects directly to an external display device like a monitor
-or laptop panel. A connector can only be attached to one encoder at a
-time. The connector is also the structure where information about the
-attached display is kept, so it contains fields for display data, EDID
-data, DPMS & connection status, and information about modes supported on
-the attached displays.
-
-Connector Initialization
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally a KMS driver must create, initialize, register and attach at
-least one :c:type:`struct drm_connector <drm_connector>`
-instance. The instance is created as other KMS objects and initialized
-by setting the following fields.
-
-interlace_allowed
-    Whether the connector can handle interlaced modes.
-
-doublescan_allowed
-    Whether the connector can handle doublescan.
-
-display_info
-    Display information is filled from EDID information when a display
-    is detected. For non hot-pluggable displays such as flat panels in
-    embedded systems, the driver should initialize the
-    display_info.width_mm and display_info.height_mm fields with the
-    physical size of the display.
-
-polled
-    Connector polling mode, a combination of
-
-    DRM_CONNECTOR_POLL_HPD
-        The connector generates hotplug events and doesn't need to be
-        periodically polled. The CONNECT and DISCONNECT flags must not
-        be set together with the HPD flag.
-
-    DRM_CONNECTOR_POLL_CONNECT
-        Periodically poll the connector for connection.
-
-    DRM_CONNECTOR_POLL_DISCONNECT
-        Periodically poll the connector for disconnection.
-
-    Set to 0 for connectors that don't support connection status
-    discovery.
-
-The connector is then registered with a call to
-:c:func:`drm_connector_init()` with a pointer to the connector
-functions and a connector type, and exposed through sysfs with a call to
-:c:func:`drm_connector_register()`.
-
-Supported connector types are
-
--  DRM_MODE_CONNECTOR_VGA
--  DRM_MODE_CONNECTOR_DVII
--  DRM_MODE_CONNECTOR_DVID
--  DRM_MODE_CONNECTOR_DVIA
--  DRM_MODE_CONNECTOR_Composite
--  DRM_MODE_CONNECTOR_SVIDEO
--  DRM_MODE_CONNECTOR_LVDS
--  DRM_MODE_CONNECTOR_Component
--  DRM_MODE_CONNECTOR_9PinDIN
--  DRM_MODE_CONNECTOR_DisplayPort
--  DRM_MODE_CONNECTOR_HDMIA
--  DRM_MODE_CONNECTOR_HDMIB
--  DRM_MODE_CONNECTOR_TV
--  DRM_MODE_CONNECTOR_eDP
--  DRM_MODE_CONNECTOR_VIRTUAL
-
-Connectors must be attached to an encoder to be used. For devices that
-map connectors to encoders 1:1, the connector should be attached at
-initialization time with a call to
-:c:func:`drm_mode_connector_attach_encoder()`. The driver must
-also set the :c:type:`struct drm_connector <drm_connector>`
-encoder field to point to the attached encoder.
-
-Finally, drivers must initialize the connectors state change detection
-with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
-one connector is pollable but can't generate hotplug interrupts
-(indicated by the DRM_CONNECTOR_POLL_CONNECT and
-DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
-automatically be queued to periodically poll for changes. Connectors
-that can generate hotplug interrupts must be marked with the
-DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
-call :c:func:`drm_helper_hpd_irq_event()`. The function will
-queue a delayed work to check the state of all connectors, but no
-periodic polling will be done.
-
-Connector Operations
-~~~~~~~~~~~~~~~~~~~~
-
-    **Note**
-
-    Unless otherwise state, all operations are mandatory.
-
-DPMS
-''''
-
-void (\*dpms)(struct drm_connector \*connector, int mode);
-The DPMS operation sets the power state of a connector. The mode
-argument is one of
-
--  DRM_MODE_DPMS_ON
-
--  DRM_MODE_DPMS_STANDBY
-
--  DRM_MODE_DPMS_SUSPEND
-
--  DRM_MODE_DPMS_OFF
-
-In all but DPMS_ON mode the encoder to which the connector is attached
-should put the display in low-power mode by driving its signals
-appropriately. If more than one connector is attached to the encoder
-care should be taken not to change the power state of other displays as
-a side effect. Low-power mode should be propagated to the encoders and
-CRTCs when all related connectors are put in low-power mode.
-
-Modes
-'''''
-
-int (\*fill_modes)(struct drm_connector \*connector, uint32_t
-max_width, uint32_t max_height);
-Fill the mode list with all supported modes for the connector. If the
-``max_width`` and ``max_height`` arguments are non-zero, the
-implementation must ignore all modes wider than ``max_width`` or higher
-than ``max_height``.
-
-The connector must also fill in this operation its display_info
-width_mm and height_mm fields with the connected display physical size
-in millimeters. The fields should be set to 0 if the value isn't known
-or is not applicable (for instance for projector devices).
-
-Connection Status
-'''''''''''''''''
-
-The connection status is updated through polling or hotplug events when
-supported (see ?). The status value is reported to userspace through
-ioctls and must not be used inside the driver, as it only gets
-initialized by a call to :c:func:`drm_mode_getconnector()` from
-userspace.
-
-enum drm_connector_status (\*detect)(struct drm_connector
-\*connector, bool force);
-Check to see if anything is attached to the connector. The ``force``
-parameter is set to false whilst polling or to true when checking the
-connector due to user request. ``force`` can be used by the driver to
-avoid expensive, destructive operations during automated probing.
-
-Return connector_status_connected if something is connected to the
-connector, connector_status_disconnected if nothing is connected and
-connector_status_unknown if the connection state isn't known.
-
-Drivers should only return connector_status_connected if the
-connection status has really been probed as connected. Connectors that
-can't detect the connection status, or failed connection status probes,
-should return connector_status_unknown.
 
 Cleanup
 -------
@@ -463,20 +260,8 @@ created for fetching EDID data and performing monitor detection. Once
 the process is complete, the new connector is registered with sysfs to
 make its properties available to applications.
 
-KMS API Functions
------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
-   :export:
-
-KMS Data Structures
--------------------
-
-.. kernel-doc:: include/drm/drm_crtc.h
-   :internal:
-
 KMS Locking
------------
+===========
 
 .. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
    :doc: kms locking
@@ -490,90 +275,38 @@ KMS Locking
 KMS Properties
 ==============
 
-Drivers may need to expose additional parameters to applications than
-those described in the previous sections. KMS supports attaching
-properties to CRTCs, connectors and planes and offers a userspace API to
-list, get and set the property values.
-
-Properties are identified by a name that uniquely defines the property
-purpose, and store an associated value. For all property types except
-blob properties the value is a 64-bit unsigned integer.
-
-KMS differentiates between properties and property instances. Drivers
-first create properties and then create and associate individual
-instances of those properties to objects. A property can be instantiated
-multiple times and associated with different objects. Values are stored
-in property instances, and all other property information are stored in
-the property and shared between all instances of the property.
-
-Every property is created with a type that influences how the KMS core
-handles the property. Supported property types are
-
-DRM_MODE_PROP_RANGE
-    Range properties report their minimum and maximum admissible values.
-    The KMS core verifies that values set by application fit in that
-    range.
-
-DRM_MODE_PROP_ENUM
-    Enumerated properties take a numerical value that ranges from 0 to
-    the number of enumerated values defined by the property minus one,
-    and associate a free-formed string name to each value. Applications
-    can retrieve the list of defined value-name pairs and use the
-    numerical value to get and set property instance values.
-
-DRM_MODE_PROP_BITMASK
-    Bitmask properties are enumeration properties that additionally
-    restrict all enumerated values to the 0..63 range. Bitmask property
-    instance values combine one or more of the enumerated bits defined
-    by the property.
-
-DRM_MODE_PROP_BLOB
-    Blob properties store a binary blob without any format restriction.
-    The binary blobs are created as KMS standalone objects, and blob
-    property instance values store the ID of their associated blob
-    object.
-
-    Blob properties are only used for the connector EDID property and
-    cannot be created by drivers.
-
-To create a property drivers call one of the following functions
-depending on the property type. All property creation functions take
-property flags and name, as well as type-specific arguments.
-
--  struct drm_property \*drm_property_create_range(struct
-   drm_device \*dev, int flags, const char \*name, uint64_t min,
-   uint64_t max);
-   Create a range property with the given minimum and maximum values.
-
--  struct drm_property \*drm_property_create_enum(struct drm_device
-   \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create an enumerated property. The ``props`` argument points to an
-   array of ``num_values`` value-name pairs.
-
--  struct drm_property \*drm_property_create_bitmask(struct
-   drm_device \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create a bitmask property. The ``props`` argument points to an array
-   of ``num_values`` value-name pairs.
-
-Properties can additionally be created as immutable, in which case they
-will be read-only for applications but can be modified by the driver. To
-create an immutable property drivers must set the
-DRM_MODE_PROP_IMMUTABLE flag at property creation time.
-
-When no array of value-name pairs is readily available at property
-creation time for enumerated or range properties, drivers can create the
-property using the :c:func:`drm_property_create()` function and
-manually add enumeration value-name pairs by calling the
-:c:func:`drm_property_add_enum()` function. Care must be taken to
-properly specify the property type through the ``flags`` argument.
-
-After creating properties drivers can attach property instances to CRTC,
-connector and plane objects by calling the
-:c:func:`drm_object_attach_property()`. The function takes a
-pointer to the target object, a pointer to the previously created
-property and an initial instance value.
+Property Types and Blob Property Support
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_property.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :export:
+
+Plane Composition Properties
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :export:
+
+Color Management Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :export:
 
 Existing KMS Properties
 -----------------------
index 59f9822fecd0427e2212d27a89b53d6b16efe1f3..bca808535dfd85ff1810e9b4ce3d31d853a279fd 100644 (file)
@@ -26,12 +26,12 @@ TTM, but has no video RAM management capabilities and is thus limited to
 UMA devices.
 
 The Translation Table Manager (TTM)
------------------------------------
+===================================
 
 TTM design background and information belongs here.
 
 TTM initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
     **Warning**
 
@@ -77,7 +77,7 @@ object, ttm_global_item_ref() is used to create an initial reference
 count for the TTM, which will call your initialization function.
 
 The Graphics Execution Manager (GEM)
-------------------------------------
+====================================
 
 The GEM design approach has resulted in a memory manager that doesn't
 provide full coverage of all (or even all common) use cases in its
@@ -114,7 +114,7 @@ read & write, mapping, and domain ownership transfers are left to
 driver-specific ioctls.
 
 GEM Initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Drivers that use GEM must set the DRIVER_GEM bit in the struct
 :c:type:`struct drm_driver <drm_driver>` driver_features
@@ -132,7 +132,7 @@ typically not managed by GEM, and must be initialized separately into
 its own DRM MM object.
 
 GEM Objects Creation
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 GEM splits creation of GEM objects and allocation of the memory that
 backs them in two distinct operations.
@@ -173,7 +173,7 @@ a call to :c:func:`drm_gem_private_object_init()` instead of
 must be managed by drivers.
 
 GEM Objects Lifetime
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 All GEM objects are reference-counted by the GEM core. References can be
 acquired and released by :c:func:`calling
@@ -196,7 +196,7 @@ resources created by the GEM core, which need to be released with
 :c:func:`drm_gem_object_release()`.
 
 GEM Objects Naming
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Communication between userspace and the kernel refers to GEM objects
 using local handles, global names or, more recently, file descriptors.
@@ -245,7 +245,7 @@ Furthermore PRIME also allows cross-device buffer sharing since it is
 based on dma-bufs.
 
 GEM Objects Mapping
-~~~~~~~~~~~~~~~~~~~
+-------------------
 
 Because mapping operations are fairly heavyweight GEM favours
 read/write-like access to buffers, implemented through driver-specific
@@ -304,7 +304,7 @@ Drivers that want to map the GEM object upfront instead of handling page
 faults can implement their own mmap file operation handler.
 
 Memory Coherency
-~~~~~~~~~~~~~~~~
+----------------
 
 When mapped to the device or used in a command buffer, backing pages for
 an object are flushed to memory and marked write combined so as to be
@@ -320,7 +320,7 @@ blocks the client and waits for rendering to complete before performing
 any necessary flushing operations).
 
 Command Execution
-~~~~~~~~~~~~~~~~~
+-----------------
 
 Perhaps the most important GEM function for GPU devices is providing a
 command execution interface to clients. Client programs construct
@@ -348,8 +348,20 @@ GEM Function Reference
 .. kernel-doc:: include/drm/drm_gem.h
    :internal:
 
+GEM CMA Helper Functions Reference
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :doc: cma helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+   :internal:
+
 VMA Offset Manager
-------------------
+==================
 
 .. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
    :doc: vma offset manager
@@ -361,14 +373,14 @@ VMA Offset Manager
    :internal:
 
 PRIME Buffer Sharing
---------------------
+====================
 
 PRIME is the cross device buffer sharing framework in drm, originally
 created for the OPTIMUS range of multi-gpu platforms. To userspace PRIME
 buffers are dma-buf based file descriptors.
 
 Overview and Driver Interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
 
 Similar to GEM global names, PRIME file descriptors are also used to
 share buffer objects across processes. They offer additional security:
@@ -406,7 +418,7 @@ struct drm_gem_object \*obj, int flags); struct drm_gem_object \*
 support PRIME.
 
 PRIME Helper Functions
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_prime.c
    :doc: PRIME Helpers
@@ -418,16 +430,16 @@ PRIME Function References
    :export:
 
 DRM MM Range Allocator
-----------------------
+======================
 
 Overview
-~~~~~~~~
+--------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: Overview
 
 LRU Scan/Eviction Support
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: lru scan roaster
@@ -440,15 +452,3 @@ DRM MM Range Allocator Function References
 
 .. kernel-doc:: include/drm/drm_mm.h
    :internal:
-
-CMA Helper Functions Reference
-------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :doc: cma helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :export:
-
-.. kernel-doc:: include/drm/drm_gem_cma_helper.h
-   :internal:
index 536bf3eaadd4f8e28fe53eede067c6b19f444608..1ba301cebe160c548325d96de1e70f8d8a67c919 100644 (file)
@@ -33,6 +33,76 @@ Primary Nodes, DRM Master and Authentication
 .. kernel-doc:: include/drm/drm_auth.h
    :internal:
 
+Open-Source Userspace Requirements
+==================================
+
+The DRM subsystem has stricter requirements than most other kernel subsystems on
+what the userspace side for new uAPI needs to look like. This section here
+explains what exactly those requirements are, and why they exist.
+
+The short summary is that any addition of DRM uAPI requires corresponding
+open-sourced userspace patches, and those patches must be reviewed and ready for
+merging into a suitable and canonical upstream project.
+
+GFX devices (both display and render/GPU side) are really complex bits of
+hardware, with userspace and kernel by necessity having to work together really
+closely.  The interfaces, for rendering and modesetting, must be extremely wide
+and flexible, and therefore it is almost always impossible to precisely define
+them for every possible corner case. This in turn makes it practically
+infeasible to differentiate between behaviour that's required by userspace, and
+which must not be changed to avoid regressions, and behaviour which is only an
+accidental artifact of the current implementation.
+
+Without access to the full source code of all userspace users that means it
+becomes impossible to change the implementation details, since userspace could
+depend upon the accidental behaviour of the current implementation in minute
+details. And debugging such regressions without access to source code is pretty
+much impossible. As a consequence this means:
+
+- The Linux kernel's "no regression" policy holds in practice only for
+  open-source userspace of the DRM subsystem. DRM developers are perfectly fine
+  if closed-source blob drivers in userspace use the same uAPI as the open
+  drivers, but they must do so in the exact same way as the open drivers.
+  Creative (ab)use of the interfaces will lead to breakage, and in the past
+  routinely has.
+
+- Any new userspace interface must have an open-source implementation as
+  demonstration vehicle.
+
+The other reason for requiring open-source userspace is uAPI review. Since the
+kernel and userspace parts of a GFX stack must work together so closely, code
+review can only assess whether a new interface achieves its goals by looking at
+both sides. Making sure that the interface indeed covers the use-case fully
+leads to a few additional requirements:
+
+- The open-source userspace must not be a toy/test application, but the real
+  thing. Specifically it needs to handle all the usual error and corner cases.
+  These are often the places where new uAPI falls apart, and hence are
+  essential for assessing the fitness of a proposed interface.
+
+- The userspace side must be fully reviewed and tested to the standards of that
+  userspace project. For mesa, for example, this means piglit testcases and
+  review on the mailing list. This is again to ensure that the new interface
+  actually gets the job done.
+
+- The userspace patches must be against the canonical upstream, not some vendor
+  fork. This is to make sure that no one cheats on the review and testing
+  requirements by doing a quick fork.
+
+- The kernel patch can only be merged after all the above requirements are met,
+  but it **must** be merged **before** the userspace patches land. uAPI always flows
+  from the kernel; doing things the other way round risks divergence of the uAPI
+  definitions and header files.
+
+These are fairly steep requirements, but they have grown out of years of shared
+pain and experience with uAPI added hastily, and almost always regretted just
+as quickly. GFX devices change really fast, requiring a paradigm shift and an
+entirely new set of uAPI interfaces every few years at least. Together with the
+Linux kernel's guarantee to keep existing userspace running for 10+ years this
+is already rather painful for the DRM subsystem, with multiple different uAPIs
+for the same thing co-existing. If we add a few more complete mistakes into the
+mix every year it would be entirely unmanageable.
+
 Render nodes
 ============
 
@@ -86,6 +156,43 @@ other hand, a driver requires shared state between clients which is
 visible to user-space and accessible beyond open-file boundaries, they
 cannot support render nodes.
 
+Validating changes with IGT
+===========================
+
+There's a collection of tests that aims to cover the whole functionality of
+DRM drivers and that can be used to check that changes to DRM drivers or the
+core don't regress existing functionality. This test suite is called IGT and
+its code can be found at https://cgit.freedesktop.org/drm/igt-gpu-tools/.
+
+To build IGT, start by installing its build dependencies. In Debian-based
+systems::
+
+       # apt-get build-dep intel-gpu-tools
+
+And in Fedora-based systems::
+
+       # dnf builddep intel-gpu-tools
+
+Then clone the repository::
+
+       $ git clone git://anongit.freedesktop.org/drm/igt-gpu-tools
+
+Configure the build system and start the build::
+
+       $ cd igt-gpu-tools && ./autogen.sh && make -j6
+
+Download the piglit dependency::
+
+       $ ./scripts/run-tests.sh -d
+
+And run the tests::
+
+       $ ./scripts/run-tests.sh -t kms -t core -s
+
+run-tests.sh is a wrapper around piglit that will execute the tests matching
+the -t options. A report in HTML format will be available in
+./results/html/index.html. Results from different runs can be compared using
+piglit.
+
 VBlank event handling
 =====================
 
index 2fe5952e90f1d8a2bc13d2c903c1dcf416fc4ec2..87aaffc22920288ebbe3e6db17f2848843416b33 100644 (file)
@@ -70,6 +70,9 @@ Frontbuffer Tracking
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :doc: frontbuffer tracking
 
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :internal:
 
index fcac0fa72056cc298ff949d7cf0d03f531485fe8..ba92f45abb76e59f6c8324c3bd01db13b53003e9 100644 (file)
@@ -12,3 +12,4 @@ Linux GPU Driver Developer's Guide
    drm-uapi
    i915
    vga-switcheroo
+   vgaarbiter
index 4c5ce3edcfd9250978865c2a2b10f7d8538770df..981873a05d14845a334ef767e5de6673132dac09 100644 (file)
@@ -1,23 +1,10 @@
 Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
-DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in counter clockwise direction. reflect-x and reflect-y reflects the image along the specified axis prior to rotation
 ,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
 ,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
 ,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
 ,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
 ,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
 ,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
-,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
-,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
-,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
-,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
-,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
-,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
-,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
-,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
-,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
-,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
-,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
-,,“zpos”,RANGE,"Min=0, Max=UINT_MAX","Plane,Z-order of the plane.Planes with higher Z-order values are displayed on top, planes with identical Z-order values are display in an undefined order"
 ,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
@@ -36,12 +23,6 @@ DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, {
 ,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
 ,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
 ,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
-,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
-,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
-,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) apply to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is an interpreted as a struct drm_color_ctm.
-,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after to the transformation matrix to data sent to the connector. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
 i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
 ,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
 ,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
@@ -95,7 +76,6 @@ armada,CRTC,"""CSC_YUV""",ENUM,"{ ""Auto"" , ""CCIR601"", ""CCIR709"" }",CRTC,TB
 ,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 ,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
-,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
 i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
 ,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"",""PAL-N""}, ”PAL-Nc"" , ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
 nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
@@ -126,4 +106,3 @@ radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
 ,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
 rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
 ,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
-,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
diff --git a/Documentation/gpu/vgaarbiter.rst b/Documentation/gpu/vgaarbiter.rst
new file mode 100644 (file)
index 0000000..0b41b05
--- /dev/null
@@ -0,0 +1,191 @@
+===========
+VGA Arbiter
+===========
+
+Graphic devices are accessed through ranges in I/O or memory space. While most
+modern devices allow relocation of such ranges, some "Legacy" VGA devices
+implemented on PCI will typically have the same "hard-decoded" addresses as
+they did on ISA. For more details see "PCI Bus Binding to IEEE Std 1275-1994
+Standard for Boot (Initialization Configuration) Firmware Revision 2.1"
+Section 7, Legacy Devices.
+
+The Resource Access Control (RAC) module inside the X server [0] existed for
+the legacy VGA arbitration task (besides other bus management tasks) when more
+than one legacy device co-exists on the same machine. But the problem happens
+when these devices are trying to be accessed by different userspace clients
+(e.g. two server in parallel). Their address assignments conflict. Moreover,
+ideally, being a userspace application, it is not the role of the X server to
+control bus resources. Therefore an arbitration scheme outside of the X server
+is needed to control the sharing of these resources. This document introduces
+the operation of the VGA arbiter implemented for the Linux kernel.
+
+vgaarb kernel/userspace ABI
+---------------------------
+
+The vgaarb is a module of the Linux Kernel. When it is initially loaded, it
+scans all PCI devices and adds the VGA ones to the arbitration. The
+arbiter then enables/disables decoding of the legacy VGA instructions on the
+different devices. Devices which do not want/need to use the arbiter may
+explicitly tell it so by calling vga_set_legacy_decoding().
+
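+For instance, a driver that has disabled legacy VGA decoding in its hardware
+might opt out like this (a minimal sketch; the foo_ prefix is hypothetical and
+the exact point at which a real driver does this is device-specific)::
+
+    #include <linux/pci.h>
+    #include <linux/vgaarb.h>
+
+    static void foo_opt_out_of_vga_arbitration(struct pci_dev *pdev)
+    {
+        /* This device no longer decodes the legacy VGA IO/memory
+         * ranges, so the arbiter can leave it out entirely. */
+        vga_set_legacy_decoding(pdev, VGA_RSRC_NONE);
+    }
+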
+The kernel exports a char device interface (/dev/vga_arbiter) to the clients,
+which has the following semantics:
+
+open
+        Opens a user instance of the arbiter. By default, it's attached to the
+        default VGA device of the system.
+
+close
+        Closes a user instance and releases any locks held by the user.
+
+read
+        Returns a string indicating the status of the target, e.g.:
+
+        "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+
+        An IO state string is of the form {io,mem,io+mem,none}, mc and
+        ic are respectively mem and io lock counts (for debugging/
+        diagnostic only). "decodes" indicates what the card currently
+        decodes, "owns" indicates what is currently enabled on it, and
+        "locks" indicates what is locked by this card. If the card is
+        unplugged, card_ID reads as "invalid" and an -ENODEV error is
+        returned for any command until a new card is targeted.
+
+write
+        Writes a command to the arbiter. The list of commands is:
+
+        target <card_ID>
+                switch target to card <card_ID> (see below)
+        lock <io_state>
+                acquires locks on target ("none" is an invalid io_state)
+        trylock <io_state>
+                non-blocking attempt to acquire locks on target (returns EBUSY if
+                unsuccessful)
+        unlock <io_state>
+                release locks on target
+        unlock all
+                release all locks on target held by this user (not implemented
+                yet)
+        decodes <io_state>
+                set the legacy decoding attributes for the card
+
+        poll
+                signals an event if something changes on any card (not just
+                the target)
+
+        card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+        to go back to the system default card (TODO: not implemented yet). Currently,
+        only PCI is supported as a prefix, but the userland API may support other bus
+        types in the future, even if the current kernel implementation doesn't.
+
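+A minimal sketch of a client driving this interface (the card address and
+error handling are illustrative only)::
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        char buf[128] = "";
+        int fd = open("/dev/vga_arbiter", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        /* Aim at one card and grab its legacy ranges. */
+        write(fd, "target PCI:0000:01:00.0", strlen("target PCI:0000:01:00.0"));
+        write(fd, "lock io+mem", strlen("lock io+mem"));
+        /* Read back the "<card_ID>,decodes=...,owns=...,locks=..." status. */
+        read(fd, buf, sizeof(buf) - 1);
+        write(fd, "unlock io+mem", strlen("unlock io+mem"));
+        close(fd);
+        return 0;
+    }
+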
+Note about locks:
+
+The driver keeps track of which user has which locks on which card. It
+supports stacking, like the kernel one. This complicates the implementation
+a bit, but makes the arbiter more tolerant of user space problems and able
+to clean up properly in all cases when a process dies.
+Currently, a max of 16 cards can have locks simultaneously issued from
+user space for a given user (file descriptor instance) of the arbiter.
+
+When devices are hot-{un,}plugged, a hook - pci_notify() - is notified of
+devices being added to or removed from the system, so that they are
+automatically added to or removed from the arbiter as well.
+
+There is also an in-kernel API of the arbiter in case DRM, vgacon, or other
+drivers want to use it.
+
+In-kernel interface
+-------------------
+
+.. kernel-doc:: include/linux/vgaarb.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/vga/vgaarb.c
+   :export:
+
+libpciaccess
+------------
+
+To make use of the vga arbiter char device, an API was implemented inside the
+libpciaccess library. One field was added to struct pci_device (one instance
+per device on the system)::
+
+    /* the type of resource decoded by the device */
+    int vgaarb_rsrc;
+
+In addition, the following fields were added to pci_system::
+
+    int vgaarb_fd;
+    int vga_count;
+    struct pci_device *vga_target;
+    struct pci_device *vga_default_dev;
+
+The vga_count field is used to track how many cards are being arbitrated; for
+instance, if there is only one card, it can completely escape arbitration.
+
+The functions below acquire VGA resources for the given card and mark those
+resources as locked. If the resources requested are "normal" (and not legacy)
+resources, the arbiter will first check whether the card is doing legacy
+decoding for that type of resource. If yes, the lock is "converted" into a
+legacy resource lock. The arbiter will first look for all VGA cards that
+might conflict and disable their IOs and/or Memory access, including VGA
+forwarding on P2P bridges if necessary, so that the requested resources can
+be used. Then, the card is marked as locking these resources and the IO and/or
+Memory access is enabled on the card (including VGA forwarding on parent
+P2P bridges if any). In the case of vga_arb_lock(), the function will block
+if some conflicting card is already locking one of the required resources (or
+any resource on a different bus segment, since P2P bridges don't, as far as
+we know, differentiate VGA memory and IO). If the card already owns the
+resources, the function
+succeeds.  vga_arb_trylock() will return (-EBUSY) instead of blocking. Nested
+calls are supported (a per-resource counter is maintained).
+
+Set the target device of this client. ::
+
+    int  pci_device_vgaarb_set_target   (struct pci_device *dev);
+
+For instance, on x86, if two devices on the same bus want to lock different
+resources, both will succeed (lock). If the devices are on different buses and
+try to lock different resources, only the first one that tried succeeds. ::
+
+    int  pci_device_vgaarb_lock         (void);
+    int  pci_device_vgaarb_trylock      (void);
+
+Unlock resources of device. ::
+
+    int  pci_device_vgaarb_unlock       (void);
+
+Indicates to the arbiter whether the card decodes legacy VGA IOs, legacy VGA
+Memory, both, or none. All cards default to both; the card driver (fbdev for
+example) should tell the arbiter if it has disabled legacy decoding, so the
+card can be left out of the arbitration process (and it is then safe to take
+interrupts at any time). ::
+
+    int  pci_device_vgaarb_decodes      (int new_vgaarb_rsrc);
+
+Connects to the arbiter device and allocates the struct. ::
+
+    int  pci_device_vgaarb_init         (void);
+
+Closes the connection. ::
+
+    void pci_device_vgaarb_fini         (void);
+
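+Taken together, a hypothetical client might use the API like this (error
+handling omitted)::
+
+    pci_device_vgaarb_init();
+    pci_device_vgaarb_set_target(dev);
+    pci_device_vgaarb_lock();
+    /* ... access legacy VGA ranges ... */
+    pci_device_vgaarb_unlock();
+    pci_device_vgaarb_fini();
+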
+xf86VGAArbiter (X server implementation)
+----------------------------------------
+
+The X server basically wraps all the functions that touch VGA registers, so
+that register accesses can be arbitrated.
+
+References
+----------
+
+Benjamin Herrenschmidt (IBM?) started this work when he discussed such a
+design with the Xorg community in 2005 [1, 2]. At the end of 2007, Paulo
+Zanoni and Tiago Vignatti (both of C3SL/Federal University of Paraná)
+continued his work, enhancing the kernel code to adapt it as a kernel module,
+and also implemented the user space side [3]. Now (2009) Tiago Vignatti and
+Dave Airlie finally put this work in shape and queued it to Jesse Barnes'
+PCI tree.
+
+0) http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
+1) http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
+2) http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
+3) http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
index 2a1bf69c6a26fc3c416755f192f3f8778023ceee..8c10a916de20d064a34fb43b121309b190eab98c 100644 (file)
@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
 implemented in this driver.
 
 Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
index 80807adb8ded52c6ad0482b0abab86e79921a8e5..7e2a228f21bccd8ab12cec93bda73831f66827cb 100644 (file)
@@ -145,6 +145,11 @@ If you want to add slave support to the bus driver:
 
 * Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
 
+Note that most hardware supports being master _and_ slave on the same bus. So,
+if you extend a bus driver, please make sure that the driver supports that as
+well. In almost all cases, slave support does not need to disable the master
+functionality.
+
 Check the i2c-rcar driver as an example.
 
 
index c4eb5049da390767753032acfc583ac282bd0f0b..391decc66a18fd3da282cc009d6f239441de4a8d 100644 (file)
@@ -366,8 +366,6 @@ Domain`_ references.
 Cross-referencing from reStructuredText
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. highlight:: none
-
 To cross-reference the functions and types defined in the kernel-doc comments
 from reStructuredText documents, please use the `Sphinx C Domain`_
 references. For example::
@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
 Function documentation
 ----------------------
 
-.. highlight:: c
-
 The general format of a function and function-like macro kernel-doc comment is::
 
   /**
@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
 Converting DocBook to Sphinx
 ----------------------------
 
-.. highlight:: none
-
 Over time, we expect all of the documents under ``Documentation/DocBook`` to be
 converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
 enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
index 46c030a49186faabaa588afd2d284975c72c2017..a4f4d693e2c1287d533c0bc05d9f292f9b529720 100644 (file)
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                PAGE_SIZE is used as alignment.
                                PCI-PCI bridge can be specified, if resource
                                windows need to be expanded.
+                               To specify the alignment for several
+                               instances of a device, the PCI vendor,
+                               device, subvendor, and subdevice may be
+                               specified, e.g., 4096@pci:8086:9c22:103c:198f
                ecrc=           Enable/disable PCIe ECRC (transaction layer
                                end-to-end CRC checking).
                                bios: Use BIOS/firmware settings. This is the
index 04ee90099676b940608a499e84aaf38d571c0d48..201d4839931cc085124a50636d509389d72715aa 100644 (file)
@@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
 
        -  ``flags``
 
-       -  Flags. No flags are defined yet, so set this to 0.
+       -  Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
 
     -  .. row 7
 
@@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
           give the CEC framework more information about the device type, even
           though the framework won't use it directly in the CEC message.
 
+.. _cec-log-addrs-flags:
+
+.. flat-table:: Flags for struct cec_log_addrs
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       3 1 4
+
+
+    -  .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
+
+       -  ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
+
+       -  1
+
+       -  By default, if no logical address of the requested type can be claimed, the
+          adapter will go back to the unconfigured state. If this flag is set, it will
+          fall back to the Unregistered logical address instead. Note that if the
+          Unregistered logical address was explicitly requested, this flag has no effect.
+
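+For illustration, a client requesting a single Playback logical address with
+this fallback enabled might fill struct cec_log_addrs roughly like this (a
+partial sketch; the other fields that ``CEC_ADAP_S_LOG_ADDRS`` requires, such
+as ``cec_version`` and ``osd_name``, are omitted)::
+
+    struct cec_log_addrs laddrs;
+
+    memset(&laddrs, 0, sizeof(laddrs));
+    laddrs.num_log_addrs = 1;
+    laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+    laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
+    /* remaining required fields omitted for brevity */
+    ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
+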
 .. _cec-versions:
 
 .. flat-table:: CEC Versions
index 7a6d6d00ce194572f8053008397daa0395ee05dc..2e1e739283960d77688da88a3a6ac791675e1759 100644 (file)
@@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
 
        -  ``phys_addr``
 
-       -  The current physical address.
+       -  The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
+          valid physical address is set.
 
     -  .. row 2
 
@@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
 
        -  ``log_addr_mask``
 
-       -  The current set of claimed logical addresses.
+       -  The current set of claimed logical addresses. This is 0 if no logical
+          addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
+         If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
+         has the unregistered logical address. In that case all other bits are 0.
 
 
 
index 9d05ed7f7da51648cafeee84a2a21d9711c19dde..f20c884c048afe16409a566bb36c589b39864209 100644 (file)
 of DSA, would be its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------
 
index 16a924c486bf3adb856efc2a2bec72b90311f2ff..70c926ae212d397c8543cbc9da56ad815fe62da5 100644 (file)
@@ -790,13 +790,12 @@ The kernel interface functions are as follows:
      Data messages can have their contents extracted with the usual bunch of
      socket buffer manipulation functions.  A data message can be determined to
      be the last one in a sequence with rxrpc_kernel_is_data_last().  When a
-     data message has been used up, rxrpc_kernel_data_delivered() should be
-     called on it..
+     data message has been used up, rxrpc_kernel_data_consumed() should be
+     called on it.
 
-     Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
-     of.  It is possible to get extra refs on all types of message for later
-     freeing, but this may pin the state of a call until the message is finally
-     freed.
+     Messages should be handed to rxrpc_kernel_free_skb() for disposal.  It
+     is possible to get extra refs on all types of message for later freeing,
+     but this may pin the state of a call until the message is finally freed.
 
  (*) Accept an incoming call.
 
@@ -821,12 +820,14 @@ The kernel interface functions are as follows:
      Other errors may be returned if the call had been aborted (-ECONNABORTED)
      or had timed out (-ETIME).
 
- (*) Record the delivery of a data message and free it.
+ (*) Record the delivery of a data message.
 
-       void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+       void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
+                                       struct sk_buff *skb);
 
-     This is used to record a data message as having been delivered and to
-     update the ACK state for the call.  The socket buffer will be freed.
+     This is used to record a data message as having been consumed and to
+     update the ACK state for the call.  The message must still be passed to
+     rxrpc_kernel_free_skb() for disposal by the caller.
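+
+     A minimal sketch of the resulting pattern (the surrounding logic is
+     illustrative only):
+
+	rxrpc_kernel_data_consumed(call, skb);	/* update the ACK state */
+	rxrpc_kernel_free_skb(skb);		/* then dispose of the skb */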
 
  (*) Free a message.
 
index b96098ccfe69208cf7491c1c4885563310398bf5..708f87f78a756aaa923aedcb11103a96bf5611e7 100644 (file)
@@ -164,7 +164,32 @@ load n/2 modules more and try again.
 Again, if you find the offending module(s), it(they) must be unloaded every time
 before hibernation, and please report the problem with it(them).
 
-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image.  One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration.  Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware.  That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it can also be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging
 
 In case that hibernation does not work on your system even in the minimal
 configuration and compiling more drivers as modules is not practical or some
index f1f0f59a7c47d594a9753207d713d0834a0e99b1..974916ff6608e7b55f390d69e64a8fbe7396a9e9 100644 (file)
@@ -1,75 +1,76 @@
-Power Management Interface
-
-
-The power management subsystem provides a unified sysfs interface to 
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys). 
-
-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk). 
-
-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
-
-
-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
-
-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'.  If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs.  If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs.  Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
-
-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
-
-       [shutdown] reboot test testproc
-
-Writing to this file will accept one of
-
-       'platform' (only if the platform supports it)
-       'shutdown'
-       'reboot'
-       'testproc'
-       'test'
-
-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism.  It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes.  The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number.  However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible.  In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
-
-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
-
-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume).  Namely, the RTC is only
-used to save the last PM event point if this file contains '1'.  Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
-
-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
-
-       dmesg -s 1000000 | grep 'hash matches'
-
-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+Power Management Interface for System Sleep
+
+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).
+
+/sys/power/state is the system sleep state control file.
+
+Reading from it returns a list of supported sleep states, encoded as:
+
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)
+
+Suspend-to-Idle is always supported.  Suspend-to-Disk is always supported
+too, as long as the kernel has been configured to support hibernation at
+all (i.e. CONFIG_HIBERNATION is set in the kernel configuration file).
+Support for Suspend-to-RAM and Power-On Suspend depends on the
+capabilities of the platform.
+
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state.  Refer to
+Documentation/power/states.txt for a description of each of those states.
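+
+As a concrete illustration, the following minimal userspace sketch (a
+hypothetical helper, not part of the kernel) writes a state string to the
+control file; it must be run as root:
+
+	#include <stdio.h>
+
+	/* hypothetical helper: write a string to a sysfs attribute */
+	static int sysfs_write(const char *path, const char *val)
+	{
+		FILE *f = fopen(path, "w");
+
+		if (!f)
+			return -1;
+		if (fputs(val, f) == EOF) {
+			fclose(f);
+			return -1;
+		}
+		return fclose(f);	/* 0 on success */
+	}
+
+	int main(void)
+	{
+		/* e.g. trigger Suspend-to-RAM, if 'mem' is supported */
+		return sysfs_write("/sys/power/state", "mem") ? 1 : 0;
+	}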
+
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.
+
+Reading from it returns a list of supported options encoded as:
+
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)
+
+The currently selected option is printed in square brackets.
+
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example).  The 'suspend' option is available if Suspend-to-RAM
+is supported.  Refer to Documentation/power/basic-pm-debugging.txt for the
+description of the 'test_resume' option.
+
+To select an option, write the string representing it to /sys/power/disk.
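+
+For example, selecting 'reboot' with the sysfs_write() helper sketched
+above makes the system reboot after the hibernation image has been saved.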
+
+/sys/power/image_size controls the size of hibernation images.
+
+A string representing a non-negative integer can be written to it; that
+number will be used as a best-effort upper limit of the image size, in
+bytes.  The hibernation core will do its best to ensure that the image
+size will not exceed that number.  However, if that turns out to be
+impossible to achieve, a hibernation image will still be created and its
+size will be as small as possible.  In particular, writing '0' to this
+file causes hibernation images to be as small as possible.
+
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.
+
+/sys/power/pm_trace controls the PM trace mechanism, which saves the last
+suspend or resume event point in the RTC across reboots.
+
+It makes it easier to debug hard lockups or reboots caused by device
+driver failures during system suspend or resume (the latter being more
+common).
+
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the
+actual RTC information), so it will survive a system crash occurring right
+after it has been stored and can be used later to identify the driver that
+caused the crash (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0', which may be changed to '1' by writing a string
+representing a nonzero integer into it.
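+
+For example, after arming the mechanism (e.g. with
+sysfs_write("/sys/power/pm_trace", "1") via the helper sketched above),
+attempt the failing suspend, reboot the machine and search the kernel log
+for the stored fingerprint:
+
+	dmesg -s 1000000 | grep 'hash matches'
+
+Bear in mind that while pm_trace is in use, the machine's real-time (CMOS)
+clock will be set to a random invalid time after each resume, so this is
+strictly a debugging facility.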
index ba0a2a4a54ba1ffcb484786381b91f5113a62ad6..e32fdbb4c9a7a284976d624037df1ff4c655958c 100644 (file)
@@ -167,6 +167,8 @@ signal will be rolled back anyway.
 For signals taken in non-TM or suspended mode, we use the
 normal/non-checkpointed stack pointer.
 
+Any transaction initiated inside a sighandler and suspended on return
+from the sighandler to the kernel will get reclaimed and discarded.
 
 Failure cause codes used by kernel
 ==================================
index 6e491a6624618cd30cb67e717fd60fed50f2eaa7..a53f786ee2e9ccf7de7d48a8b5d4e5e8b09d00e5 100644 (file)
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
 
 III. Module parameters
 
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+        This parameter sets the maximum completion wait time for SYNC mode DMA
+        transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
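+        For example, passing dma_timeout=5000 on the module command line
+        raises this limit to 5 seconds for such requests.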
+
 - 'dbg_level' - This parameter allows controlling the amount of debug information
         generated by this device driver. This parameter is formed by a set of
         bit masks that correspond to the specific functional blocks.
index 3a2ac4bcfd789bc704987b650da6694089a91dca..e88461c4c1e69dd186a7b420215246154f9a9f09 100644 (file)
     caption a.headerlink { opacity: 0; }
     caption a.headerlink:hover { opacity: 1; }
 
-    /* inline literal: drop the borderbox and red color */
+    /* inline literal: drop the borderbox, padding and red color */
 
     code, .rst-content tt, .rst-content code {
         color: inherit;
         border: none;
+        padding: unset;
         background: inherit;
         font-size: 85%;
     }
index e8e2ebafe5fa0ea350af12b5103788e3d04de268..b63a68531afdedcab0944f9cd9b40742c9deac98 100644 (file)
@@ -64,6 +64,20 @@ The sync_file fd now can be sent to userspace.
 If the creation process fails, or the sync_file needs to be released for any
 other reason, fput(sync_file->file) should be used.
 
+Receiving Sync Files from Userspace
+-----------------------------------
+
+When userspace needs to send an in-fence to the driver, it passes the file
+descriptor of the Sync File to the kernel. The kernel can then retrieve
+the fences from it.
+
+Interface:
+       struct fence *sync_file_get_fence(int fd);
+
+The returned reference is owned by the caller and must be disposed of
+afterwards using fence_put(). In case of error, NULL is returned instead.
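+
+A minimal sketch of a driver-side consumer, assuming the usual
+<linux/fence.h> and <linux/sync_file.h> includes (the surrounding ioctl
+plumbing is driver-specific and purely illustrative):
+
+	static long driver_wait_in_fence(int fd)
+	{
+		struct fence *fence = sync_file_get_fence(fd);
+		long ret;
+
+		if (!fence)
+			return -EINVAL;	/* not a valid sync_file fd */
+
+		/* block (interruptibly) until the fence signals */
+		ret = fence_wait(fence, true);
+
+		/* drop the reference returned by sync_file_get_fence() */
+		fence_put(fence);
+		return ret;
+	}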
+
 References:
 [1] struct sync_file in include/linux/sync_file.h
 [2] All interfaces mentioned above defined in include/linux/sync_file.h
diff --git a/Documentation/vgaarbiter.txt b/Documentation/vgaarbiter.txt
deleted file mode 100644 (file)
index 014423e..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-
-VGA Arbiter
-===========
-
-Graphic devices are accessed through ranges in I/O or memory space. While most
-modern devices allow relocation of such ranges, some "Legacy" VGA devices
-implemented on PCI will typically have the same "hard-decoded" addresses as
-they did on ISA. For more details see "PCI Bus Binding to IEEE Std 1275-1994
-Standard for Boot (Initialization Configuration) Firmware Revision 2.1"
-Section 7, Legacy Devices.
-
-The Resource Access Control (RAC) module inside the X server [0] existed for
-the legacy VGA arbitration task (besides other bus management tasks) when more
-than one legacy device co-exists on the same machine. But the problem happens
-when these devices are trying to be accessed by different userspace clients
-(e.g. two server in parallel). Their address assignments conflict. Moreover,
-ideally, being a userspace application, it is not the role of the X server to
-control bus resources. Therefore an arbitration scheme outside of the X server
-is needed to control the sharing of these resources. This document introduces
-the operation of the VGA arbiter implemented for the Linux kernel.
-
-----------------------------------------------------------------------------
-
-I.  Details and Theory of Operation
-        I.1 vgaarb
-        I.2 libpciaccess
-        I.3 xf86VGAArbiter (X server implementation)
-II. Credits
-III.References
-
-
-I. Details and Theory of Operation
-==================================
-
-I.1 vgaarb
-----------
-
-The vgaarb is a module of the Linux Kernel. When it is initially loaded, it
-scans all PCI devices and adds the VGA ones inside the arbitration. The
-arbiter then enables/disables the decoding on different devices of the VGA
-legacy instructions. Devices which do not want/need to use the arbiter may
-explicitly tell it by calling vga_set_legacy_decoding().
-
-The kernel exports a char device interface (/dev/vga_arbiter) to the clients,
-which has the following semantics:
-
- open       : open user instance of the arbiter. By default, it's attached to
-              the default VGA device of the system.
-
- close      : close user instance. Release locks made by the user
-
- read       : return a string indicating the status of the target like:
-
-              "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
-
-              An IO state string is of the form {io,mem,io+mem,none}, mc and
-              ic are respectively mem and io lock counts (for debugging/
-              diagnostic only). "decodes" indicate what the card currently
-              decodes, "owns" indicates what is currently enabled on it, and
-              "locks" indicates what is locked by this card. If the card is
-              unplugged, we get "invalid" then for card_ID and an -ENODEV
-              error is returned for any command until a new card is targeted.
-
-
- write       : write a command to the arbiter. List of commands:
-
-  target <card_ID>   : switch target to card <card_ID> (see below)
-  lock <io_state>    : acquires locks on target ("none" is an invalid io_state)
-  trylock <io_state> : non-blocking acquire locks on target (returns EBUSY if
-                       unsuccessful)
-  unlock <io_state>  : release locks on target
-  unlock all         : release all locks on target held by this user (not
-                       implemented yet)
-  decodes <io_state> : set the legacy decoding attributes for the card
-
-  poll               : event if something changes on any card (not just the
-                       target)
-
-  card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
-  to go back to the system default card (TODO: not implemented yet). Currently,
-  only PCI is supported as a prefix, but the userland API may support other bus
-  types in the future, even if the current kernel implementation doesn't.
-
-Note about locks:
-
-The driver keeps track of which user has which locks on which card. It
-supports stacking, like the kernel one. This complexifies the implementation
-a bit, but makes the arbiter more tolerant to user space problems and able
-to properly cleanup in all cases when a process dies.
-Currently, a max of 16 cards can have locks simultaneously issued from
-user space for a given user (file descriptor instance) of the arbiter.
-
-In the case of devices hot-{un,}plugged, there is a hook - pci_notify() - to
-notify them being added/removed in the system and automatically added/removed
-in the arbiter.
-
-There is also an in-kernel API of the arbiter in case DRM, vgacon, or other
-drivers want to use it.
-
-
-I.2 libpciaccess
-----------------
-
-To use the vga arbiter char device it was implemented an API inside the
-libpciaccess library. One field was added to struct pci_device (each device
-on the system):
-
-    /* the type of resource decoded by the device */
-    int vgaarb_rsrc;
-
-Besides it, in pci_system were added:
-
-    int vgaarb_fd;
-    int vga_count;
-    struct pci_device *vga_target;
-    struct pci_device *vga_default_dev;
-
-
-The vga_count is used to track how many cards are being arbitrated, so for
-instance, if there is only one card, then it can completely escape arbitration.
-
-
-These functions below acquire VGA resources for the given card and mark those
-resources as locked. If the resources requested are "normal" (and not legacy)
-resources, the arbiter will first check whether the card is doing legacy
-decoding for that type of resource. If yes, the lock is "converted" into a
-legacy resource lock. The arbiter will first look for all VGA cards that
-might conflict and disable their IOs and/or Memory access, including VGA
-forwarding on P2P bridges if necessary, so that the requested resources can
-be used. Then, the card is marked as locking these resources and the IO and/or
-Memory access is enabled on the card (including VGA forwarding on parent
-P2P bridges if any). In the case of vga_arb_lock(), the function will block
-if some conflicting card is already locking one of the required resources (or
-any resource on a different bus segment, since P2P bridges don't differentiate
-VGA memory and IO afaik). If the card already owns the resources, the function
-succeeds.  vga_arb_trylock() will return (-EBUSY) instead of blocking. Nested
-calls are supported (a per-resource counter is maintained).
-
-
-Set the target device of this client.
-    int  pci_device_vgaarb_set_target   (struct pci_device *dev);
-
-
-For instance, in x86 if two devices on the same bus want to lock different
-resources, both will succeed (lock). If devices are in different buses and
-trying to lock different resources, only the first who tried succeeds.
-    int  pci_device_vgaarb_lock         (void);
-    int  pci_device_vgaarb_trylock      (void);
-
-Unlock resources of device.
-    int  pci_device_vgaarb_unlock       (void);
-
-Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA
-Memory, both, or none. All cards default to both, the card driver (fbdev for
-example) should tell the arbiter if it has disabled legacy decoding, so the
-card can be left out of the arbitration process (and can be safe to take
-interrupts at any time.
-    int  pci_device_vgaarb_decodes      (int new_vgaarb_rsrc);
-
-Connects to the arbiter device, allocates the struct
-    int  pci_device_vgaarb_init         (void);
-
-Close the connection
-    void pci_device_vgaarb_fini         (void);
-
-
-I.3 xf86VGAArbiter (X server implementation)
---------------------------------------------
-
-(TODO)
-
-X server basically wraps all the functions that touch VGA registers somehow.
-
-
-II. Credits
-===========
-
-Benjamin Herrenschmidt (IBM?) started this work when he discussed such design
-with the Xorg community in 2005 [1, 2]. In the end of 2007, Paulo Zanoni and
-Tiago Vignatti (both of C3SL/Federal University of Paraná) proceeded his work
-enhancing the kernel code to adapt as a kernel module and also did the
-implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
-Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
-
-
-III. References
-==============
-
-[0] http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
-[1] http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
-[2] http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
-[3] http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
index 20bb1d00098c70dacad7a9c778087f9319b0c5c6..703fcb51b7826ceef41f6d8fc8e9a46b8046a419 100644 (file)
@@ -798,6 +798,7 @@ M:  Laura Abbott <labbott@redhat.com>
 M:     Sumit Semwal <sumit.semwal@linaro.org>
 L:     devel@driverdev.osuosl.org
 S:     Supported
+F:     Documentation/devicetree/bindings/staging/ion/
 F:     drivers/staging/android/ion
 F:     drivers/staging/android/uapi/ion.h
 F:     drivers/staging/android/uapi/ion_test.h
@@ -881,6 +882,15 @@ S: Supported
 F:     drivers/gpu/drm/arc/
 F:     Documentation/devicetree/bindings/display/snps,arcpgu.txt
 
+ARM ARCHITECTED TIMER DRIVER
+M:     Mark Rutland <mark.rutland@arm.com>
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/include/asm/arch_timer.h
+F:     arch/arm64/include/asm/arch_timer.h
+F:     drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 S:     Supported
@@ -1004,6 +1014,7 @@ N:        meson
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
 M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
 F:     arch/arm/boot/dts/alpine*
@@ -1613,7 +1624,8 @@ N:        rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -1633,7 +1645,6 @@ F:        drivers/*/*s3c64xx*
 F:     drivers/*/*s5pv210*
 F:     drivers/memory/samsung/*
 F:     drivers/soc/samsung/*
-F:     drivers/spi/spi-s3c*
 F:     Documentation/arm/Samsung/
 F:     Documentation/devicetree/bindings/arm/samsung/
 F:     Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1821,6 +1832,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 ARM/UNIPHIER ARCHITECTURE
 M:     Masahiro Yamada <yamada.masahiro@socionext.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
 S:     Maintained
 F:     arch/arm/boot/dts/uniphier*
 F:     arch/arm/include/asm/hardware/cache-uniphier.h
@@ -2474,7 +2486,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <gospo@cumulusnetworks.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -2489,7 +2501,7 @@ S:        Supported
 F:     kernel/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
-M:     Gary Zambrano <zambrano@broadcom.com>
+M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/b44.*
@@ -3237,7 +3249,7 @@ F:        kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
-M:     Vladimir Davydov <vdavydov@virtuozzo.com>
+M:     Vladimir Davydov <vdavydov.dev@gmail.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -3258,7 +3270,7 @@ S:        Maintained
 F:     drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:     Florian Fainelli <florian@openwrt.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
@@ -4063,6 +4075,14 @@ S:       Orphan / Obsolete
 F:     drivers/gpu/drm/i810/
 F:     include/uapi/drm/i810_drm.h
 
+DRM DRIVERS FOR MEDIATEK
+M:     CK Hu <ck.hu@mediatek.com>
+M:     Philipp Zabel <p.zabel@pengutronix.de>
+L:     dri-devel@lists.freedesktop.org
+S:     Supported
+F:     drivers/gpu/drm/mediatek/
+F:     Documentation/devicetree/bindings/display/mediatek/
+
 DRM DRIVER FOR MSM ADRENO GPU
 M:     Rob Clark <robdclark@gmail.com>
 L:     linux-arm-msm@vger.kernel.org
@@ -4524,6 +4544,12 @@ L:       linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/sb_edac.c
 
+EDAC-SKYLAKE
+M:     Tony Luck <tony.luck@intel.com>
+L:     linux-edac@vger.kernel.org
+S:     Maintained
+F:     drivers/edac/skx_edac.c
+
 EDAC-XGENE
 APPLIED MICRO (APM) X-GENE SOC EDAC
 M:     Loc Ho <lho@apm.com>
@@ -6085,7 +6111,7 @@ S:        Supported
 F:     drivers/cpufreq/intel_pstate.c
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:     Maik Broemme <mbroemme@plusserver.de>
+M:     Maik Broemme <mbroemme@libmpq.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/fb/intelfb.txt
@@ -7448,7 +7474,8 @@ F:        Documentation/devicetree/bindings/sound/max9860.txt
 F:     sound/soc/codecs/max9860.*
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/power/max14577_charger.c
@@ -7464,7 +7491,8 @@ F:        include/dt-bindings/*/*max77802.h
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/*/max14577*.c
@@ -7654,7 +7682,7 @@ L:        linux-rdma@vger.kernel.org
 S:     Supported
 W:     https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
-F:     drivers/infiniband/hw/rxe/
+F:     drivers/infiniband/sw/rxe/
 F:     include/uapi/rdma/rdma_user_rxe.h
 
 MEMBARRIER SUPPORT
@@ -8141,6 +8169,15 @@ S:       Maintained
 W:     https://fedorahosted.org/dropwatch/
 F:     net/core/drop_monitor.c
 
+NETWORKING [DSA]
+M:     Andrew Lunn <andrew@lunn.ch>
+M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Florian Fainelli <f.fainelli@gmail.com>
+S:     Maintained
+F:     net/dsa/
+F:     include/net/dsa.h
+F:     drivers/net/dsa/
+
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
@@ -9230,7 +9267,7 @@ F:        drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -10163,7 +10200,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sangbeom Kim <sbkim73@samsung.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10178,7 +10215,8 @@ F:      drivers/video/fbdev/s3c-fb.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 M:     Sangbeom Kim <sbkim73@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Supported
@@ -10237,6 +10275,17 @@ S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
 
+SAMSUNG SPI DRIVERS
+M:     Kukjin Kim <kgene@kernel.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Andi Shyti <andi.shyti@samsung.com>
+L:     linux-spi@vger.kernel.org
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/spi/spi-samsung.txt
+F:     drivers/spi/spi-s3c*
+F:     include/linux/platform_data/spi-s3c64xx.h
+
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
 M:     Girish K S <ks.giri@samsung.com>
@@ -11216,12 +11265,8 @@ S:     Odd Fixes
 F:     drivers/staging/vt665?/
 
 STAGING - WILC1000 WIFI DRIVER
-M:     Johnny Kim <johnny.kim@atmel.com>
-M:     Austin Shin <austin.shin@atmel.com>
-M:     Chris Park <chris.park@atmel.com>
-M:     Tony Cho <tony.cho@atmel.com>
-M:     Glen Lee <glen.lee@atmel.com>
-M:     Leo Kim <leo.kim@atmel.com>
+M:     Aditya Shankar <aditya.shankar@microchip.com>
+M:     Ganesh Krishna <ganesh.krishna@microchip.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/staging/wilc1000/
@@ -12541,7 +12586,7 @@ F:      include/linux/if_*vlan.h
 F:     net/8021q/
 
 VLYNQ BUS
-M:     Florian Fainelli <florian@openwrt.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     openwrt-devel@lists.openwrt.org (subscribers-only)
 S:     Maintained
 F:     drivers/vlynq/vlynq.c
index 70de1448c5717b6914bc6626fe1663e4d52c1e95..ce2ddb3fec9845ed84d61213ea396390905e8a34 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc8
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -635,13 +635,6 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
-PHONY += gcc-plugins
-gcc-plugins: scripts_basic
-ifdef CONFIG_GCC_PLUGINS
-       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
-endif
-       @:
-
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
index bd8056b5b246058a8c06190811f875a00f30df8d..fd6e9712af81b5bab38f871665d2a2df66d8468d 100644 (file)
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
            results in the system call being skipped immediately.
          - seccomp syscall wired up
 
-         For best performance, an arch should use seccomp_phase1 and
-         seccomp_phase2 directly.  It should call seccomp_phase1 for all
-         syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
-         need to be called from a ptrace-safe context.  It must then
-         call seccomp_phase2 if seccomp_phase1 returns anything other
-         than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
-         As an additional optimization, an arch may provide seccomp_data
-         directly to seccomp_phase1; this avoids multiple calls
-         to the syscall_xyz helpers for every syscall.
-
 config SECCOMP_FILTER
        def_bool y
        depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
@@ -461,6 +450,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+       bool
+       help
+         An architecture should select this if it can walk the kernel stack
+         frames to determine if an object is part of either the arguments
+         or local variables (i.e. that it excludes saved return addresses,
+         and similar) by implementing an inline arch_within_stack_frames(),
+         which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
        bool
        help
index c419b43c461dc3326638870919d8f00ec95e7ae6..466e42e96bfaf29d46567e45268dc4844ac81ac9 100644 (file)
@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
        return __cu_len;
 }
 
-extern inline long
-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
-{
-       if (__access_ok((unsigned long)validate, len, get_fs()))
-               len = __copy_tofrom_user_nocheck(to, from, len);
-       return len;
-}
-
 #define __copy_to_user(to, from, n)                                    \
 ({                                                                     \
        __chk_user_ptr(to);                                             \
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-
 extern inline long
 copy_to_user(void __user *to, const void *from, long n)
 {
-       return __copy_tofrom_user((__force void *)to, from, n, to);
+       if (likely(__access_ok((unsigned long)to, n, get_fs())))
+               n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+       return n;
 }
 
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
-       return __copy_tofrom_user(to, (__force void *)from, n, from);
+       if (likely(__access_ok((unsigned long)from, n, get_fs())))
+               n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
+       else
+               memset(to, 0, n);
+       return n;
 }
 
 extern void __do_clear_user(void);
index ad7860c5ce153c731264f770067e653b70568fd5..51597f344a62aced8c98cb2035c9ec40eff55e6e 100644 (file)
 
 #ifdef CONFIG_ARC_CURR_IN_REG
        ; Retrieve orig r25 and save it with rest of callee_regs
-       ld.as   r12, [r12, PT_user_r25]
+       ld      r12, [r12, PT_user_r25]
        PUSH    r12
 #else
        PUSH    r25
 
        ; SP is back to start of pt_regs
 #ifdef CONFIG_ARC_CURR_IN_REG
-       st.as   r12, [sp, PT_user_r25]
+       st      r12, [sp, PT_user_r25]
 #endif
 .endm
 
index c1d36458bfb7aa665acb30a2234660034a19aecd..4c6eed80cd8ba3bd935e60b30ffadf147326aeca 100644 (file)
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
 .endm
 
 .macro IRQ_ENABLE  scratch
+       TRACE_ASM_IRQ_ENABLE
        lr      \scratch, [status32]
        or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
        flag    \scratch
-       TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif /* __ASSEMBLY__ */
index 0f92d97432a21a705af12efdff99597046b80cd9..89eeb372005180802181d860be8f215768701659 100644 (file)
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 
 #define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
index a78d5670884f32490d2341b2468bf606258b6654..41faf17cd28d2ced7461f4e6860d2456fb49f8ae 100644 (file)
        "2:     ;nop\n"                         \
        "       .section .fixup, \"ax\"\n"      \
        "       .align 4\n"                     \
-       "3:     mov %0, %3\n"                   \
+       "3:     # return -EFAULT\n"             \
+       "       mov %0, %3\n"                   \
+       "       # zero out dst ptr\n"           \
+       "       mov %1,  0\n"                   \
        "       j   2b\n"                       \
        "       .previous\n"                    \
        "       .section __ex_table, \"a\"\n"   \
        "2:     ;nop\n"                         \
        "       .section .fixup, \"ax\"\n"      \
        "       .align 4\n"                     \
-       "3:     mov %0, %3\n"                   \
+       "3:     # return -EFAULT\n"             \
+       "       mov %0, %3\n"                   \
+       "       # zero out dst ptr\n"           \
+       "       mov %1,  0\n"                   \
+       "       mov %R1, 0\n"                   \
        "       j   2b\n"                       \
        "       .previous\n"                    \
        "       .section __ex_table, \"a\"\n"   \
index 0f99ac8fcbb23f051753c4fb8e551a0e13541394..0037a587320d51c4262e7c3b7b504a8dafb34ddc 100644 (file)
 
 /* Machine specific ELF Hdr flags */
 #define EF_ARC_OSABI_MSK       0x00000f00
-#define EF_ARC_OSABI_ORIG      0x00000000   /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT   0x00000300   /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3                0x00000300   /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4                0x00000400   /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT   EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT   EF_ARC_OSABI_V4
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_fpregset_t;
index 4d9e77724bedef063bb5a9e5ee019bb271fbee6a..000dd041ab42e587620710e045f7f4b31cb0e537 100644 (file)
@@ -28,6 +28,7 @@ extern void __muldf3(void);
 extern void __divdf3(void);
 extern void __floatunsidf(void);
 extern void __floatunsisf(void);
+extern void __udivdi3(void);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
 EXPORT_SYMBOL(__divdf3);
 EXPORT_SYMBOL(__floatunsidf);
 EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
 
 /* ARC optimised assembler routines */
 EXPORT_SYMBOL(memset);
index b5db9e7fd649204d1e0da5d772274544b8483c2a..be1972bd2729e7a41013d521e9349cd4ac028499 100644 (file)
@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
        }
 
        eflags = x->e_flags;
-       if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+       if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
                pr_err("ABI mismatch - you need newer toolchain\n");
                force_sigsegv(SIGSEGV, current);
                return 0;
index a946400a86d0dc509858ed441edc2de367650dce..f52a0d0dc462213c9070c229a680f342897dafd5 100644 (file)
@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                               cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
                               cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
-       n += scnprintf(buf + n, len - n,
-                      "OS ABI [v3]\t: no-legacy-syscalls\n");
+       n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+                       EF_ARC_OSABI_CURRENT >> 8,
+                       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+                       "no-legacy-syscalls" : "64-bit data any register aligned");
 
        return buf;
 }
index 5a294b2c3cb307ac591293afae8c8a72409c97c5..0b10efe3a6a752c59d3a3d40a74759c30e380eaa 100644 (file)
@@ -921,6 +921,15 @@ void arc_cache_init(void)
 
        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
+       /*
+        * Only master CPU needs to execute rest of function:
+        *  - Assume SMP so all cores will have the same cache config, so
+        *    any geometry checks will be the same for all
+        *  - IOC setup / dma callbacks only need to be set up once
+        */
+       if (cpu)
+               return;
+
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
index 04f83322c9fdaf09444bb55eb4d095a68190cf9f..77ff64a874a1b4649d79e579054febd4dfdbb605 100644 (file)
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
 
        return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void *kmap_atomic(struct page *page)
 {
index 2d601d769a1cdddae7bbba2bb22571731e3d4f5e..a9c4e48bb7ec997bec394066914d26f337a2fec4 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
index 56ea5c60b31883bdf52ca01c4748061c4ed39871..61f6ccc19cfa94364e777cc68d10ce5a24093c0f 100644 (file)
@@ -260,12 +260,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y))
 platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
 
 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
+ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
 ifeq ($(KBUILD_SRC),)
 KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
 else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
 endif
 endif
+endif
 
 export TEXT_OFFSET GZFLAGS MMUEXT
 
index c8609d8d2c55ff9f53f2e57c76a372b130500e7e..b689172632efbb48516201b7814fbbc68d925b19 100644 (file)
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
        };
 };
 
index ca721670bd911a7f38e7516601576067981a3fc7..528559b33d8aeba9d0e4030d742e5434f1f00cf7 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
+#include <dt-bindings/display/tda998x.h>
 
 / {
        model = "TI AM335x BeagleBone Black";
                        AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3)    /* xdma_event_intr0 */
                >;
        };
+
+       mcasp0_pins: mcasp0_pins {
+               pinctrl-single,pins = <
+                       AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE0) /* mcasp0_ahclkx.mcasp0_ahclkx */
+                       AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+                       AM33XX_IOPAD(0x994, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
+                       AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
+                       AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+               >;
+       };
 };
 
 &lcdc {
 };
 
 &i2c0 {
-       tda19988 {
+       tda19988: tda19988 {
                compatible = "nxp,tda998x";
                reg = <0x70>;
+
                pinctrl-names = "default", "off";
                pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
                pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
-               port {
-                       hdmi_0: endpoint@0 {
-                               remote-endpoint = <&lcdc_0>;
+               #sound-dai-cells = <0>;
+               audio-ports = < TDA998x_I2S     0x03>;
+
+               ports {
+                       port@0 {
+                               hdmi_0: endpoint@0 {
+                                       remote-endpoint = <&lcdc_0>;
+                               };
                        };
                };
        };
 &rtc {
        system-power-controller;
 };
+
+&mcasp0 {
+       #sound-dai-cells = <0>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&mcasp0_pins>;
+       status = "okay";
+       op-mode = <0>;  /* MCASP_IIS_MODE */
+       tdm-slots = <2>;
+       serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+                       0 0 1 0
+               >;
+       tx-num-evt = <32>;
+       rx-num-evt = <32>;
+};
+
+/ {
+       clk_mcasp0_fixed: clk_mcasp0_fixed {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24576000>;
+       };
+
+       clk_mcasp0: clk_mcasp0 {
+               #clock-cells = <0>;
+               compatible = "gpio-gate-clock";
+               clocks = <&clk_mcasp0_fixed>;
+               enable-gpios = <&gpio1 27 0>; /* BeagleBone Black Clk enable on GPIO1_27 */
+       };
+
+       sound {
+               compatible = "simple-audio-card";
+               simple-audio-card,name = "TI BeagleBone Black";
+               simple-audio-card,format = "i2s";
+               simple-audio-card,bitclock-master = <&dailink0_master>;
+               simple-audio-card,frame-master = <&dailink0_master>;
+
+               dailink0_master: simple-audio-card,cpu {
+                       sound-dai = <&mcasp0>;
+                       clocks = <&clk_mcasp0>;
+               };
+
+               simple-audio-card,codec {
+                       sound-dai = <&tda19988>;
+               };
+       };
+};
index df63484ef9b37726218d5db98d671f6eaa29647e..e7d9ca1305faef3854ba7efb4da9efc20346c7ce 100644 (file)
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                /* MTD partition table */
                partition@0 {
index 86f773165d5c40f1102131a827e336c6c7ea2c3b..1263c9d4cba3b5644800dcdc2c172077a83cdfe2 100644 (file)
                gpmc,wr-access-ns = <30>;
                gpmc,wr-data-mux-bus-ns = <0>;
 
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                #address-cells = <1>;
                #size-cells = <1>;
index db808f92dd79c975b1f059d824e263080208d20b..90d00b407f851dfce2706d8e85bb436ac39a3921 100644 (file)
                 * associativity as these may be erroneously set
                 * up by boot loader(s).
                 */
-               cache-size = <1048576>; // 1MB
-               cache-sets = <4096>;
+               cache-size = <131072>; // 128KB
+               cache-sets = <512>;
                cache-line-size = <32>;
                arm,parity-disable;
-               arm,tag-latency = <1>;
-               arm,data-latency = <1 1>;
-               arm,dirty-latency = <1>;
+               arm,tag-latency = <1 1 1>;
+               arm,data-latency = <1 1 1>;
        };
 
        scu: scu@1f000000 {
index 2e0556af6e5eef508fe29f7ec9cdc91452253455..d3e6bd80500619ab87267ae334a084c8778554ad 100644 (file)
 
                        port@0 {
                                reg = <0>;
-                               label = "lan1";
+                               label = "lan5";
                        };
 
                        port@1 {
                                reg = <1>;
-                               label = "lan2";
+                               label = "lan4";
                        };
 
                        port@2 {
 
                        port@3 {
                                reg = <3>;
-                               label = "lan4";
+                               label = "lan2";
                        };
 
                        port@4 {
                                reg = <4>;
-                               label = "lan5";
+                               label = "lan1";
                        };
 
                        port@5 {
index caf2707680c1b2bc126b823574efe0120f9baad2..e9b47b2bbc3390edc2541f251ae5167640a445d9 100644 (file)
@@ -2,6 +2,7 @@
 
 / {
        memory {
+               device_type = "memory";
                reg = <0 0x10000000>;
        };
 
index b98252232d200ab476c2e8eaf8dc07e637ffc62f..445624a1a1de2fd3d1fda6fd9811a015e26d3dd3 100644 (file)
@@ -2,7 +2,6 @@
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
        compatible = "brcm,bcm2835";
        model = "BCM2835";
        interrupt-parent = <&intc>;
+       #address-cells = <1>;
+       #size-cells = <1>;
 
        chosen {
                bootargs = "earlyprintk console=ttyAMA0";
index d9499310a301b735c475ec6a5c3bb955165079be..f6d135245a4b9b3785fec6beda2e83564a7fa7a0 100644 (file)
        samsung,dw-mshc-ciu-div = <3>;
        samsung,dw-mshc-sdr-timing = <0 4>;
        samsung,dw-mshc-ddr-timing = <0 2>;
-       samsung,dw-mshc-hs400-timing = <0 2>;
-       samsung,read-strobe-delay = <90>;
        pinctrl-names = "default";
        pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
        bus-width = <8>;
        cap-mmc-highspeed;
        mmc-hs200-1_8v;
-       mmc-hs400-1_8v;
        vmmc-supply = <&ldo20_reg>;
        vqmmc-supply = <&ldo11_reg>;
 };
index b620ac884cfd0ac53808129443e13fcc45a423ab..b13b0b2db88163eec89f9b4fb4c5b241b41577c9 100644 (file)
                                        clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
                                                 <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
-                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
+                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
                                        clock-names = "core",  "rxtx0",
                                                      "rxtx1", "rxtx2",
index 96ea936eeeb0a1dce450696b38893ed3b341ed48..240a2864d044feeed2fe649729f760220c7a8684 100644 (file)
@@ -64,7 +64,7 @@
        cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakup;
+       wakeup-source;
        status = "okay";
 };
 
index 95ee268ed51013436e22955ddf923ef0f204a852..2f33c463cbce4f1eff456ea75666b22efb3ae5cc 100644 (file)
                ti,y-min = /bits/ 16 <0>;
                ti,y-max = /bits/ 16 <0>;
                ti,pressure-max = /bits/ 16 <0>;
-               ti,x-plat-ohms = /bits/ 16 <400>;
+               ti,x-plate-ohms = /bits/ 16 <400>;
                wakeup-source;
        };
 };
index cf06e32ee108a221c330c8ff521e0a82d8b78e4d..4b34b54e09a193ebd93ef82566d5c012274241d5 100644 (file)
@@ -42,7 +42,7 @@
        };
 
        syscon {
-               compatible = "arm,integrator-ap-syscon";
+               compatible = "arm,integrator-ap-syscon", "syscon";
                reg = <0x11000000 0x100>;
                interrupt-parent = <&pic>;
                /* These are the logical module IRQs */
index d43f15b4f79a242d2437f6ea45626556380c010c..79430fbfec3bd17ec311625e67695aabee3e0726 100644 (file)
@@ -94,7 +94,7 @@
        };
 
        syscon {
-               compatible = "arm,integrator-cp-syscon";
+               compatible = "arm,integrator-cp-syscon", "syscon";
                reg = <0xcb000000 0x100>;
        };
 
index 00cb314d5e4db81fcc035b2a68101a557cf751a4..e23f46d15c806566abc2ec88828bc8d053ffd6e5 100644 (file)
                cpu_on          = <0x84000003>;
        };
 
-       psci {
-               compatible      = "arm,psci";
-               method          = "smc";
-               cpu_suspend     = <0x84000001>;
-               cpu_off         = <0x84000002>;
-               cpu_on          = <0x84000003>;
-       };
-
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
index ef84d8699a767ca1f5fbc72c88d5e49a03576c4d..5bf62897014cd2c505258203812b757931916365 100644 (file)
 
        partition@e0000 {
                label = "u-boot environment";
-               reg = <0xe0000 0x100000>;
+               reg = <0xe0000 0x20000>;
        };
 
        partition@100000 {
index e4ecab1126011f220601ed7f31e931ec6336bebb..7175511a92da20af1afe844a248e06dc49abe06a 100644 (file)
        };
 };
 
+&pciec {
+       status = "okay";
+};
+
 &pcie0 {
        status = "okay";
 };
index 365f39ff58bb8a512debeaa1645fc1feb980f75b..0ff1c2de95bfc1a172cd2c7a98e5baf8d93260d1 100644 (file)
        ranges = <0 0 0x00000000 0x1000000>;    /* CS0: 16MB for NAND */
 
        nand@0,0 {
-               linux,mtd-name = "micron,mt29f4g16abbda3w";
+               compatible = "ti,omap2-nand";
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+               interrupt-parent = <&gpmc>;
+               interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+                            <1 IRQ_TYPE_NONE>; /* termcount */
+               linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
                gpmc,wr-access-ns = <40>;
                gpmc,wr-data-mux-bus-ns = <0>;
                gpmc,device-width = <2>;
-
-               gpmc,page-burst-access-ns = <5>;
-               gpmc,cycle2cycle-delay-ns = <50>;
-
                #address-cells = <1>;
                #size-cells = <1>;
 
index 5e9a13c0eaf7f4ca515942438cc1c8cdc331eafd..1c2c74655416868237d3d40692217b0beaf2d557 100644 (file)
@@ -46,6 +46,7 @@
                linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
index de256fa8da4870bb5325e21603b24a4b3a5583ea..3e946cac55f3ead97d21726beb96d57e748eaffc 100644 (file)
 };
 
 &gpmc {
-       ranges = <0 0 0x00000000 0x20000000>;
+       ranges = <0 0 0x30000000 0x1000000>,    /* CS0 */
+                <4 0 0x2b000000 0x1000000>,    /* CS4 */
+                <5 0 0x2c000000 0x1000000>;    /* CS5 */
 
        nand@0,0 {
                compatible = "ti,omap2-nand";
index 7df27926ead215be3fdb1e0f8b02ce078ecb935b..4f4c6efbd51892abd031de7fb3a407ac2a0be1a4 100644 (file)
@@ -55,8 +55,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 9e24b6a1d07b8e4d02b6d0704858680015f0069b..1b304e2f1bd2fd4fd5be23dafcd0f364991fd131 100644 (file)
@@ -27,8 +27,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 334109e14613d6eb3a406f69eb56dce2f9f445c4..82e98ee3023ada85319d96c22da2ac985216b04f 100644 (file)
@@ -15,9 +15,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <4 0 0x2b000000 0x1000000>,    /* CS4 */
-                <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        smsc1: ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index c0ba86c3a2abf1f1463d6644d12f28cb6870d7cb..0d0dae3a16949f58c3a62c2826365118a1691cee 100644 (file)
                clock-names = "saradc", "apb_pclk";
                interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
                #io-channel-cells = <1>;
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index cd33f017089001469df88ff6077253cb477afdb7..91c4b3c7a8d51def2cd4ef18b3e1a5ec7628fb9f 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index 99bbcc2c9b8978b28be5a253f6be236a8896a315..e2cd683b4e4b3d19825adf4f28da4b356179e43d 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index d294e82447a292bb24e0989191494693f7ee0ac3..8b063ab10c19e9d2faff0f4e9b9a62421e091f45 100644 (file)
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_mmc0>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        bus-width = <8>;
                        non-removable;
                };
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_sd1>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        resets = <&softreset STIH407_MMC1_SOFTRESET>;
                        bus-width = <4>;
                };
index 18ed1ad10d32bbb251746e3011c541f9cfcbc861..40318869c733668a15e3d318ff291c3221336125 100644 (file)
@@ -41,7 +41,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a03c00 0x100>;
                        interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
                        interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb0>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a83c00 0x100>;
                        interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
                        interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb1>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
index e012890e0cf2f9b998697deaacff1fca74a56769..a17ba0243db3920b2ca5f50a8bbf8e40b5222759 100644 (file)
@@ -84,7 +84,7 @@
                        trips {
                                cpu_alert0: cpu_alert0 {
                                        /* milliCelsius */
-                                       temperature = <850000>;
+                                       temperature = <85000>;
                                        hysteresis = <2000>;
                                        type = "passive";
                                };
index 1dfc492cc004735094f4e1888a868e9d9e43fbe1..1444fbd543e724e367b0091669a7642724023ed1 100644 (file)
                palmas: tps65913@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 70cf40996c3f433a12393ac14e989ff27688e8bf..966a7fc044af193295d5888af3e868ff8f3c70da 100644 (file)
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 17dd14545862cef60bcb72105be8466e5d7ff364..a161fa1dfb6136dcd598ba8064630049a2c832b3 100644 (file)
@@ -63,7 +63,7 @@
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 0e97b4b871f9b045fb8b01d4e9a8cd686f775fd6..6c7b06854fced378be0aa62efe1c9929f183979b 100644 (file)
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
 
 static void locomo_handler(struct irq_desc *desc)
 {
-       struct locomo *lchip = irq_desc_get_chip_data(desc);
+       struct locomo *lchip = irq_desc_get_handler_data(desc);
        int req, i;
 
        /* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
         * Install handler for IRQ_LOCOMO_HW.
         */
        irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-       irq_set_chip_data(lchip->irq, lchip);
-       irq_set_chained_handler(lchip->irq, locomo_handler);
+       irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
 
        /* Install handlers for IRQ_LOCOMO_* */
        for ( ; irq <= lchip->irq_base + 3; irq++) {
index fb0a0a4dfea4da26fd635ccbad4a06de0a72ce94..2e076c492005b57b5ce978fc87b03e1c2292b41b 100644 (file)
@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
         * specifies that S0ReadyInt and S1ReadyInt should be '1'.
         */
        sa1111_writel(0, irqbase + SA1111_INTPOL0);
-       sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
-                     SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+       sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+                     BIT(IRQ_S1_READY_NINT & 31),
                      irqbase + SA1111_INTPOL1);
 
        /* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
        if (sachip->irq != NO_IRQ) {
                ret = sa1111_setup_irq(sachip, pd->irq_base);
                if (ret)
-                       goto err_unmap;
+                       goto err_clk;
        }
 
 #ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 
        return 0;
 
+ err_clk:
+       clk_disable(sachip->clk);
  err_unmap:
        iounmap(sachip->base);
  err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {
 
 #ifdef CONFIG_PM
 
-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags;
        unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
  *     restored by their respective drivers, and must be called
  *     via LDM after this function.
  */
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags, id;
        void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
        id = sa1111_readl(sachip->base + SA1111_SKID);
        if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
                __sa1111_remove(sachip);
-               platform_set_drvdata(dev, NULL);
+               dev_set_drvdata(dev, NULL);
                kfree(save);
                return 0;
        }
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
 }
 
 #else
-#define sa1111_suspend NULL
-#define sa1111_resume  NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq  NULL
 #endif
 
 static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
                return -EINVAL;
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return -ENXIO;
+               return irq;
 
        return __sa1111_probe(&pdev->dev, mem, irq);
 }
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct dev_pm_ops sa1111_pm_ops = {
+       .suspend_noirq = sa1111_suspend_noirq,
+       .resume_noirq = sa1111_resume_noirq,
+};
+
 /*
  *     Not sure if this should be on the system bus or not yet.
  *     We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
 static struct platform_driver sa1111_device_driver = {
        .probe          = sa1111_probe,
        .remove         = sa1111_remove,
-       .suspend        = sa1111_suspend,
-       .resume         = sa1111_resume,
        .driver         = {
                .name   = "sa1111",
+               .pm     = &sa1111_pm_ops,
        },
 };
 
index b6e54ee9bdbd8e54a4c740d0d2308dd26f682b42..ca39c04fec6b7af28847b78b6b3ff36a75811b31 100644 (file)
@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_FIRMWARE_MEMMAP=y
 CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_PAGE_POISONING=y
index 89260516735720460b4002e462ae5d3e5811125b..4f366b0370e939a27056f0230b49140ddee85503 100644 (file)
@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_FIRMWARE_MEMMAP=y
 CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_PAGE_POISONING=y
index 71b42e66488a13746692d0706504c4d737019d92..78cd2f197e015bf83e7ed4db934f957dabe137ea 100644 (file)
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
index 2c8665cd9dc5981e723cfed6136065c0dfa71bca..ea3566fb92e22e22358e61fb33184f1dde120cc4 100644 (file)
@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
 CONFIG_DMA_BCM2835=y
 CONFIG_DMA_OMAP=y
 CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
 CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
index da3c0428507be43479f48e80e49b2b80b63fe51c..aef022a87c5379bc9627b0fa8fe144b266e17e9c 100644 (file)
@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                err = blkcipher_walk_done(desc, &walk,
                                          walk.nbytes % AES_BLOCK_SIZE);
        }
-       if (nbytes) {
+       if (walk.nbytes % AES_BLOCK_SIZE) {
                u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
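
(On this guard change and its arm64 twin later in the series: after the
copy loop the running byte count can still be non-zero even though the
final walk chunk ended exactly on a block boundary, in which case there is
no mapped partial block for the tail code to read or write. Testing
walk.nbytes % AES_BLOCK_SIZE runs the tail path only when a genuine partial
final block remains.)
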
index d0131ee6f6af920d5c3bc6479c2e515e1d706539..3f82e9da7cec03604059bcdcddb8217635138065 100644 (file)
@@ -47,6 +47,7 @@
 #define PMD_SECT_WB            (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE     (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA          (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK    (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
 
 /*
index f8f1cff62065b7548bb54ce9bc653c684ddf5eb4..4cd664abfcd327ed530b3876e0ee774c2f06b0ad 100644 (file)
@@ -62,6 +62,7 @@
 #define PMD_SECT_WT            (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
 #define PMD_SECT_WB            (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
 #define PMD_SECT_WBWA          (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK    (_AT(pmdval_t, 7) << 2)
 
 /*
  * + Level 3 descriptor (PTE)
index 62a6f65029e61aebf9b64e1df59fb9383f4064a9..a93c0f99acf7767c680158cf96acef87d1f0da51 100644 (file)
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(to, n, false);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(from, n, true);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
 #else
+       check_object_size(from, n, true);
        return arm_copy_to_user(to, from, n);
 #endif
 }
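
Both this hunk and the arm64 uaccess.h hunk further down wire the copy
routines into CONFIG_HARDENED_USERCOPY: check_object_size() vets the
kernel-side buffer before any bytes cross the user/kernel boundary. A
minimal sketch of the overread it catches (names and sizes are purely
illustrative, not from the patch):

        char token[16];         /* 16-byte object on the kernel stack */

        /*
         * With hardened usercopy, check_object_size(token, 64, true)
         * sees the 64-byte request overrun the 16-byte stack object and
         * aborts the copy, instead of quietly leaking 48 bytes of
         * adjacent stack to userspace.
         */
        err = copy_to_user(ubuf, token, 64);
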
index bc5f50799d75627fb3eb22b9afe6d849373e77d4..9f157e7c51e75cc468de327bd9c5403353885849 100644 (file)
@@ -295,6 +295,7 @@ __und_svc_fault:
        bl      __und_fault
 
 __und_svc_finish:
+       get_thread_info tsk
        ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
        svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
index 0b1e4a93d67edfe15899220087a4bdc53966f90c..15d073ae5da2a2123980b2fc67c3986161cd0839 100644 (file)
@@ -142,6 +142,19 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
        and     r7, #0x1f               @ Preserve HPMN
        mcr     p15, 4, r7, c1, c1, 1   @ HDCR
 
+       @ Make sure NS-SVC is initialised appropriately
+       mrc     p15, 0, r7, c1, c0, 0   @ SCTLR
+       orr     r7, #(1 << 5)           @ CP15 barriers enabled
+       bic     r7, #(3 << 7)           @ Clear SED/ITD for v8 (RES0 for v7)
+       bic     r7, #(3 << 19)          @ WXN and UWXN disabled
+       mcr     p15, 0, r7, c1, c0, 0   @ SCTLR
+
+       mrc     p15, 0, r7, c0, c0, 0   @ MIDR
+       mcr     p15, 4, r7, c0, c0, 0   @ VPIDR
+
+       mrc     p15, 0, r7, c0, c0, 5   @ MPIDR
+       mcr     p15, 4, r7, c0, c0, 5   @ VMPIDR
+
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
index 087acb569b63a4bd90982e0c9b15fc2313636c53..5f221acd21aebb3ca1c2ee560fb68241bc1e02c9 100644 (file)
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
        mm_segment_t fs;
        long ret, err, i;
 
-       if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+       if (maxevents <= 0 ||
+                       maxevents > (INT_MAX/sizeof(*kbuf)) ||
+                       maxevents > (INT_MAX/sizeof(*events)))
                return -EINVAL;
+       if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+               return -EFAULT;
        kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 
        if (nsops < 1 || nsops > SEMOPM)
                return -EINVAL;
+       if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+               return -EFAULT;
        sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
        if (!sops)
                return -ENOMEM;
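
The checks added above guard the sizeof(x) * n multiplications handed to
kmalloc() against integer overflow, and validate the whole user range
before allocating. The same pattern in standalone C (illustrative, not
kernel code):

        #include <limits.h>
        #include <stdlib.h>

        /* Allocate count elements of elem_size bytes each, refusing any
         * request whose total size would overflow the multiplication. */
        static void *alloc_array_checked(size_t count, size_t elem_size)
        {
                if (elem_size == 0 || count == 0 ||
                    count > INT_MAX / elem_size)
                        return NULL;
                return malloc(count * elem_size);
        }
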
index d94bb9093ead7d10641aecfaed668d0c3ee3fa39..c94b90d437724f43c60e8748f7fe0f028e3672d3 100644 (file)
@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        int i;
 
-       kvm_free_stage2_pgd(kvm);
-
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -1009,9 +1007,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
+               int ret;
                if (!vgic_present)
                        return -ENXIO;
-               return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_lock(&kvm->lock);
+               ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_unlock(&kvm->lock);
+               return ret;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;
index bda27b6b1aa2b5dc5d2e20e18f24ae41d3958ed4..e9a5c0e0c11543fa6918ed618dff6840ff03055e 100644 (file)
@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        smp_rmb();
 
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-       if (is_error_pfn(pfn))
+       if (is_error_noslot_pfn(pfn))
                return -EFAULT;
 
        if (kvm_is_device_pfn(pfn)) {
@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
                 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
        if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
-           hyp_idmap_start <  kern_hyp_va(~0UL)) {
+           hyp_idmap_start <  kern_hyp_va(~0UL) &&
+           hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
                /*
                 * The idmap page is intersecting with the VA space,
                 * it is not safe to continue further.
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
+       kvm_free_stage2_pgd(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
index dc7c6edeab39a89d24f8211d4d11ace5494bafee..61284b9389cf5e41de92215b3fa45ee9c2bc79df 100644 (file)
@@ -1,13 +1,13 @@
 menuconfig ARCH_CLPS711X
        bool "Cirrus Logic EP721x/EP731x-based"
        depends on ARCH_MULTI_V4T
-       select ARCH_REQUIRE_GPIOLIB
        select AUTO_ZRELADDR
        select CLKSRC_OF
        select CLPS711X_TIMER
        select COMMON_CLK
        select CPU_ARM720T
        select GENERIC_CLOCKEVENTS
+       select GPIOLIB
        select MFD_SYSCON
        select OF_IRQ
        select USE_OF
index 3750575c73c5f040247ca3d096552e718e2d613c..06332f6265652e16dbd5503dcce3f45425e4fefb 100644 (file)
@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       /*
+        * Clear the OF_POPULATED flag set in of_irq_init so that
+        * later the Exynos PMU platform device won't be skipped.
+        */
+       of_node_clear_flag(node, OF_POPULATED);
+
        return 0;
 }
 
index fd87205324710ffcfca1871c01ea23a7da8856b4..0df062d8b2c942f84a31a923e0a4f221c6c9366d 100644 (file)
@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
        for (i = 0; i < IMR_NUM; i++)
                writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
 
+       /*
+        * Clear the OF_POPULATED flag set in of_irq_init so that
+        * later the GPC power domain driver will not be skipped.
+        */
+       of_node_clear_flag(node, OF_POPULATED);
+
        return 0;
 }
 IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
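
(Both OF_POPULATED hunks above address the same interaction: of_irq_init()
flags a matched node as populated, which makes the later platform-bus scan
skip it, but these are multifunction blocks whose platform drivers still
need to bind, so the flag is cleared once the irqchip half is set up.)
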
index 5d9bfab279dd8cc3bba37c0bbfe2f2d5904f3850..6bb7d9cf1e389e55b517cdd7eff6b8ed99b9944c 100644 (file)
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void)
        if (parent == NULL)
                pr_warn("failed to initialize soc device\n");
 
+       of_platform_default_populate(NULL, NULL, parent);
        imx6ul_enet_init();
        imx_anatop_init();
        imx6ul_pm_init();
index 58924b3844df551a270aac552b0803fb5eedf8dc..fe708e26d021d30293d0e5a4a8b9cce3a67ae976 100644 (file)
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val &= ~BM_CLPCR_SBYOS;
                if (cpu_is_imx6sl())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
-               if (cpu_is_imx6sl() || cpu_is_imx6sx())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
                else
                        val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val |= 0x3 << BP_CLPCR_STBY_COUNT;
                val |= BM_CLPCR_VSTBY;
                val |= BM_CLPCR_SBYOS;
-               if (cpu_is_imx6sl())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
                if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
index e53c6cfcab51cd12c798fd11d663686c77761b02..6c6497e80a7b13433d833923d8c5003c52039d9a 100644 (file)
@@ -1,5 +1,4 @@
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-       -I$(srctree)/arch/arm/plat-orion/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
 
 AFLAGS_coherency_ll.o          := -Wa,-march=armv7-a
 CFLAGS_pmsu.o                  := -march=armv7-a
index c073fb57dd13102dfc239c67cd6860fcc281ed2c..6f2d0aec05139fc1f09b5a57a973423c7cf90882 100644 (file)
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
                                CLKCTRL_IDLEST_DISABLED),
                                MAX_MODULE_READY_TIME, i);
index 2c0e07ed6b995b77b2c3a01aaaba457173251335..2ab27ade136a2dbca7e225102cb9700d35674504 100644 (file)
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
                          MAX_MODULE_DISABLE_TIME, i);
index 5b709383381c8ae0bdafe014253537804043d97d..1052b29697b8946a5407e919369e2aaf5efba409 100644 (file)
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh)
        if (oh->flags & HWMOD_NO_IDLEST)
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
                                        oh->clkdm->cm_inst,
                                        oh->prcm.omap4.clkctrl_offs, 0);
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh)
        if (!_find_mpu_rt_port(oh))
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        /* XXX check module SIDLEMODE, hardreset status */
 
        return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
index 4041bad79a9abbe9bc4ca74abd29027c01bf5d57..78904017f18ce03669fdf396cbedf31d735b8f19 100644 (file)
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm {
  * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM
  *     module-level context loss register associated with them; this
  *     flag bit should be set in those cases
+ * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL
+ *     offset of zero; this flag bit should be set in those cases to
+ *     distinguish from hwmods that have no clkctrl offset.
  */
 #define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT                (1 << 0)
+#define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET                (1 << 1)
 
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
index 55c5878577f44c36da9284beae1b367e69094799..e2d84aa7f595ff3fa8672a3c6f87412a13b3e095 100644 (file)
@@ -29,6 +29,7 @@
 #define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
 #define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
 #define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
+#define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag))
 
 /*
  * 'l3' class
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
        CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+       PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
index d72ee6185d5e0b377f14adb7ac5a780a5287cbea..1cc4a6f3954e1936c30005a9b61bcda65bbccd69 100644 (file)
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
  * display serial interface controller
  */
 
+static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
 static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
        .name = "dsi",
+       .sysc   = &omap3xxx_dsi_sysc,
 };
 
 static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
index 567496bd250a2fc5c8037ed203dc71d203521be0..29100beb2e7f201403ce9bc84c78dab1aea818eb 100644 (file)
@@ -11,11 +11,13 @@ if ARCH_OXNAS
 
 config MACH_OX810SE
        bool "Support OX810SE Based Products"
+       select ARCH_HAS_RESET_CONTROLLER
        select COMMON_CLK_OXNAS
        select CPU_ARM926T
        select MFD_SYSCON
        select OXNAS_RPS_TIMER
        select PINCTRL_OXNAS
+       select RESET_CONTROLLER
        select RESET_OXNAS
        select VERSATILE_FPGA_IRQ
        help
index dc109dc3a622834bcca135322672e754cc1db488..10bfdb169366b0a7b4e6403cd3fd5242a940f3e8 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>      /* symbol_get ; symbol_put */
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/major.h>
index c410d84b243dbe59ed35b147416b473dfac9743e..66070acaa888cc5778f5651592691c34a0adfca5 100644 (file)
@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {
index 7245f3359564f27b58722d5c14e635929f627a36..d6159f8ef0c245405d5b0c00d32497562f280a05 100644 (file)
@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
        // no D+ pullup; lubbock can't connect/disconnect in software
 };
 
+static void lubbock_init_pcmcia(void)
+{
+       struct clk *clk;
+
+       /* Add an alias for the SA1111 PCMCIA clock */
+       clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+       if (!IS_ERR(clk)) {
+               clkdev_create(clk, NULL, "1800");
+               clk_put(clk);
+       }
+}
+
 static struct resource sa1111_resources[] = {
        [0] = {
                .start  = 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
        pxa_set_btuart_info(NULL);
        pxa_set_stuart_info(NULL);
 
+       lubbock_init_pcmcia();
+
        clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
        pxa_set_udc_info(&udc_info);
        pxa_set_fb_info(NULL, &sharp_lm8v31);
index 1080580b1343d1bdf164230bff82d6fed2e8b0d5..2c150bfc0cd5128dcc252e63ca485076e9469ba9 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>      /* symbol_get ; symbol_put */
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/gpio_keys.h>
index 3f06cd90567a7136abe266cf579bc8d4899eb86b..056369ef250e8a4447b592efbd0775b0b7b3710f 100644 (file)
@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-       .flags  = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+       .flags  = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                 SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {
index dae8d86ef4ccc75c4bb3dfbb98aba9e7e12eec9f..4048821309566281d61e5dd478f2db7c921b9491 100644 (file)
@@ -1,8 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-       -I$(srctree)/arch/arm/plat-versatile/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
 
 obj-y                                  := core.o
 obj-$(CONFIG_REALVIEW_DT)              += realview-dt.o
index baf174542e36fdf096e11deca2032bab1015ef99..a0ead0ae23d64387e0dee144f81537be00475ac9 100644 (file)
@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {
index 72b9e96715070f2c47b13b7f7aade101abb9586e..fa7fb716e388a7ef4f4da1b89f58090e60805245 100644 (file)
@@ -5,7 +5,7 @@
 #
 # Licensed under GPLv2
 
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include
 
 # Core
 
index cbf53bb9c814dc328bb209ad3ee580da7f2f6a6f..0db46895c82a4729d40b6c94c62482cd00ae1c9b 100644 (file)
@@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk)
 }
 
 static struct clkops clk_36864_ops = {
+       .enable         = clk_cpu_enable,
+       .disable        = clk_cpu_disable,
        .get_rate       = clk_36864_get_rate,
 };
 
@@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = {
        CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
 };
 
-static int __init sa11xx_clk_init(void)
+int __init sa11xx_clk_init(void)
 {
        clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
        return 0;
 }
-core_initcall(sa11xx_clk_init);
index 345e63f4eb71f53d0038adc005c8a0f052714b2b..3e09beddb6e8f4e84e0863691c7d33c91bae404a 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <mach/hardware.h>
 #include <mach/irqs.h>
+#include <mach/reset.h>
 
 #include "generic.h"
 #include <clocksource/pxa.h>
@@ -95,6 +96,8 @@ static void sa1100_power_off(void)
 
 void sa11x0_restart(enum reboot_mode mode, const char *cmd)
 {
+       clear_reset_status(RESET_STATUS_ALL);
+
        if (mode == REBOOT_SOFT) {
                /* Jump into ROM at address 0 */
                soft_restart(0);
@@ -388,6 +391,7 @@ void __init sa1100_init_irq(void)
        sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
 
        sa1100_init_gpio();
+       sa11xx_clk_init();
 }
 
 /*
index 0d92e119b36b13d28c2ab8036c21f23963a54ff8..68199b603ff717f7b5dfb514abd62c3b21704d1c 100644 (file)
@@ -44,3 +44,5 @@ int sa11x0_pm_init(void);
 #else
 static inline int sa11x0_pm_init(void) { return 0; }
 #endif
+
+int sa11xx_clk_init(void);
index 1525d7b5f1b74b6d06ac1276a56a45c23781e060..88149f85bc49dfa7a62c6b395bdf7a3e3f770e7b 100644 (file)
@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {
index f3dba6f356e29446c0960af2d37e51d2eacc8302..02e21bceb0856bc5ac5c769af3362afab2927c99 100644 (file)
@@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 bool __init shmobile_smp_init_fallback_ops(void)
 {
        /* fallback on PSCI/smp_ops if no other DT based method is detected */
+       if (!IS_ENABLED(CONFIG_SMP))
+               return false;
+
        return platform_can_secondary_boot() ? true : false;
 }
index 62437b57813eab3c1cff4fc275386b37378dc9d8..73e3adbc133096eca9cdf652d37ff110f5e35850 100644 (file)
 
 #define REGULATOR_IRQ_MASK     BIT(2)  /* IRQ2, active low */
 
-static void __iomem *irqc;
-
-static const u8 da9063_mask_regs[] = {
-       DA9063_REG_IRQ_MASK_A,
-       DA9063_REG_IRQ_MASK_B,
-       DA9063_REG_IRQ_MASK_C,
-       DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
+/* start of DA9210 System Control and Event Registers */
 #define DA9210_REG_MASK_A              0x54
-#define DA9210_REG_MASK_B              0x55
-
-static const u8 da9210_mask_regs[] = {
-       DA9210_REG_MASK_A,
-       DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
-                            unsigned int nregs)
-{
-       unsigned int i;
 
-       dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
+static void __iomem *irqc;
 
-       for (i = 0; i < nregs; i++) {
-               int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
-               if (error) {
-                       dev_err(&client->dev, "i2c error %d\n", error);
-                       return;
-               }
-       }
-}
+/* first byte sets the memory pointer, following are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+       {
+               .addr = 0x58,
+               .len = ARRAY_SIZE(da9063_irq_clr),
+               .buf = da9063_irq_clr,
+       }, {
+               .addr = 0x68,
+               .len = ARRAY_SIZE(da9210_irq_clr),
+               .buf = da9210_irq_clr,
+       },
+};
 
 static int regulator_quirk_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
        client = to_i2c_client(dev);
        dev_dbg(dev, "Detected %s\n", client->name);
 
-       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
-               da9xxx_mask_irqs(client, da9063_mask_regs,
-                                ARRAY_SIZE(da9063_mask_regs));
-       else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
-               da9xxx_mask_irqs(client, da9210_mask_regs,
-                                ARRAY_SIZE(da9210_mask_regs));
+       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+           (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+               int ret;
+
+               dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+               ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+               if (ret != ARRAY_SIZE(da9xxx_msgs))
+                       dev_err(&client->dev, "i2c error %d\n", ret);
+       }
 
        mon = ioread32(irqc + IRQC_MONITOR);
        if (mon & REGULATOR_IRQ_MASK)
index 62f4d01941f718bf02a6e24c19042d6c65b961d9..30fe03f95c85b9b4b3700337f402290d10e320e4 100644 (file)
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
 
        initial_pmd_value = pmd;
 
-       pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+       pmd &= PMD_SECT_CACHE_MASK;
 
        for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
                if (cache_policies[i].pmd == pmd) {
@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
 {
        void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
 
-       BUG_ON(!ptr);
+       if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+               BUG();
        return ptr;
 }
 
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
 {
        phys_addr_t memblock_limit = 0;
        int highmem = 0;
-       phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+       u64 vmalloc_limit;
        struct memblock_region *reg;
        bool should_use_highmem = false;
 
+       /*
+        * Let's use our own (unoptimized) equivalent of __pa() that is
+        * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
+        * The result is used as the upper bound on physical memory address
+        * and may itself be outside the valid range for which phys_addr_t
+        * and therefore __pa() is defined.
+        */
+       vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
        for_each_memblock(memory, reg) {
                phys_addr_t block_start = reg->base;
                phys_addr_t block_end = reg->base + reg->size;
@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
                        if (reg->size > size_limit) {
                                phys_addr_t overlap_size = reg->size - size_limit;
 
-                               pr_notice("Truncating RAM at %pa-%pa to -%pa",
-                                         &block_start, &block_end, &vmalloc_limit);
-                               memblock_remove(vmalloc_limit, overlap_size);
+                               pr_notice("Truncating RAM at %pa-%pa",
+                                         &block_start, &block_end);
                                block_end = vmalloc_limit;
+                               pr_cont(" to -%pa", &block_end);
+                               memblock_remove(vmalloc_limit, overlap_size);
                                should_use_highmem = true;
                        }
                }
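
The new comment deserves a concrete illustration: with
sizeof(phys_addr_t) == 4, the old __pa(vmalloc_min - 1) + 1 was evaluated
in 32-bit arithmetic and could wrap to a bogus low limit that truncates
nearly all RAM. A standalone sketch (the constants are assumed for
illustration; any layout with a high enough PHYS_OFFSET wraps the same
way):

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_OFFSET 0xC0000000u
        #define PHYS_OFFSET 0xE0000000u /* assumed: RAM high in the map */
        #define VMALLOC_MIN 0xFF000000u

        int main(void)
        {
                uint32_t wrapped = VMALLOC_MIN - PAGE_OFFSET + PHYS_OFFSET;
                uint64_t correct = (uint64_t)VMALLOC_MIN - PAGE_OFFSET
                                   + PHYS_OFFSET;

                /* prints "32-bit: 0x1f000000  64-bit: 0x11f000000" */
                printf("32-bit: %#x  64-bit: %#llx\n",
                       wrapped, (unsigned long long)correct);
                return 0;
        }
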
index a7123b4e129dfc40f9c0d0df4417d086e23ee9a0..d00d52c9de3e0228c34aeebb0ec86d4b280220f9 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/memory.h>
 
 #include "proc-macros.S"
 
index b0b82f5ea33825943fb75066cf1af0d52f17b3c2..f193414d0f6ff78a2ac91b48160390935fa68546 100644 (file)
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 /* These are unused until we support booting "pre-ballooned" */
@@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-       /* Direct vCPU id mapping for ARM guests. */
-       per_cpu(xen_vcpu_id, cpu) = cpu;
-
        info.mfn = virt_to_gfn(vcpup);
        info.offset = xen_offset_in_page(vcpup);
 
@@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
 {
        struct xen_add_to_physmap xatp;
        struct shared_info *shared_info_page = NULL;
+       int cpu;
 
        if (!xen_domain())
                return 0;
@@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
                return -ENOMEM;
 
        /* Direct vCPU id mapping for ARM guests. */
-       per_cpu(xen_vcpu_id, 0) = 0;
+       for_each_possible_cpu(cpu)
+               per_cpu(xen_vcpu_id, cpu) = cpu;
 
        xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
        if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
index 69c8787bec7d3f3e343b592b01997667f3f8c53d..bc3f00f586f1111fa41c5e36d6530569f0a5b3a8 100644 (file)
@@ -54,6 +54,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
index bb2616b161576b4a664536ccae5c761544e93167..be5d824ebdba2dab24840bb7808abcc40da2053e 100644 (file)
@@ -8,7 +8,7 @@ config ARCH_SUNXI
 
 config ARCH_ALPINE
        bool "Annapurna Labs Alpine platform"
-       select ALPINE_MSI
+       select ALPINE_MSI if PCI
        help
          This enables support for the Annapurna Labs Alpine
          Soc family.
@@ -66,7 +66,7 @@ config ARCH_LG1K
 config ARCH_HISI
        bool "Hisilicon SoC Family"
        select ARM_TIMER_SP804
-       select HISILICON_IRQ_MBIGEN
+       select HISILICON_IRQ_MBIGEN if PCI
        help
          This enables support for Hisilicon ARMv8 SoC family
 
index 445aa678f9141e2398e2cd3dbfe82da0258bb5be..c2b9bcb0ef61ac42189d8fcadc423c30878f3842 100644 (file)
                /* Local timer */
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xf01>,
-                                    <1 14 0xf01>,
-                                    <1 11 0xf01>,
-                                    <1 10 0xf01>;
+                       interrupts = <1 13 0xf08>,
+                                    <1 14 0xf08>,
+                                    <1 11 0xf08>,
+                                    <1 10 0xf08>;
                };
 
                timer0: timer0@ffc03000 {
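
This hunk and the similar armv8-timer hunks below all re-encode the third
interrupt cell. For reference when reading them, a recap of the GIC
binding's encoding (reference notes, not patch code):

        /*
         * Third cell of a GIC interrupt specifier:
         *   bits[3:0]  trigger type: 1 = edge rising, 2 = edge falling,
         *                            4 = level high,  8 = level low
         *   bits[15:8] PPI CPU mask, one bit per CPU (GICv2; the field
         *              must be zero on GICv3)
         *
         * So 0xf01 is "edge rising, CPUs 0-3" and 0xf08 is "level low,
         * CPUs 0-3". The architected timer output is a level signal,
         * hence the move away from edge triggers; hunks that end up
         * with a bare 4 target GICs that take no CPU mask.
         */
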
index e502c24b0ac79cca089c7f22e19c83e3ea9bf2d4..bf6c8d0510028b472efc4e7a34033e8ca0d8f7c8 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        xtal: xtal-clk {
index f1c2c713f9b0896b2dc777a960f84fe741d43ff8..c29dab9d18345100e17cd8f8db97a4db9e4427a2 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 0 0xff01>,      /* Secure Phys IRQ */
-                            <1 13 0xff01>,     /* Non-secure Phys IRQ */
-                            <1 14 0xff01>,     /* Virt IRQ */
-                            <1 15 0xff01>;     /* Hyp IRQ */
+               interrupts = <1 0 0xff08>,      /* Secure Phys IRQ */
+                            <1 13 0xff08>,     /* Non-secure Phys IRQ */
+                            <1 14 0xff08>,     /* Virt IRQ */
+                            <1 15 0xff08>;     /* Hyp IRQ */
                clock-frequency = <50000000>;
        };
 
diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
new file mode 120000 (symlink)
index 0000000..3937b77
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi
\ No newline at end of file
index 6f47dd2bb1db4342b45165c5e88b01edc1deb556..7841b724e340e1441867254c9a89c53c014eb031 100644 (file)
@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
 
 / {
        compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
index f2a31d06845d84a33bbd575ca6bf634e94a4f773..8216bbb29fe0758ff315098fb1319d97fea91469 100644 (file)
@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
 
 / {
        compatible = "brcm,bcm2836";
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
new file mode 120000 (symlink)
index 0000000..dca7c05
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
new file mode 120000 (symlink)
index 0000000..5f54e4c
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi
\ No newline at end of file
index f53b0955bfd31c44ab05dc328bef1e4bd3dadcf4..d4a12fad8afdeecbb522557aedf1f8013b55748d 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>;
+                             IRQ_TYPE_LEVEL_LOW)>;
        };
 
        pmu {
index 2eb9b225f0bcc193c27520a332a7e68ff55410dc..04dc8a8d15399ea3eb9e63f8215fcbc8840c56f3 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xff01>,
-                            <1 14 0xff01>,
-                            <1 11 0xff01>,
-                            <1 10 0xff01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        pmu {
index 299f3ce969ab8517a602ff7addda417ecd5aa5f3..c528dd52ba2d39b30547ab964eda219b1068a043 100644 (file)
@@ -12,6 +12,7 @@
 /dts-v1/;
 #include "exynos7.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
        model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@@ -43,6 +44,8 @@
 
 &rtc {
        status = "okay";
+       clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+       clock-names = "rtc", "rtc_src";
 };
 
 &watchdog {
index ca663dfe51891f11e4cd2964048c3590a6e330fe..162831546e183d192459fee3ae40df1b6bf2929d 100644 (file)
 
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xff01>,
-                                    <1 14 0xff01>,
-                                    <1 11 0xff01>,
-                                    <1 10 0xff01>;
+                       interrupts = <1 13 0xff08>,
+                                    <1 14 0xff08>,
+                                    <1 11 0xff08>,
+                                    <1 10 0xff08>;
                };
 
                pmu_system_controller: system-controller@105c0000 {
index e669fbd7f9c36dee51c70dc68a50dd681e4cf886..a67e210e201935a65768b0505e683801624bc3b1 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x1>, /* Physical Secure PPI */
-                            <1 14 0x1>, /* Physical Non-Secure PPI */
-                            <1 11 0x1>, /* Virtual PPI */
-                            <1 10 0x1>; /* Hypervisor PPI */
+               interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+                            <1 14 0xf08>, /* Physical Non-Secure PPI */
+                            <1 11 0xf08>, /* Virtual PPI */
+                            <1 10 0xf08>; /* Hypervisor PPI */
        };
 
        pmu {
index 21023a388c29d81ec7b7ae2f3a809b8d93d1a523..e3b6034ea5d9084190cddfcf76fdc2e7283cb0d9 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-                            <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-                            <1 11 0x8>, /* Virtual PPI, active-low */
-                            <1 10 0x8>; /* Hypervisor PPI, active-low */
+               interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+                            <1 14 4>, /* Physical Non-Secure PPI, active-low */
+                            <1 11 4>, /* Virtual PPI, active-low */
+                            <1 10 4>; /* Hypervisor PPI, active-low */
        };
 
        pmu {
index eab1a42fb934d7c4219c9eb7230b9c244236bdb9..c2a6745f168cbd32116f478d8c76bcc18d23104e 100644 (file)
 
                        timer {
                                compatible = "arm,armv8-timer";
-                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
                        };
 
                        odmi: odmi@300000 {
index d02a900378e1037f8750ac3f993dc6be58c61aeb..4f44d1191bfd348f2969339d557141d43da278cf 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index c223915f0907df01b095e3d0ccc1cdbb41a44e39..d73bdc8c91156dc0ceb5b7245990cc0abe72d34b 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        soc {
index e595f22e7e4b67521c5e1720ea0fafeb7273a1c0..3e2e51fbd2bce2d3abdf8390625d1c833bb449c5 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupt-parent = <&gic>;
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
        };
 
        amba_apu {
index 0555b7caaf2c2960a1e4f02eca120ddd5d3d01b1..eadf4855ad2d995072022254000a83cd0c476a0d 100644 (file)
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
index 5c888049d061a3720b738c73a5fa11cccf9d8311..6b2aa0fd6cd0c7e266082be8d4cd4ad9c3101632 100644 (file)
@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                err = blkcipher_walk_done(desc, &walk,
                                          walk.nbytes % AES_BLOCK_SIZE);
        }
-       if (nbytes) {
+       if (walk.nbytes % AES_BLOCK_SIZE) {
                u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
index 4b6b3f72a2158a06e07f36529912c81c0b5fb43b..b71420a12f2613456c4233fb96596f4f0fe72e44 100644 (file)
@@ -61,8 +61,6 @@
 
 #define AARCH64_BREAK_KGDB_DYN_DBG     \
        (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
-#define KGDB_DYN_BRK_INS_BYTE(x)       \
-       ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE            1
 
index 61b49150dfa3046cb521c6fc54232d718e840460..1737aecfcc5e462c78e6c47ab06ffe2e2fc12c60 100644 (file)
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE                  1
-#define MAX_STACK_SIZE                 128
 
 #define flush_insn_slot(p)             do { } while (0)
 #define kretprobe_blacklist_size       0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
        struct kprobe_step_ctx ss_ctx;
        struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
index 0a456bef8c792d91a30a4c2caf24cb6fb96546e2..2fee2f59288c94d70814771ed06fced11ede369d 100644 (file)
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)                                              \
 ({                                                                     \
        typeof(pcp) __retval;                                           \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
                                              sizeof(pcp));             \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        __retval;                                                       \
 })
 
 #define _percpu_write(pcp, val)                                                \
 do {                                                                   \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
                                sizeof(pcp));                           \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 } while(0)                                                             \
 
 #define _pcp_protect(operation, pcp, val)                      \
index e875a5a551d7debeab3c34ed2d52e9beae7c8f7d..89206b568cd42365090a8be788da1bbd35466f72 100644 (file)
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()      smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
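
The ordering documented above is easiest to see in a try_to_wake_up-style
fragment (an illustrative sketch, not code from this patch):

        /* Publish the condition the woken task will re-check. */
        WRITE_ONCE(cond, 1);

        /*
         * spin_lock() is only an acquire on arm64, so without a full
         * barrier the store above may be reordered into the critical
         * section and be observed too late by a CPU already inside it.
         */
        smp_mb__before_spinlock();
        spin_lock(&wait_lock);
        /* ... inspect task state, decide whether to wake ... */
        spin_unlock(&wait_lock);
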
index 5e834d10b2913d91bf3620c0e5930cdb5e14035e..c47257c91b77e3d6516000c0c8bec5705b97b6dc 100644 (file)
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
-       return  __arch_copy_from_user(to, from, n);
+       check_object_size(to, n, false);
+       return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        kasan_check_read(from, n);
-       return  __arch_copy_to_user(to, from, n);
+       check_object_size(from, n, true);
+       return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               check_object_size(to, n, false);
                n = __arch_copy_from_user(to, from, n);
-       else /* security hole - plug it */
+       } else /* security hole - plug it */
                memset(to, 0, n);
        return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 {
        kasan_check_read(from, n);
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               check_object_size(from, n, true);
                n = __arch_copy_to_user(to, from, n);
+       }
        return n;
 }
 
index 96e4a2b64cc1221420540df2d7255dd2aca1b298..441420ca7d084af131bf7e7373c7bfc22c8ae26f 100644 (file)
@@ -353,6 +353,8 @@ el1_sync:
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
+       cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
+       b.eq    el1_ia
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv
+
+el1_ia:
+       /*
+        * Fall through to the Data abort case
+        */
 el1_da:
        /*
         * Data abort handling
index b77f58355da11c43ab872f24dbcec134305191ed..3e7b050e99dcee0fecfac38c628eaa5c38991db3 100644 (file)
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
        isb
        bl      __create_page_tables            // recreate kernel mapping
 
+       tlbi    vmalle1                         // Remove any stale TLB entries
+       dsb     nsh
+
        msr     sctlr_el1, x19                  // re-enable the MMU
        isb
        ic      iallu                           // flush instructions fetched
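
(On the two instructions added above: __create_page_tables has just
rewritten tables the MMU may have cached during the first boot pass, so
the local TLB is invalidated before the MMU is switched back on; dsb nsh
is sufficient because the invalidation only has to complete on this CPU.)
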
index 21ab5df9fa76ddea54d58abf77812593ddbb0f6d..65d81f965e7491daba3e05be40052776a6d1f64d 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
        set_pte(pte, __pte(virt_to_phys((void *)dst) |
                         pgprot_val(PAGE_KERNEL_EXEC)));
 
-       /* Load our new page tables */
-       asm volatile("msr       ttbr0_el1, %0;"
-                    "isb;"
-                    "tlbi      vmalle1is;"
-                    "dsb       ish;"
-                    "isb" : : "r"(virt_to_phys(pgd)));
+       /*
+        * Load our new page tables. A strict BBM approach requires that we
+        * ensure that TLBs are free of any entries that may overlap with the
+        * global mappings we are about to install.
+        *
+        * For a real hibernate/resume cycle TTBR0 currently points to a zero
+        * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+        * runtime services), while for a userspace-driven test_resume cycle it
+        * points to userspace page tables (and we must point it at a zero page
+        * ourselves). Elsewhere we only (un)install the idmap with preemption
+        * disabled, so T0SZ should be as required regardless.
+        */
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+       isb();
 
        *phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -393,6 +404,38 @@ int swsusp_arch_resume(void)
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
 
+       /*
+        * Restoring the memory image will overwrite the ttbr1 page tables.
+        * Create a second copy of just the linear map, and use this when
+        * restoring.
+        */
+       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!tmp_pg_dir) {
+               pr_err("Failed to allocate memory for temporary page tables.");
+               rc = -ENOMEM;
+               goto out;
+       }
+       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+       if (rc)
+               goto out;
+
+       /*
+        * Since we only copied the linear map, we need to find restore_pblist's
+        * linear map address.
+        */
+       lm_restore_pblist = LMADDR(restore_pblist);
+
+       /*
+        * We need a zero page that is zero before & after resume in order
+        * to break before make on the ttbr1 page tables.
+        */
+       zero_page = (void *)get_safe_page(GFP_ATOMIC);
+       if (!zero_page) {
+               pr_err("Failed to allocate zero page.");
+               rc = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Locate the exit code in the bottom-but-one page, so that *NULL
         * still has disastrous effects.
@@ -418,27 +461,6 @@ int swsusp_arch_resume(void)
         */
        __flush_dcache_area(hibernate_exit, exit_size);
 
-       /*
-        * Restoring the memory image will overwrite the ttbr1 page tables.
-        * Create a second copy of just the linear map, and use this when
-        * restoring.
-        */
-       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-       if (!tmp_pg_dir) {
-               pr_err("Failed to allocate memory for temporary page tables.");
-               rc = -ENOMEM;
-               goto out;
-       }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-       if (rc)
-               goto out;
-
-       /*
-        * Since we only copied the linear map, we need to find restore_pblist's
-        * linear map address.
-        */
-       lm_restore_pblist = LMADDR(restore_pblist);
-
        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
                __hyp_set_vectors(el2_vectors);
        }
 
-       /*
-        * We need a zero page that is zero before & after resume in order to
-        * to break before make on the ttbr1 page tables.
-        */
-       zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, lm_restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
index 8c57f6496e56dc65f624569f9496f8907991516b..e017a9493b92e907235cc19e7df7e53397c3f56e 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
        unregister_die_notifier(&kgdb_notifier);
 }
 
-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
-       .gdb_bpt_instr = {
-               KGDB_DYN_BRK_INS_BYTE(0),
-               KGDB_DYN_BRK_INS_BYTE(1),
-               KGDB_DYN_BRK_INS_BYTE(2),
-               KGDB_DYN_BRK_INS_BYTE(3),
-       }
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+
+       BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+       err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+       if (err)
+               return err;
+
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       *(u32 *)bpt->saved_instr);
+}
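Planting the breakpoint now goes through the AArch64 instruction-patching
helpers instead of a little-endian byte template, so cache maintenance and
endianness are handled in one place. A sketch of the same save-then-patch
pattern, assuming addr is a valid kernel text address:

    u32 saved;
    int err;

    err = aarch64_insn_read((void *)addr, &saved);  /* keep the original */
    if (!err)
            err = aarch64_insn_write((void *)addr,  /* plant the BRK */
                                     (u32)AARCH64_BREAK_KGDB_DYN_DBG);

    /* and later, to remove the breakpoint again: */
    if (!err)
            err = aarch64_insn_write((void *)addr, saved);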
index bf97685882883e8970e472edeb0b457cbc2b76e6..c6b0f40620d868d1bf031db8e0cb0dfff3b134dc 100644 (file)
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-       unsigned long size;
-
-       if (on_irq_stack(addr, raw_smp_processor_id()))
-               size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-       else
-               size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-       return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
        /* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_ptr = kernel_stack_pointer(regs);
 
        kcb->jprobe_saved_regs = *regs;
        /*
-        * As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
+        * Since we can't be sure where in the stack frame "stacked"
+        * pass-by-value arguments are stored, we just don't try to
+        * duplicate any of the stack. Do not use jprobes on functions that
+        * use more than 64 bytes (after padding each to an 8 byte boundary)
+        * of arguments, or pass individual arguments larger than 16 bytes.
         */
-       kasan_disable_current();
-       memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-              min_stack_size(stack_ptr));
-       kasan_enable_current();
 
        instruction_pointer_set(regs, (unsigned long) jp->entry);
        preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        }
        unpause_graph_tracing();
        *regs = kcb->jprobe_saved_regs;
-       kasan_disable_current();
-       memcpy((void *)stack_addr, kcb->jprobes_stack,
-              min_stack_size(stack_addr));
-       kasan_enable_current();
        preempt_enable_no_resched();
        return 1;
 }
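With the stack snapshot gone, jprobes remain usable only on functions whose
arguments fit the register set plus the 64-byte window described above. A
sketch of a registration that respects the constraint (the probe target and
names here are just examples):

    #include <linux/kprobes.h>

    /* The entry must mirror the probed function's prototype. */
    static long jdo_sys_open(int dfd, const char __user *filename,
                             int flags, umode_t mode)
    {
            pr_info("do_sys_open: flags=%#x\n", flags);
            jprobe_return();        /* mandatory; never return normally */
            return 0;
    }

    static struct jprobe my_jprobe = {
            .entry          = jdo_sys_open,
            .kp.symbol_name = "do_sys_open",
    };

    /* register_jprobe(&my_jprobe) on load, unregister_jprobe() on unload */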
index 9a3aec97ac091bd8503e539544b3a62bec0bd143..ccf79d849e0a89e8a0694d04c005f8508d590de3 100644 (file)
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
        /* enable the MMU early - so we can access sleep_save_stash by va */
        adr_l   lr, __enable_mmu        /* __cpu_setup will return here */
-       ldr     x27, =_cpu_resume       /* __enable_mmu will branch here */
+       adr_l   x27, _resume_switched   /* __enable_mmu will branch here */
        adrp    x25, idmap_pg_dir
        adrp    x26, swapper_pg_dir
        b       __cpu_setup
 ENDPROC(cpu_resume)
 
+       .pushsection    ".idmap.text", "ax"
+_resume_switched:
+       ldr     x8, =_cpu_resume
+       br      x8
+ENDPROC(_resume_switched)
+       .ltorg
+       .popsection
+
 ENTRY(_cpu_resume)
        mrs     x1, mpidr_el1
        adrp    x8, mpidr_hash
index 76a6d9263908faf4800cb0807952d17c6f8276d9..3ff173e9258230921fc26f895efbdaee38728e1e 100644 (file)
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
-static void smp_store_cpu_info(unsigned int cpuid)
-{
-       store_cpu_topology(cpuid);
-       numa_store_cpu_info(cpuid);
-}
-
 /*
  * This is the secondary CPU boot entry.  We're using this CPU's
  * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
         */
        notify_cpu_starting(cpu);
 
-       smp_store_cpu_info(cpu);
+       store_cpu_topology(cpu);
 
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -661,9 +655,9 @@ void __init smp_init_cpus(void)
                acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);
 
-       if (cpu_count > NR_CPUS)
-               pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-                       cpu_count, NR_CPUS);
+       if (cpu_count > nr_cpu_ids)
+               pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+                       cpu_count, nr_cpu_ids);
 
        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +671,7 @@ void __init smp_init_cpus(void)
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
-       for (i = 1; i < NR_CPUS; i++) {
+       for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        int err;
        unsigned int cpu;
+       unsigned int this_cpu;
 
        init_cpu_topology();
 
-       smp_store_cpu_info(smp_processor_id());
+       this_cpu = smp_processor_id();
+       store_cpu_topology(this_cpu);
+       numa_store_cpu_info(this_cpu);
 
        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                        continue;
 
                set_cpu_present(cpu, true);
+               numa_store_cpu_info(cpu);
        }
 }
 
index ae7855f16ec2966f6cde3c2639fe3af360e9a676..5a84b45626032ea1260eea2536da42231dcbfa4f 100644 (file)
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
        /*
         * We must restore the 32-bit state before the sysregs, thanks
-        * to Cortex-A57 erratum #852523.
+        * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_guest_state(guest_ctxt);
index b0b225ceca18f956a805c2862f299d0ee9575749..e51367d159d0250aaa7e53d33803caddfbae9de8 100644 (file)
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters.  Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
        /* ICC_SRE */
-       { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+       { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
index f94b80eb295dc48cd26a8d8ba01094abc9d669e8..9c3e75df21804bdefec8587550e53c8b3fd8920e 100644 (file)
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-       pte_t *pte = pte_offset_kernel(pmd, 0);
+       pte_t *pte = pte_offset_kernel(pmd, 0UL);
        unsigned long addr;
        unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-       pmd_t *pmd = pmd_offset(pud, 0);
+       pmd_t *pmd = pmd_offset(pud, 0UL);
        unsigned long addr;
        unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-       pud_t *pud = pud_offset(pgd, 0);
+       pud_t *pud = pud_offset(pgd, 0UL);
        unsigned long addr;
        unsigned i;
 
index c8beaa0da7df4e60e3939c962b268cf666337dde..05d2bd776c69b932397ccec82306d8fbb46272bf 100644 (file)
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+       return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
        /*
         * Are we prepared to handle this kernel fault?
+        * We are almost certainly not prepared to handle instruction faults.
         */
-       if (fixup_exception(regs))
+       if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;
 
        /*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
        unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+              (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+               if (is_el1_instruction_abort(esr))
+                       die("Attempting to execute userspace memory", regs, esr);
+
                if (!search_exception_tables(regs->pc))
                        die("Accessing user space memory outside uaccess.h routines", regs, esr);
        }
index c7fe3ec70774a6c81cb0940940eba389ffa6acb7..5bb15eab6f00e42bee6142a489746dc961298e87 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;
index 5bb61de2320172c806ee58959e3f721b2b243a99..9d37e967fa198779e01a5cffb35782da5ef2f6f6 100644 (file)
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 
        msr     tcr_el1, x8
        msr     vbar_el1, x9
+
+       /*
+        * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+        * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+        * exception. Mask them until local_dbg_restore() in cpu_suspend()
+        * resets them.
+        */
+       disable_dbg
        msr     mdscr_el1, x10
+
        msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
index 68cf638faf4867aef2d92b91f9ab48770100f132..b1ec1fa064632258ac1a4bb50b3b22416bd43c0f 100644 (file)
@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
 
 extern __kernel_size_t copy_to_user(void __user *to, const void *from,
                                    __kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
                                      __kernel_size_t n);
 
 static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
 {
        return __copy_user(to, (const void __force *)from, n);
 }
+static inline __kernel_size_t copy_from_user(void *to,
+                                              const void __user *from,
+                                              __kernel_size_t n)
+{
+       size_t res = ___copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
+}
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
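Several uaccess fixes in this merge share this shape: when the copy cannot
complete, the uncopied tail of the destination must be zeroed so stale
kernel stack or heap contents cannot leak to a caller that mishandles the
return value. A caller-side sketch, with an invented request structure:

    struct demo_req {
            u32 len;
            u8  payload[60];
    };

    static long demo_read_req(void __user *arg)
    {
            struct demo_req r;      /* stack garbage until initialised */

            /*
             * On a faulting copy the helper memset()s the remainder, so
             * r.payload never exposes leftover kernel stack even if the
             * caller ignores the error.
             */
            if (copy_from_user(&r, arg, sizeof(r)))
                    return -EFAULT;
            return r.len;
    }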
index d93ead02daeda4635e772268b328dc652f1d86df..7c6cf14f09854aacd6f31cafedc39a88d451d18a 100644 (file)
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);
index ea59c04b07de8e3a09ed486b8c48a9ffe25ad5c1..075373471da11d011871ab4681e7ded58732ddfb 100644 (file)
         */
        .text
        .align  1
-       .global copy_from_user
-       .type   copy_from_user, @function
-copy_from_user:
+       .global ___copy_from_user
+       .type   ___copy_from_user, @function
+___copy_from_user:
        branch_if_kernel r8, __copy_user
        ret_if_privileged r8, r11, r10, r10
        rjmp    __copy_user
-       .size   copy_from_user, . - copy_from_user
+       .size   ___copy_from_user, . - ___copy_from_user
 
        .global copy_to_user
        .type   copy_to_user, @function
index 12f5d6851bbcb3e28c7ba5421a849df3cabbf4fd..0a2a70096d8b2dc7345b6e1ca85c68b3cdd39b9e 100644 (file)
@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
+       if (likely(access_ok(VERIFY_READ, from, n))) {
                memcpy(to, (const void __force *)from, n);
-       else
-               return n;
-       return 0;
+               return 0;
+       }
+       memset(to, 0, n);
+       return n;
 }
 
 static inline unsigned long __must_check
index c6db52ba3a06653e8b6a22a277c607d82c69de46..10c57771822d5d8f25eed0ccc64ceacfb2dca5ca 100644 (file)
@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
        .leda = RPC_LED_100_10,
        .ledb = RPC_LED_TX_RX,
 };
index f35525b5581991689f0e4b178c5672a2dadbe060..57d1c43726d928600d777940d9836d9a7865c1f3 100644 (file)
@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
        .leda = RPC_LED_100_10,
        .ledb = RPC_LED_TX_RX,
 };
index e3530d0f13ee74e9c4b2bdfbaf10e226499af2e9..56c7d5750abd66c0b75e7af2d6f0738a18236b6e 100644 (file)
@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_user(to, from, n);
-       return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_user_zeroing(to, from, n);
-       return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __do_clear_user(to, n);
-       return n;
-}
-
 static inline long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
-               ret = __generic_copy_from_user(to, from, n);
+               ret = __copy_user_zeroing(to, from, n);
 
        return ret;
 }
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
-               ret = __generic_copy_to_user(to, from, n);
+               ret = __copy_user(to, from, n);
 
        return ret;
 }
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
-               ret = __generic_clear_user(to, n);
+               ret = __do_clear_user(to, n);
 
        return ret;
 }
 
 
-#define clear_user(to, n)                              \
-       (__builtin_constant_p(n) ?                      \
-        __constant_clear_user(to, n) :                 \
-        __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+               return n;
+       if (__builtin_constant_p(n))
+               return __constant_clear_user(to, n);
+       else
+               return __do_clear_user(to, n);
+}
 
-#define copy_from_user(to, from, n)                    \
-       (__builtin_constant_p(n) ?                      \
-        __constant_copy_from_user(to, from, n) :       \
-        __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+               memset(to, 0, n);
+               return n;
+       }
+       if (__builtin_constant_p(n))
+               return __constant_copy_from_user(to, from, n);
+       else
+               return __copy_user_zeroing(to, from, n);
+}
 
-#define copy_to_user(to, from, n)                      \
-       (__builtin_constant_p(n) ?                      \
-        __constant_copy_to_user(to, from, n) :         \
-        __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+               return n;
+       if (__builtin_constant_p(n))
+               return __constant_copy_to_user(to, from, n);
+       else
+               return __copy_user(to, from, n);
+}
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
index 3ac9a59d65d4129142d06ba9516455e0c5dc96db..87d9e34c5df866072ced6de336ea07e3755853eb 100644 (file)
@@ -263,19 +263,25 @@ do {                                                      \
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
-#define clear_user(dst,count)                  __memset_user(____force(dst), (count))
+#define __clear_user(dst,count)                        __memset_user(____force(dst), (count))
 #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
 #define __copy_to_user_inatomic(to, from, n)   __memcpy_user(____force(to), (from), (n))
 
 #else
 
-#define clear_user(dst,count)                  (memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count)                        (memset(____force(dst), 0, (count)), 0)
 #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
 #define __copy_to_user_inatomic(to, from, n)   (memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+       if (likely(__access_ok(to, n)))
+               n = __clear_user(to, n);
+       return n;
+}
 
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
index 2e221c5f0203b22d3bda055862548afb3803bb2d..f86918aed9e1181e3dc2546a1b0520f814138164 100644 (file)
@@ -3,6 +3,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 /* H8/300 internal I/O functions */
 
 #define __raw_readb __raw_readb
index f000a382bc7f62f28dfe980ab184904c70681914..f61cfb28e9f283800f968c2953705851da429e68 100644 (file)
@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 {
        long res = __strnlen_user(src, n);
 
-       /* return from strnlen can't be zero -- that would be rubbish. */
+       if (unlikely(!res))
+               return -EFAULT;
 
        if (res > n) {
                copy_from_user(dst, src, n);
index 6a15083cc366df9c13962206952d266625034c7f..18ca6a9ce566cc3b8463d22ecfe3e27c25fc24f7 100644 (file)
@@ -52,6 +52,7 @@ config IA64
        select MODULES_USE_ELF_RELA
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
index 2189d5ddc1eeef552dd602875ae7d34c8baf17f7..bfe13196f7708f6e574f2ecdbbb34c2acc29855e 100644 (file)
@@ -241,12 +241,16 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+       check_object_size(from, count, true);
+
        return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+       check_object_size(to, count, false);
+
        return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,22 +262,23 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        const void *__cu_from = (from);                                                 \
        long __cu_len = (n);                                                            \
                                                                                        \
-       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
-               __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
+       if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
+               check_object_size(__cu_from, __cu_len, true);                   \
+               __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
-#define copy_from_user(to, from, n)                                                    \
-({                                                                                     \
-       void *__cu_to = (to);                                                           \
-       const void __user *__cu_from = (from);                                          \
-       long __cu_len = (n);                                                            \
-                                                                                       \
-       __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
-               __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
-       __cu_len;                                                                       \
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       check_object_size(to, n, false);
+       if (likely(__access_ok(from, n, get_fs())))
+               n = __copy_user((__force void __user *) to, from, n);
+       else
+               memset(to, 0, n);
+       return n;
+}
 
 #define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
 
index cac7014daef3aa6949d17a72824a757dfdbee5b4..6f8982157a75849b491c8461c0c3b34d4c7c4088 100644 (file)
@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        might_fault();                                                  \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);             \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
index 2dcee3a88867536b2186f01252f1bf1d842ec279..9202f82dfce60e919d884e37c89924ade5a33569 100644 (file)
@@ -213,7 +213,6 @@ static inline int frame_extra_sizes(int f)
 
 static inline void adjustformat(struct pt_regs *regs)
 {
-       ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
        /*
         * set format byte to make stack appear modulo 4, which it will
         * be when doing the rte
index 8282cbce7e399a84488e675af0751341d24f0205..273e61225c277ae67ba28dfae4ef9123e3ef34a9 100644 (file)
@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
+       if (likely(access_ok(VERIFY_READ, from, n)))
                return __copy_user_zeroing(to, from, n);
+       memset(to, 0, n);
        return n;
 }
 
index 11fa51c89617deb1a303c6bfdbf82b2a32a1e4db..c0ec116b3993a3a61b852c9daf14a34ab0962e15 100644 (file)
@@ -390,7 +390,6 @@ void __init mem_init(void)
 
        free_all_bootmem();
        mem_init_print_info(NULL);
-       show_mem(0);
 }
 
 void free_initmem(void)
index 331b0d35f89ce301ad9ba876be7322f417d7c909..826676778094f23a8ced8264b5c448184c0fa006 100644 (file)
@@ -227,7 +227,7 @@ extern long __user_bad(void);
 
 #define __get_user(x, ptr)                                             \
 ({                                                                     \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        /*unsigned long __gu_ptr = (unsigned long)(ptr);*/              \
        long __gu_err;                                                  \
        switch (sizeof(*(ptr))) {                                       \
@@ -373,10 +373,13 @@ extern long __user_bad(void);
 static inline long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
+       unsigned long res = n;
        might_fault();
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_from_user(to, from, n);
-       return n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 #define __copy_to_user(to, from, n)    \
index 26388562e300ee1ad01acfe856117b369af3ca73..212ff92920d23e4a66d3b258815450e2934b594c 100644 (file)
@@ -65,6 +65,7 @@ config MIPS
        select ARCH_CLOCKSOURCE_DATA
        select HANDLE_DOMAIN_IRQ
        select HAVE_EXIT_THREAD
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 menu "Machine selection"
 
index f0e314ceb8baa84d36ea6664772f71df46ad003f..7f975b20b20c713e6225d14e3984013f57fe5026 100644 (file)
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-       prompt "Compact branch policy"
-       default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-       bool "Never (force delay slot branches)"
-       help
-         Pass the -mcompact-branches=never flag to the compiler in order to
-         force it to always emit branches with delay slots, and make no use
-         of the compact branch instructions introduced by MIPSr6. This is
-         useful if you suspect there may be an issue with compact branches in
-         either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-       bool "Optimal (use where beneficial)"
-       help
-         Pass the -mcompact-branches=optimal flag to the compiler in order for
-         it to make use of compact branch instructions where it deems them
-         beneficial, and use branches with delay slots elsewhere. This is the
-         default compiler behaviour, and should be used unless you have a
-         reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-       bool "Always (force compact branches)"
-       help
-         Pass the -mcompact-branches=always flag to the compiler in order to
-         force it to always emit compact branches, making no use of branch
-         instructions with delay slots. This can result in more compact code
-         which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
        bool "L2 cache debugfs entries"
        depends on DEBUG_FS
index efd7a9dc93c4161a381081c569a5b4e91b182f5e..598ab2930fce67bb373827d7bbb09be35880a75e 100644 (file)
@@ -203,10 +203,6 @@ endif
 toolchain-virt                         := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt)               += -DTOOLCHAIN_SUPPORTS_VIRT
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)   += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS)  += -mcompact-branches=always
-
 #
 # Firmware support
 #
index 2e7378467c5cb113e77068625fcca20d0a9b89b9..cc3a1e33a600ec7a8dd6f73ca12354955065d44d 100644 (file)
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
        struct clk *clk;
 
        clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-       if (!clk)
+       if (IS_ERR(clk))
                panic("failed to allocate %s clock structure", name);
 
        return clk;
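The fix relies on the clk framework's error convention: registration
helpers return an ERR_PTR()-encoded errno on failure, never NULL, so the
old NULL test could never fire. A sketch with made-up clock names:

    struct clk *clk;

    clk = clk_register_fixed_factor(NULL, "ref_div4", "ref", 0, 1, 4);
    if (IS_ERR(clk))        /* a failure is never a NULL pointer here */
            panic("failed to register clock: %ld", PTR_ERR(clk));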
index b31fbc9d6eae23ea37e07591f5c664e3f9346903..37a932d9148c214e2cb3fe9668af7330f3f8bf52 100644 (file)
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
        return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
 
 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");
index 56584a659183db05b7dc5a8697ab8497bd4e45cd..83054f79f72aa62595923ca017936f6cd60075fd 100644 (file)
        ldc1    $f28, THREAD_FPR28(\thread)
        ldc1    $f30, THREAD_FPR30(\thread)
        ctc1    \tmp, fcr31
+       .set    pop
        .endm
 
        .macro  fpu_restore_16odd thread
index 0cf5ac1f72452dc9c4f6980e500e5fde43e08b4c..8ff2cbdf2c3ee8f74a4b3e9df6c7369c1f39dee8 100644 (file)
@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
        extern const bool octeon_should_swizzle_table[];
+       u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
 
-       unsigned long did = ((unsigned long)a >> 40) & 0xff;
        return octeon_should_swizzle_table[did];
 }
 
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
 
 #define __should_swizzle_bits(a)       false
 
-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
        /* boot bus? */
        return ((p >> 40) & 0xff) == 0;
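The move to u64 matters on 32-bit builds, where unsigned long is 32 bits
wide: the address would already have lost its upper half, and a 40-bit
shift would exceed the type width. A sketch with an assumed address value:

    u64 p = 0x8001190000000000ULL;  /* assumed device address */
    u8 did = (p >> 40) & 0xff;      /* 0x19: the DID bits survive intact */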
index 2f82bfa3a77347155a1526dde873387d01dcf138..c9f5769dfc8fca9d10c1ce4fe12ff62d3c4c8c66 100644 (file)
 #define CP0_EBASE $15, 1
 
        .macro  kernel_entry_setup
+#ifdef CONFIG_SMP
        mfc0    t0, CP0_EBASE
        andi    t0, t0, 0x3ff           # CPUNum
        beqz    t0, 1f
        # CPUs other than zero goto smp_bootstrap
        j       smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
        .endm
index ea0cd9773914cc06c96fa4dbc783cd84f441c856..5f987598054f906dea71ad364b2be9cc8c74355e 100644 (file)
@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 static inline unsigned long ___pa(unsigned long x)
 {
-       if (config_enabled(CONFIG_64BIT)) {
+       if (IS_ENABLED(CONFIG_64BIT)) {
                /*
                 * For MIPS64 the virtual address may either be in one of
                 * the compatibility segements ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
                return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
        }
 
-       if (!config_enabled(CONFIG_EVA)) {
+       if (!IS_ENABLED(CONFIG_EVA)) {
                /*
                 * We're using the standard MIPS32 legacy memory map, ie.
                 * the address x is going to be in kseg0 or kseg1. We can
index 11b965f98d9589f7ba1686877a9f054a4c1ae42e..21a2aaba20d5ed4ae9eb551b19ab208dbf5aa3c2 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <linux/string.h>
 #include <asm/asm-eva.h>
 
 /*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
                        __cu_len = __invoke_copy_from_user(__cu_to,     \
                                                           __cu_from,   \
                                                           __cu_len);   \
+               } else {                                                \
+                       memset(__cu_to, 0, __cu_len);                   \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
index c3372cac6db220eebe2ff6eba44a1a71c01275be..0a7e10b5f9e39eb312e0e48b2eab1966df6854f3 100644 (file)
@@ -1164,7 +1164,9 @@ fpu_emul:
                regs->regs[31] = r31;
                regs->cp0_epc = epc;
                if (!used_math()) {     /* First time FPU user.  */
+                       preempt_disable();
                        err = init_fpu();
+                       preempt_enable();
                        set_used_math();
                }
                lose_fpu(1);    /* Save FPU state for the emulator. */
index 7429ad09fbe3e1178ad37f0e34ed2f1651ac70ac..d2d061520a23000116cc9c6d9f0a7f4a077aa372 100644 (file)
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
                return -EOPNOTSUPP;
 
        /* Avoid inadvertently triggering emulation */
-       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+       if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+           !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
                return -EOPNOTSUPP;
-       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+       if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
                return -EOPNOTSUPP;
 
        /* FR = 0 not supported in MIPS R6 */
-       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+       if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;
 
        /* Proceed with the mode switch */
index 36cf8d65c47dbef02c408fd9b30428a8a4828998..3be0e6ba2797c4c5055982d908c9a277c975651d 100644 (file)
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
        int x = boot_mem_map.nr_map;
        int i;
 
+       /*
+        * If the region reaches the top of the physical address space, adjust
+        * the size slightly so that (start + size) doesn't overflow
+        */
+       if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+               --size;
+
        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
index f95f094f36e4eb035d3edc919f6573c6ebc90f65..b0baf48951faabffac7a53fde37dc79748dea0ac 100644 (file)
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
+       cpumask_set_cpu(cpu, &cpu_callin_map);
+       synchronise_count_slave(cpu);
+
        set_cpu_online(cpu, true);
 
        set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
 
        calculate_cpu_foreign_map();
 
-       cpumask_set_cpu(cpu, &cpu_callin_map);
-
-       synchronise_count_slave(cpu);
-
        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
index 8452d933a6453c9a1f8655d56a3e0812605a6a12..1149b30c9aebf7a00a0723f46a823bbe8c602425 100644 (file)
@@ -222,7 +222,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
                return NOTIFY_DONE;
 
        switch (val) {
-       case DIE_BREAK:
+       case DIE_UPROBE:
                if (uprobe_pre_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
index 9abe447a4b480a8254633fd1afa9f3f7dbac319e..f9dbfb14af3358e67b3ef3e922709ab38ba0bdd8 100644 (file)
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
        unsigned long num_pages, i;
+       unsigned long data_pfn;
 
        BUG_ON(!PAGE_ALIGNED(image->data));
        BUG_ON(!PAGE_ALIGNED(image->size));
 
        num_pages = image->size / PAGE_SIZE;
 
-       for (i = 0; i < num_pages; i++) {
-               image->mapping.pages[i] =
-                       virt_to_page(image->data + (i * PAGE_SIZE));
-       }
+       data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+       for (i = 0; i < num_pages; i++)
+               image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)
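virt_to_page() is only valid for linear-map addresses, whereas the vDSO
data is a kernel-image symbol; translating via __pa_symbol() is correct
for both. A sketch with an assumed symbol name:

    extern char vdso_start[];       /* assumed kernel-image symbol */

    unsigned long pfn = __phys_to_pfn(__pa_symbol(vdso_start));
    struct page *first = pfn_to_page(pfn);  /* later pages are pfn + i */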
index 6eb52b9c98183b95134710c22187b1a6f0225ef9..e788515f766b46cefb2a36dfc95ab6bfcec6e8e3 100644 (file)
@@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 
        preempt_disable();
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
-                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+                   kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+                       kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+                               __func__, va, vcpu, read_c0_entryhi());
+                       er = EMULATE_FAIL;
+                       preempt_enable();
+                       goto done;
+               }
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;
@@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
-                       } else {
-                               /*
-                                * We fault an entry from the guest tlb to the
-                                * shadow host TLB
-                                */
-                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+                       }
+                       /*
+                        * We fault an entry from the guest tlb to the
+                        * shadow host TLB
+                        */
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                               preempt_enable();
+                               goto done;
                        }
                }
        } else {
@@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                         * OK we have a Guest TLB entry, now inject it into the
                         * shadow host TLB
                         */
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                       }
                }
        }
 
index 57319ee57c4fdd1aada66e1648adc3981edf9dc3..121008c0fcc92e81847176b0fa016947f8ef8cd0 100644 (file)
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = gfn_to_pfn(kvm, gfn);
 
-       if (is_error_pfn(pfn)) {
+       if (is_error_noslot_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
                err = -EFAULT;
                goto out;
@@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        }
 
        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-       if (gfn >= kvm->arch.guest_pmap_npages) {
+       if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
@@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        kvm_pfn_t pfn0, pfn1;
+       gfn_t gfn0, gfn1;
+       long tlb_lo[2];
        int ret;
 
-       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-               pfn0 = 0;
-               pfn1 = 0;
-       } else {
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               pfn0 = kvm->arch.guest_pmap[
-                       mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
-               pfn1 = kvm->arch.guest_pmap[
-                       mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
+       tlb_lo[0] = tlb->tlb_lo[0];
+       tlb_lo[1] = tlb->tlb_lo[1];
+
+       /*
+        * The commpage address must not be mapped to anything else if the guest
+        * TLB contains entries nearby, or commpage accesses will break.
+        */
+       if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+                       VPN2_MASK & (PAGE_MASK << 1)))
+               tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+       gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+       gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+       if (gfn0 >= kvm->arch.guest_pmap_npages ||
+           gfn1 >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+                       __func__, gfn0, gfn1, tlb->tlb_hi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               return -1;
        }
 
+       if (kvm_mips_map_page(kvm, gfn0) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(kvm, gfn1) < 0)
+               return -1;
+
+       pfn0 = kvm->arch.guest_pmap[gfn0];
+       pfn1 = kvm->arch.guest_pmap[gfn1];
+
        /* Get attributes from the Guest TLB */
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-               (tlb->tlb_lo[0] & ENTRYLO_D) |
-               (tlb->tlb_lo[0] & ENTRYLO_V);
+               (tlb_lo[0] & ENTRYLO_D) |
+               (tlb_lo[0] & ENTRYLO_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-               (tlb->tlb_lo[1] & ENTRYLO_D) |
-               (tlb->tlb_lo[1] & ENTRYLO_V);
+               (tlb_lo[1] & ENTRYLO_D) |
+               (tlb_lo[1] & ENTRYLO_V);
 
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);
@@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-                                                            &vcpu->arch.
-                                                            guest_tlb[index]);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                               &vcpu->arch.guest_tlb[index])) {
+                               kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, opc, index, vcpu,
+                                       read_c0_entryhi());
+                               kvm_mips_dump_guest_tlbs(vcpu);
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
                        inst = *(opc);
                }
                local_irq_restore(flags);
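The (gfn | 1) bounds test earlier in this file works because a MIPS TLB
entry maps an even/odd page pair: the odd member is the larger index, so
checking it covers both. As a sketch:

    static bool pair_in_range(gfn_t gfn, unsigned long npages)
    {
            /*
             * gfn | 1 is the odd page of the pair; if it is in range,
             * the even page (gfn & ~1) necessarily is as well.
             */
            return (gfn | 1) < npages;
    }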
index 72a4642eee2c509ec9576b0bb2a7110cfe8b3584..4a094f7acb3dfe1d9b72f51bb28eb0c31afefd65 100644 (file)
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
        /* Set EPC to return to post-branch instruction */
        xcp->cp0_epc = current->thread.bd_emu_cont_pc;
        pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+       MIPS_FPU_EMU_INC_STATS(ds_emul);
        return true;
 }
index cd72805b64a764c0a33f7d7ffb8c10d8119ac509..fa7d8d3790bfc960bc7d4b358e9fb1a5120c04e2 100644 (file)
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
                 * If address-based cache ops don't require an SMP call, then
                 * use them exclusively for small flushes.
                 */
-               size = start - end;
+               size = end - start;
                cache_size = icache_size;
                if (!cpu_has_ic_fills_f_dc) {
                        size *= 2;
index a5509e7dcad2ce0fe8504b67ce325303e51894f8..2c3749d98f04b2f78ee6e70035a6e175d50a819e 100644 (file)
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;
-       phys_addr_t skip;
 
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
                        continue;
                }
 
-               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+               /* Round lower up */
                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower += skip;
+               cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
 
-               cfg[num_cfg].upper = cfg[num_cfg].lower;
-               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-               cfg[num_cfg].upper -= skip;
+               /* Round upper down */
+               cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+                                       boot_mem_map.map[i].size;
+               cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
 
                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
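The MAAR bounds are 64 KiB granular, so the region is shrunk inward rather
than widened outward. A worked example with assumed addresses:

    /* region [0x00012345, 0x00123456) */
    unsigned long lower = 0x00012345;
    unsigned long end   = 0x00123456;
    unsigned long upper;

    lower = (lower + 0xffff) & ~0xffff;  /* 0x00020000: start rounded up */
    upper = (end & ~0xffff) - 1;         /* 0x0011ffff: end rounded down */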
index 20f7bf6de384d06d5beebaeb216d2e6d4f675350..d012e877a95a02993ab57f9b7e6d73f767978344 100644 (file)
@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n\t"                                        \
+               "       mov             0,%1\n"                 \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
index 7826e6c364e74f0075e43081e5db367062baef30..ce8899e5e171370ebc88c08a91727ef9232febf9 100644 (file)
@@ -9,7 +9,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 unsigned long
 __generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
        if (access_ok(VERIFY_READ, from, n))
                __copy_user_zeroing(to, from, n);
+       else
+               memset(to, 0, n);
        return n;
 }
 
index caa51ff85a3c7d0ac7c57cf034758185aad64724..0ab82324c8174559ddd478aa0a2dd171b652b8c0 100644 (file)
@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
 static inline long copy_from_user(void *to, const void __user *from,
                                unsigned long n)
 {
-       if (!access_ok(VERIFY_READ, from, n))
-               return n;
-       return __copy_from_user(to, from, n);
+       unsigned long res = n;
+       if (access_ok(VERIFY_READ, from, n))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
 
 #define __get_user_unknown(val, size, ptr, err) do {                   \
        err = 0;                                                        \
-       if (copy_from_user(&(val), ptr, size)) {                        \
+       if (__copy_from_user(&(val), ptr, size)) {                      \
                err = -EFAULT;                                          \
        }                                                               \
        } while (0)
@@ -166,7 +169,7 @@ do {                                                                        \
        ({                                                              \
        long __gu_err = -EFAULT;                                        \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);              \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
        (x) = (__force __typeof__(x))__gu_val;                          \
        __gu_err;                                                       \
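
This conversion, like the mn10300, openrisc, parisc and powerpc ones around it, enforces one contract: when the copy faults partway, copy_from_user() must zero the uncopied tail of the kernel buffer so stale kernel memory cannot leak through an uninitialized destination. A hedged userspace model of that contract; the fault simulation and helper names are invented for illustration:

#include <assert.h>
#include <string.h>

/* Stand-in for the arch copy loop: a simulated fault stops it early and
 * it returns the number of bytes NOT copied (invented helper). */
static unsigned long fake_arch_copy(void *to, const void *from,
				    unsigned long n, unsigned long fault_at)
{
	unsigned long done = fault_at < n ? fault_at : n;

	memcpy(to, from, done);
	return n - done;
}

static unsigned long model_copy_from_user(void *to, const void *from,
					  unsigned long n,
					  unsigned long fault_at)
{
	unsigned long res = fake_arch_copy(to, from, n, fault_at);

	if (res)	/* partial copy: zero the uncopied tail */
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "secret!", dst[8];

	memset(dst, 0xAA, sizeof(dst));	/* pretend this is stale data */
	unsigned long res = model_copy_from_user(dst, src, 8, 3);

	assert(res == 5);
	for (int i = 3; i < 8; i++)
		assert(dst[i] == 0);	/* tail zeroed, nothing leaks */
	return 0;
}
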
index a6bd07ca3d6c08dc49af6ab4738fe85e1ddcd485..5cc6b4f1b79516a7a5882886b59884d03529df6d 100644 (file)
@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_tofrom_user(to, from, n);
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + n - TASK_SIZE;
-               return __copy_tofrom_user(to, from, n - over) + over;
-       }
-       return n;
+       unsigned long res = n;
+
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_tofrom_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_tofrom_user(to, from, n);
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + n - TASK_SIZE;
-               return __copy_tofrom_user(to, from, n - over) + over;
-       }
+       if (likely(access_ok(VERIFY_WRITE, to, n)))
+               n = __copy_tofrom_user(to, from, n);
        return n;
 }
 
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
 static inline __must_check unsigned long
 clear_user(void *addr, unsigned long size)
 {
-
-       if (access_ok(VERIFY_WRITE, addr, size))
-               return __clear_user(addr, size);
-       if ((unsigned long)addr < TASK_SIZE) {
-               unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-               return __clear_user(addr, size - over) + over;
-       }
+       if (likely(access_ok(VERIFY_WRITE, addr, size)))
+               size = __clear_user(addr, size);
        return size;
 }
 
index cd87781031653e78693610f7539151537a3c6398..af12c2db9bb8536649c514f7fc78c2a10a4e2e23 100644 (file)
@@ -1,6 +1,5 @@
 config PARISC
        def_bool y
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
index 1a8f6f95689e645676b4aa58a30e0c4f88f6c176..f6a4c016304b657149d8c87f23e0c564e875a36b 100644 (file)
@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_KEYS=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_FONTS=y
index 7e0792658952bbc3fa3e5ec2a7d21276171c8929..c564e6e1fa23424c39efa967cce4a42ac9ae4f2f 100644 (file)
@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
index 0f59fd9ca20526d8a27066c724ab2499965ec62c..482847865dac26ffd890ca529be741cddea4e747 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
+#include <linux/string.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -208,26 +209,30 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-        __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
-        __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                           const void __user *from,
                                           unsigned long n)
 {
         int sz = __compiletime_object_size(to);
-        int ret = -EFAULT;
+        unsigned long ret = n;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == -1 || sz >= n))
                 ret = __copy_from_user(to, from, n);
-        else
-                copy_from_user_overflow();
+        else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
+       else
+                __bad_copy_user();
 
+       if (unlikely(ret))
+               memset(to + (n - ret), 0, ret);
         return ret;
 }
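
The parisc version pairs a build-time check with a runtime warning: a provably undersized destination with a constant length breaks the build via __bad_copy_user(), while a non-constant length only warns at runtime. A sketch of the underlying idea, using the GCC/Clang builtin that __compiletime_object_size() wraps; copy_checked() is an invented name:

#include <stdio.h>
#include <string.h>

static void copy_checked(void *to, const void *from, unsigned long n,
			 long provable_sz)
{
	/* provable_sz == -1 means the compiler could not prove a size. */
	if (provable_sz == -1 || provable_sz >= (long)n)
		memcpy(to, from, n);
	else
		fprintf(stderr, "usercopy: %lu > provable %ld\n",
			n, provable_sz);
}

int main(void)
{
	char small[4], big[64] = "source data";

	/* __builtin_object_size must be evaluated at the call site,
	 * where the destination's type is visible. */
	copy_checked(small, big, 3, (long)__builtin_object_size(small, 0));
	copy_checked(small, big, 16, (long)__builtin_object_size(small, 0));
	return 0;
}
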
 
index c0ae62520d1575526c83b014d194adc0f6a4428a..274d5bc6ecce4aba9e1d19b1cd0e1d6f42ba0eb3 100644 (file)
 #define        ENOTCONN        235     /* Transport endpoint is not connected */
 #define        ESHUTDOWN       236     /* Cannot send after transport endpoint shutdown */
 #define        ETOOMANYREFS    237     /* Too many references: cannot splice */
-#define EREFUSED       ECONNREFUSED    /* for HP's NFS apparently */
 #define        ETIMEDOUT       238     /* Connection timed out */
 #define        ECONNREFUSED    239     /* Connection refused */
-#define EREMOTERELEASE 240     /* Remote peer released connection */
+#define        EREFUSED        ECONNREFUSED    /* for HP's NFS apparently */
+#define        EREMOTERELEASE  240     /* Remote peer released connection */
 #define        EHOSTDOWN       241     /* Host is down */
 #define        EHOSTUNREACH    242     /* No route to host */
 
index 5adc339eb7c8dab935fbbc933a64e811f6800ccc..0c2a94a0f7518b8082ecda3f0307ead9534ea972 100644 (file)
@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
 
 DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
-extern int update_cr16_clocksource(void);      /* from time.c */
-
 /*
 **     PARISC CPU driver - claim "device" and initialize CPU data structures.
 **
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
        }
 #endif
 
-       /* If we've registered more than one cpu,
-        * we'll use the jiffies clocksource since cr16
-        * is not synchronized between CPUs.
-        */
-       update_cr16_clocksource();
-
        return 0;
 }
 
index 505cf1ac5af24ecef4731f845fd6de516ae1f306..4b0b963d52a757e83674149f4162faf3eafb7456 100644 (file)
@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-int update_cr16_clocksource(void)
-{
-       /* since the cr16 cycle counters are not synchronized across CPUs,
-          we'll check if we should switch to a safe clocksource: */
-       if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
-               clocksource_change_rating(&clocksource_cr16, 0);
-               return 1;
-       }
-
-       return 0;
-}
-
 void __init start_cpu_itimer(void)
 {
        unsigned int cpu = smp_processor_id();
index ec4047e170a0e6dc5267509f36974f8cbaa1fa84..927d2ab2ce08a68c2574c41e6d1b732ecf597d9b 100644 (file)
@@ -166,6 +166,7 @@ config PPC
        select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select GENERIC_CPU_AUTOPROBE
        select HAVE_VIRT_CPU_ACCOUNTING
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index ca254546cd05a1a4fe09e87e63945d7b38b8873f..1934707bf321ecf47a835bc7a0c4cd88f092ed07 100644 (file)
@@ -66,29 +66,28 @@ endif
 UTS_MACHINE := $(OLDARCH)
 
 ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override CC    += -mlittle-endian
-ifneq ($(cc-name),clang)
-override CC    += -mno-strict-align
-endif
-override AS    += -mlittle-endian
 override LD    += -EL
-override CROSS32CC += -mlittle-endian
 override CROSS32AS += -mlittle-endian
 LDEMULATION    := lppc
 GNUTARGET      := powerpcle
 MULTIPLEWORD   := -mno-multiple
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
 else
-ifeq ($(call cc-option-yn,-mbig-endian),y)
-override CC    += -mbig-endian
-override AS    += -mbig-endian
-endif
 override LD    += -EB
 LDEMULATION    := ppc
 GNUTARGET      := powerpc
 MULTIPLEWORD   := -mmultiple
 endif
 
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+ifneq ($(cc-name),clang)
+  cflags-$(CONFIG_CPU_LITTLE_ENDIAN)   += -mno-strict-align
+endif
+
+aflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+
 ifeq ($(HAS_BIARCH),y)
 override AS    += -a$(CONFIG_WORD_SIZE)
 override LD    += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
@@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200)               += -Wa,-me200
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
 
+KBUILD_AFLAGS += $(aflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+
 head-y                         := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
 head-$(CONFIG_8xx)             := arch/powerpc/kernel/head_8xx.o
 head-$(CONFIG_40x)             := arch/powerpc/kernel/head_40x.o
index bfe3d37a24ef3a24c07e5a9f7ed8df3a13b4bf59..9fa046d56ebadd6ad25e62b5a29a853b123cd30a 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/cpufeature.h>
 #include <asm/switch_to.h>
 
 #define CHKSUM_BLOCK_SIZE      1
@@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void)
        crypto_unregister_shash(&alg);
 }
 
-module_init(crc32c_vpmsum_mod_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
 module_exit(crc32c_vpmsum_mod_fini);
 
 MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
index 2ef55f8968a2b1fba3f34ce7312e07ce8eb42287..b312b152461b0539a22c5939ba7728a38f5d8d32 100644 (file)
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
 #include <linux/jump_label.h>
 
-#define NUM_CPU_FTR_KEYS       64
+#define NUM_CPU_FTR_KEYS       BITS_PER_LONG
 
 extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
 
index 3d7fc06532a16a7b620a58342d70e41fd69a33da..01b8a13f022467be64ccd46f248344bdf96e9a41 100644 (file)
@@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state;
 
 #endif
 
+/* Idle state entry routines */
+#ifdef CONFIG_PPC_P7_NAP
+#define        IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
+       /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
+       std     r0,0(r1);                                       \
+       ptesync;                                                \
+       ld      r0,0(r1);                                       \
+1:     cmp     cr0,r0,r0;                                      \
+       bne     1b;                                             \
+       IDLE_INST;                                              \
+       b       .
+#endif /* CONFIG_PPC_P7_NAP */
+
 #endif
index 666bef4ebfae72a88c4bceebacbc18a7bfa7b390..9377bdf42eb8aafa983b038afc15a2e96bf33873 100644 (file)
@@ -3,6 +3,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * Mapping of threads to cores
index 57fec8ac7b924cdeabb4a21b65d46785e9fbf745..ddf54f5bbdd1c05efbd286ec305beac3c459d8d6 100644 (file)
@@ -186,6 +186,7 @@ label##3:                                           \
 
 #ifndef __ASSEMBLY__
 void apply_feature_fixups(void);
+void setup_feature_keys(void);
 #endif
 
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
index 88b4901ac4eef45edd2b48c4305f005abcc68847..85b7a1a21e228571df158782f36a79e20728cff9 100644 (file)
@@ -21,7 +21,7 @@
 #ifndef __ASM_PPC64_HMI_H__
 #define __ASM_PPC64_HMI_H__
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 
 #define        CORE_TB_RESYNC_REQ_BIT          63
 #define MAX_SUBCORE_PER_CORE           4
index 148303e7771ff6f2a438b31a07984cd11c9e48dd..6a6792bb39fbc8616034732322bd497ed30c5961 100644 (file)
@@ -183,11 +183,6 @@ struct paca_struct {
         */
        u16 in_mce;
        u8 hmi_event_available;          /* HMI event is available */
-       /*
-        * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
-        * more details
-        */
-       struct sibling_subcore_state *sibling_subcore_state;
 #endif
 
        /* Stuff for accurate time accounting */
@@ -202,6 +197,13 @@ struct paca_struct {
        struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
        struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       /*
+        * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+        * more details
+        */
+       struct sibling_subcore_state *sibling_subcore_state;
+#endif
 #endif
 };
 
index b5e88e4a171ab3e5bb005125373bc82050ae10ba..c0309c59bed8dd7e9ad3311eb3d792dc865c766b 100644 (file)
@@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 /* Allocate & free a PCI host bridge structure */
 extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
 
 #ifdef CONFIG_PCI
 extern int pcibios_vaddr_is_ioport(void __iomem *address);
index 0a74ebe934e1cbcb61105b63d54a959614bca33d..17c8380673a60637c61fec5772162bf0ae5523cb 100644 (file)
@@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void flush_tmregs_to_thread(struct task_struct *);
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *t)
-{
-}
-#endif
-
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
index b7c20f0b8fbeebe03a1c57a4c62f86c0c42e962d..c266227fdd5ba5320b2d0cfb318feab1b7815cb8 100644 (file)
@@ -308,29 +308,20 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_READ, from, n))
+       if (likely(access_ok(VERIFY_READ, from, n))) {
+               check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + n - TASK_SIZE;
-               return __copy_tofrom_user((__force void __user *)to, from,
-                               n - over) + over;
        }
+       memset(to, 0, n);
        return n;
 }
 
 static inline unsigned long copy_to_user(void __user *to,
                const void *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + n - TASK_SIZE;
-               return __copy_tofrom_user(to, (__force void __user *)from,
-                               n - over) + over;
        }
        return n;
 }
@@ -372,6 +363,9 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +392,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+
+       check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
@@ -422,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
        might_fault();
        if (likely(access_ok(VERIFY_WRITE, addr, size)))
                return __clear_user(addr, size);
-       if ((unsigned long)addr < TASK_SIZE) {
-               unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-               return __clear_user(addr, size - over) + over;
-       }
        return size;
 }
 
index f5f729c115781a8d6c9c1850f89558b06383c761..f0b238516e9b44b5afabc52a8460daaa6e81ca97 100644 (file)
@@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void);
 extern void xics_kexec_teardown_cpu(int secondary);
 extern void xics_migrate_irqs_away(void);
 extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
 #ifdef CONFIG_SMP
 extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
                               unsigned int strict_check);
index b2027a5cf50817649667cb148087912324b87893..fe4c075bcf50eda75905e7c53b4830914cef5b00 100644 (file)
@@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32)          += vdso32/
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += cpu_setup_power.o
-obj-$(CONFIG_PPC_BOOK3S_64)    += mce.o mce_power.o hmi.o
+obj-$(CONFIG_PPC_BOOK3S_64)    += mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)    += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o
index c9bc78e9c6101b2ae5016efec858c5c38b0f4158..7429556eb8df7e468b447a1b6d4c541253c06295 100644 (file)
@@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
        int n = 0, l = 0;
        char buffer[128];
 
-       n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+       n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
                       edev->phb->global_number, pdn->busno,
                       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
-       pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+       pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
                edev->phb->global_number, pdn->busno,
                PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
 
index 6b8bc0dd09d4a6e769adae004867b839bb6f4c6f..5afd03e5e8b865e3dc0b08b2293364296a884eff 100644 (file)
@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 tabort_syscall:
        /* Firstly we need to enable TM in the kernel */
        mfmsr   r10
-       li      r13, 1
-       rldimi  r10, r13, MSR_TM_LG, 63-MSR_TM_LG
+       li      r9, 1
+       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
        mtmsrd  r10, 0
 
        /* tabort, this dooms the transaction, nothing else */
-       li      r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R13)
+       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+       TABORT(R9)
 
        /*
         * Return directly to userspace. We have corrupted user register state,
@@ -382,8 +382,8 @@ tabort_syscall:
         * resume after the tbegin of the aborted transaction with the
         * checkpointed register state.
         */
-       li      r13, MSR_RI
-       andc    r10, r10, r13
+       li      r9, MSR_RI
+       andc    r10, r10, r9
        mtmsrd  r10, 1
        mtspr   SPRN_SRR0, r11
        mtspr   SPRN_SRR1, r12
index 41091fdf9bd88fbe68d9ef15aeb04c1b59b18e77..bffec73dbffc19695765a6e9b5bf95f19bbabc2a 100644 (file)
@@ -144,29 +144,14 @@ machine_check_pSeries_1:
         * vector
         */
        SET_SCRATCH0(r13)               /* save r13 */
-#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-       /* Running native on arch 2.06 or later, check if we are
-        * waking up from nap. We only handle no state loss and
-        * supervisor state loss. We do -not- handle hypervisor
-        * state loss at this time.
+       /*
+        * Running native on arch 2.06 or later, we may wake up from winkle
+        * inside a machine check. If so, the last bit of HSPRG0 will be set
+        * to 1. Hence clear it unconditionally.
         */
-       mfspr   r13,SPRN_SRR1
-       rlwinm. r13,r13,47-31,30,31
-       OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-       beq     9f
-
-       mfspr   r13,SPRN_SRR1
-       rlwinm. r13,r13,47-31,30,31
-       /* waking up from powersave (nap) state */
-       cmpwi   cr1,r13,2
-       /* Total loss of HV state is fatal. let's just stay stuck here */
-       OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-       bgt     cr1,.
-9:
-       OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
+       GET_PACA(r13)
+       clrrdi  r13,r13,1
+       SET_PACA(r13)
        EXCEPTION_PROLOG_0(PACA_EXMC)
 BEGIN_FTR_SECTION
        b       machine_check_powernv_early
@@ -500,7 +485,23 @@ machine_check_fwnmi:
        EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
        EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
-       EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
+       /*
+        * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
+        * difference that MSR_RI is not enabled, because PACA_EXMC is being
+        * used, so nested machine check corrupts it. machine_check_common
+        * enables MSR_RI.
+        */
+       ld      r12,PACAKBASE(r13)
+       ld      r10,PACAKMSR(r13)
+       xori    r10,r10,MSR_RI
+       mfspr   r11,SPRN_SRR0
+       LOAD_HANDLER(r12, machine_check_common)
+       mtspr   SPRN_SRR0,r12
+       mfspr   r12,SPRN_SRR1
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .       /* prevent speculative execution */
+
        KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
        KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -984,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 machine_check_common:
 
        mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
+       std     r10,PACA_EXMC+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       stw     r10,PACA_EXMC+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
        FINISH_NAP
        RECONCILE_IRQ_STATE(r10, r11)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       ld      r3,PACA_EXMC+EX_DAR(r13)
+       lwz     r4,PACA_EXMC+EX_DSISR(r13)
+       /* Enable MSR_RI when finished with PACA_EXMC */
+       li      r10,MSR_RI
+       mtmsrd  r10,1
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
        bl      save_nvgprs
@@ -1273,25 +1277,51 @@ machine_check_handle_early:
         * Check if thread was in power saving mode. We come here when any
         * of the following is true:
         * a. thread wasn't in power saving mode
-        * b. thread was in power saving mode with no state loss or
-        *    supervisor state loss
+        * b. thread was in power saving mode with no state loss,
+        *    supervisor state loss or hypervisor state loss.
         *
-        * Go back to nap again if (b) is true.
+        * Go back to nap/sleep/winkle mode again if (b) is true.
         */
        rlwinm. r11,r12,47-31,30,31     /* Was it in power saving mode? */
        beq     4f                      /* No, it wasn't */
        /* Thread was in power saving mode. Go back to nap again. */
        cmpwi   r11,2
-       bne     3f
-       /* Supervisor state loss */
+       blt     3f
+       /* Supervisor/Hypervisor state loss */
        li      r0,1
        stb     r0,PACA_NAPSTATELOST(r13)
 3:     bl      machine_check_queue_event
        MACHINE_CHECK_HANDLER_WINDUP
        GET_PACA(r13)
        ld      r1,PACAR1(r13)
-       li      r3,PNV_THREAD_NAP
-       b       pnv_enter_arch207_idle_mode
+       /*
+        * Check which idle state this CPU was in and go back to the same
+        * mode again.
+        */
+       lbz     r3,PACA_THREAD_IDLE_STATE(r13)
+       cmpwi   r3,PNV_THREAD_NAP
+       bgt     10f
+       IDLE_STATE_ENTER_SEQ(PPC_NAP)
+       /* No return */
+10:
+       cmpwi   r3,PNV_THREAD_SLEEP
+       bgt     2f
+       IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+       /* No return */
+
+2:
+       /*
+        * Go back to winkle. Note that this thread was woken up in the
+        * machine check handler from winkle and has not yet restored the
+        * per-subcore state. Hence, before going back to winkle, set the
+        * last bit of HSPRG0 to 1 so that if this thread is woken up
+        * again at reset vector 0x100 it gets a chance to restore the
+        * subcore state.
+        */
+       ori     r13,r13,1
+       SET_PACA(r13)
+       IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+       /* No return */
 4:
 #endif
        /*
diff --git a/arch/powerpc/kernel/hmi.c b/arch/powerpc/kernel/hmi.c
deleted file mode 100644 (file)
index e3f738e..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Hypervisor Maintenance Interrupt (HMI) handling.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
- * Copyright 2015 IBM Corporation
- * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
- */
-
-#undef DEBUG
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <asm/paca.h>
-#include <asm/hmi.h>
-
-void wait_for_subcore_guest_exit(void)
-{
-       int i;
-
-       /*
-        * NULL bitmap pointer indicates that KVM module hasn't
-        * been loaded yet and hence no guests are running.
-        * If no KVM is in use, no need to co-ordinate among threads
-        * as all of them will always be in host and no one is going
-        * to modify TB other than the opal hmi handler.
-        * Hence, just return from here.
-        */
-       if (!local_paca->sibling_subcore_state)
-               return;
-
-       for (i = 0; i < MAX_SUBCORE_PER_CORE; i++)
-               while (local_paca->sibling_subcore_state->in_guest[i])
-                       cpu_relax();
-}
-
-void wait_for_tb_resync(void)
-{
-       if (!local_paca->sibling_subcore_state)
-               return;
-
-       while (test_bit(CORE_TB_RESYNC_REQ_BIT,
-                               &local_paca->sibling_subcore_state->flags))
-               cpu_relax();
-}
index ba79d15f4ddd7c0d8ce946e15098d977a00338fa..bd739fed26e3203aae73807399c43a939c248d38 100644 (file)
                                PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
                                PSSCR_MTL_MASK
 
-/* Idle state entry routines */
-
-#define        IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
-       /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
-       std     r0,0(r1);                                       \
-       ptesync;                                                \
-       ld      r0,0(r1);                                       \
-1:     cmp     cr0,r0,r0;                                      \
-       bne     1b;                                             \
-       IDLE_INST;                                              \
-       b       .
-
        .text
 
 /*
@@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop)
  * cr3 - set to gt if waking up with partial/complete hypervisor state loss
  */
 _GLOBAL(pnv_restore_hyp_resource)
-       ld      r2,PACATOC(r13);
 BEGIN_FTR_SECTION
+       ld      r2,PACATOC(r13);
        /*
         * POWER ISA 3. Use PSSCR to determine if we
         * are waking up from deep idle state
@@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         */
        clrldi  r5,r13,63
        clrrdi  r13,r13,1
+
+       /* Now that we are sure r13 is corrected, load TOC */
+       ld      r2,PACATOC(r13);
        cmpwi   cr4,r5,1
        mtspr   SPRN_HSPRG0,r13
 
@@ -420,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  *
  * r13 - PACA
  * cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
  */
 _GLOBAL(pnv_wakeup_tb_loss)
        ld      r1,PACAR1(r13)
@@ -462,7 +453,7 @@ lwarx_loop2:
         * At this stage
         * cr2 - eq if first thread to wakeup in core
         * cr3-  gt if waking up with partial/complete hypervisor state loss
-        * cr4 - eq if waking up from complete hypervisor state loss.
+        * cr4 - gt or eq if waking up from complete hypervisor state loss.
         */
 
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -490,7 +481,7 @@ first_thread_in_subcore:
         * If waking up from sleep, subcore state is not lost. Hence
         * skip subcore state restore
         */
-       bne     cr4,subcore_state_restored
+       blt     cr4,subcore_state_restored
 
        /* Restore per-subcore state */
        ld      r4,_SDR1(r1)
@@ -535,7 +526,7 @@ timebase_resync:
         * If waking up from sleep, per core state is not lost, skip to
         * clear_lock.
         */
-       bne     cr4,clear_lock
+       blt     cr4,clear_lock
 
        /*
         * First thread in the core to wake up and its waking up with
@@ -566,7 +557,7 @@ common_exit:
         * If waking up from sleep, hypervisor state is not lost. Hence
         * skip hypervisor state restore.
         */
-       bne     cr4,hypervisor_state_restored
+       blt     cr4,hypervisor_state_restored
 
        /* Waking up from winkle */
 
index 3ed8ec09b5c93292ca1c584c9ed637870aa1369a..e785cc9e1ecd8bb0e442168412278bdbcf70afdd 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
 #include <asm/code-patching.h>
index ef267fd9dd225a3c7f3dfbcacc057a0bf28dca8e..5e7ece0fda9f5b802eb561ce51774cfb625032a6 100644 (file)
@@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
        mce->in_use = 1;
 
        mce->initiator = MCE_INITIATOR_CPU;
-       if (handled)
+       /* Mark it recovered if we have handled it and MSR(RI=1). */
+       if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
index a5c0153ede37f21d6dd8f47beac764587789fbc5..e58908066b0e4e3fd3c2729ce7a7c49cad7f8d0e 100644 (file)
@@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops);
 static int get_phb_number(struct device_node *dn)
 {
        int ret, phb_id = -1;
+       u32 prop_32;
        u64 prop;
 
        /*
@@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn)
         * reading "ibm,opal-phbid", only present in OPAL environment.
         */
        ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
-       if (ret)
-               ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+       if (ret) {
+               ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
+               prop = prop_32;
+       }
 
        if (!ret)
                phb_id = (int)(prop & (MAX_PHBS - 1));
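
The prop_32 temporary is what makes this hunk an endianness fix: writing a u32 through (u32 *)&prop fills the first four bytes of the u64, which on big-endian are the high-order bytes, so the PHB id ended up shifted out of the masked range. A small model of the bug and the fix, with invented values:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t prop = 0;
	uint32_t val = 0x1234;

	/* Old pattern: store a u32 into the first four bytes of a u64.
	 * On big-endian those are the HIGH bytes, so prop reads back as
	 * 0x1234 << 32 rather than 0x1234. */
	memcpy(&prop, &val, sizeof(val));

	/* Fixed pattern: read into a real u32, then assign; the integer
	 * conversion is endian-safe everywhere. */
	uint32_t prop_32 = val;
	uint64_t fixed = prop_32;

	assert(fixed == 0x1234);
	(void)prop;
	return 0;
}
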
@@ -150,6 +153,42 @@ void pcibios_free_controller(struct pci_controller *phb)
 }
 EXPORT_SYMBOL_GPL(pcibios_free_controller);
 
+/*
+ * This function is used to call pcibios_free_controller()
+ * in a deferred manner: a callback from the PCI subsystem.
+ *
+ * _*DO NOT*_ call pcibios_free_controller() explicitly if
+ * this is used (or it may access an invalid *phb pointer).
+ *
+ * The callback occurs when all references to the root bus
+ * are dropped (e.g., child buses/devices and their users).
+ *
+ * It's called as .release_fn() of 'struct pci_host_bridge'
+ * which is associated with the 'struct pci_controller.bus'
+ * (root bus) - it expects .release_data to hold a pointer
+ * to 'struct pci_controller'.
+ *
+ * In order to use it, register .release_fn()/release_data
+ * like this:
+ *
+ * pci_set_host_bridge_release(bridge,
+ *                             pcibios_free_controller_deferred,
+ *                             (void *) phb);
+ *
+ * e.g. in the pcibios_root_bridge_prepare() callback from
+ * pci_create_root_bus().
+ */
+void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
+{
+       struct pci_controller *phb = (struct pci_controller *)
+                                        bridge->release_data;
+
+       pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
+
+       pcibios_free_controller(phb);
+}
+EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
+
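
A hedged sketch of the registration the comment block describes, assuming it is done from a platform's pcibios_root_bridge_prepare() hook; everything except pci_set_host_bridge_release(), pci_bus_to_host() and the new export is elided or assumed:

/* Sketch only: free the controller once the last root-bus ref drops. */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = pci_bus_to_host(bridge->bus);

	pci_set_host_bridge_release(bridge,
				    pcibios_free_controller_deferred,
				    (void *)phb);
	return 0;
}
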
 /*
  * The function is used to return the minimal alignment
  * for memory or I/O windows of the associated P2P bridge.
index 58ccf86415b46cd5c2db593424ecde772cd0d959..9ee2623e0f674977ee8e8f07bbfb1297a2f05dc8 100644 (file)
@@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #endif
 }
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void flush_tmregs_to_thread(struct task_struct *tsk)
-{
-       /*
-        * Process self tracing is not yet supported through
-        * ptrace interface. Ptrace generic code should have
-        * prevented this from happening in the first place.
-        * Warn once here with the message, if some how it
-        * is attempted.
-        */
-       WARN_ONCE(tsk == current,
-               "Not expecting ptrace on self: TM regs may be incorrect\n");
-
-       /*
-        * If task is not current, it should have been flushed
-        * already to it's thread_struct during __switch_to().
-        */
-}
-#endif
-
 struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
 {
index 6ee4b72cda4201840cf2b85f38661831c5e37233..d3eff99e938c121f8a1ec5f68ab2f2b0a75d1604 100644 (file)
@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
        OV4_MIN_ENT_CAP,                /* minimum VP entitled capacity */
 
        /* option vector 5: PAPR/OF options */
-       VECTOR_LENGTH(18),              /* length */
+       VECTOR_LENGTH(21),              /* length */
        0,                              /* don't ignore, don't halt */
        OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
        OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
        0,
        0,
        OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
-       OV5_FEAT(OV5_PFO_HW_842),
-       OV5_FEAT(OV5_SUB_PROCESSORS),
+       OV5_FEAT(OV5_PFO_HW_842),                               /* Byte 17 */
+       0,                                                      /* Byte 18 */
+       0,                                                      /* Byte 19 */
+       0,                                                      /* Byte 20 */
+       OV5_FEAT(OV5_SUB_PROCESSORS),                           /* Byte 21 */
 
        /* option vector 6: IBM PAPR hints */
        VECTOR_LENGTH(3),               /* length */
@@ -2940,7 +2943,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 
        /* Don't print anything after quiesce under OPAL, it crashes OFW */
        if (of_platform != PLATFORM_OPAL) {
-               prom_printf("Booting Linux via __start() ...\n");
+               prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
                prom_debug("->dt_header_start=0x%x\n", hdr);
        }
 
index 4f3c5756cc09898f984de4cc6cf6fc7c1ba830ac..bf91658a8a406b051e2072e2ce948317abfefe29 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/switch_to.h>
+#include <asm/tm.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = {
        REG_OFFSET_END,
 };
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+       /*
+        * If task is not current, it will have been flushed already to
+        * it's thread_struct during __switch_to().
+        * its thread_struct during __switch_to().
+        * A reclaim flushes ALL the state.
+        */
+
+       if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(TM_CAUSE_SIGNAL);
+
+}
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
 /**
  * regs_query_register_offset() - query register offset from its name
  * @name:      the name of a register
index c3e861df4b203ce5be8e0e9f0fb8f4e236d41cd5..24ec3ea4b3a2eeeeae2e0f713226acd252bab806 100644 (file)
@@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  * and we are running with enough of the MMU enabled to have our
  * proper kernel virtual addresses
  *
- * Find out what kind of machine we're on and save any data we need
- * from the early boot process (devtree is copied on pmac by prom_init()).
- * This is called very early on the boot process, after a minimal
- * MMU environment has been set up but before MMU_init is called.
+ * We do the initial parsing of the flat device-tree and prepare
+ * for the MMU to be fully initialized.
  */
 extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
+       /* Configure static keys first, now that we're relocated. */
+       setup_feature_keys();
+
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
 
index eafb9a79e0116b600624a16212c2a02bfb46e363..7ac8e6eaab5ba24566f1f6fe06829e22727e86ea 100644 (file)
@@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
 
        /* Apply all the dynamic patching */
        apply_feature_fixups();
+       setup_feature_keys();
 
        /* Initialize the hash table or TLB handling */
        early_init_mmu();
index b6aa378aff636800a668ac67e525827fefd1ae76..a7daf749b97f272ba4819646b9f2515fb845c29a 100644 (file)
@@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
        if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
                goto bad;
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * If there is a transactional state then throw it away.
+        * The purpose of a sigreturn is to destroy all traces of the
+        * signal frame; this includes any transactional state created
+        * within it. We only check for suspended, since we can never be
+        * transactionally active in the kernel; if somehow we are, there
+        * is nothing better to do than go ahead and Bad Thing later.
+        * The cause is not important as there will never be a
+        * recheckpoint so it's not user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+
        if (__get_user(tmp, &rt_sf->uc.uc_link))
                goto bad;
        uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
index 7e49984d4331543de9a3d88f24d23a67b48e76df..70409bb90a957f8d62a5f1b5c1be69aee12e6c91 100644 (file)
@@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto badframe;
        set_current_blocked(&set);
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * If there is a transactional state then throw it away.
+        * The purpose of a sigreturn is to destroy all traces of the
+        * signal frame; this includes any transactional state created
+        * within it. We only check for suspended, since we can never be
+        * transactionally active in the kernel; if somehow we are, there
+        * is nothing better to do than go ahead and Bad Thing later.
+        * The cause is not important as there will never be a
+        * recheckpoint so it's not user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+
        if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
                goto badframe;
        if (MSR_TM_ACTIVE(msr)) {
index 25a39052bf6b5653c3ab0317298864a2873fd911..9c6f3fd580597e5fdfcc5fc46031d9be0aaac74b 100644 (file)
@@ -830,7 +830,7 @@ int __cpu_disable(void)
 
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
-       for (i = 0; i < threads_per_core; i++) {
+       for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
index 2cb589264cb748869e2228ff1806e2067af65fe4..62859ebe0062630bae83eaa328ea0ace0654a713 100644 (file)
@@ -25,7 +25,8 @@
 #include <linux/user.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/module.h>      /* print_modules */
 #include <linux/prctl.h>
 #include <linux/delay.h>
 #include <linux/kprobes.h>
index 6767605ea8da2eddb54152dbf92538467fe2dc3e..4111d30badfad30fa1eb7dd29abb6a26d8a338de 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/security.h>
 #include <linux/memblock.h>
 
+#include <asm/cpu_has_feature.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
index cbabd143acae8e9db3b3883be876527b68b83574..78a7449bf489d49a400ae45703ce77bdc7206f4b 100644 (file)
@@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
        $(call if_changed,vdso32ld)
 
 # strip rule for the .so file
@@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
        $(call if_changed_dep,vdso32as)
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
 
index c710802b8fb685a7cb5815d86387f7debb356d9e..366ae09b14c1e3a5179987d312ec4d1fedf86313 100644 (file)
@@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
        $(call if_changed,vdso64ld)
 
 # strip rule for the .so file
@@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
        $(call if_changed_dep,vdso64as)
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
 
index 1f9e5529e692555a5d76f3519fb8c87875a73793..855d4b95d75255a328e5320765427125ea8b49de 100644 (file)
@@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 
 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+       book3s_hv_hmi.o \
        book3s_hv_rmhandlers.o \
        book3s_hv_rm_mmu.o \
        book3s_hv_ras.o \
diff --git a/arch/powerpc/kvm/book3s_hv_hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c
new file mode 100644 (file)
index 0000000..e3f738e
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Hypervisor Maintenance Interrupt (HMI) handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/paca.h>
+#include <asm/hmi.h>
+
+void wait_for_subcore_guest_exit(void)
+{
+       int i;
+
+       /*
+        * NULL bitmap pointer indicates that KVM module hasn't
+        * been loaded yet and hence no guests are running.
+        * If no KVM is in use, no need to co-ordinate among threads
+        * as all of them will always be in host and no one is going
+        * to modify TB other than the opal hmi handler.
+        * Hence, just return from here.
+        */
+       if (!local_paca->sibling_subcore_state)
+               return;
+
+       for (i = 0; i < MAX_SUBCORE_PER_CORE; i++)
+               while (local_paca->sibling_subcore_state->in_guest[i])
+                       cpu_relax();
+}
+
+void wait_for_tb_resync(void)
+{
+       if (!local_paca->sibling_subcore_state)
+               return;
+
+       while (test_bit(CORE_TB_RESYNC_REQ_BIT,
+                               &local_paca->sibling_subcore_state->flags))
+               cpu_relax();
+}
index a75ba38a2d81415dfcbe497c27b1329fdd6c8736..05aa11399a7867c1a4148d0f9eb4bf80bca3ee9d 100644 (file)
@@ -1329,20 +1329,16 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
        xics->kvm = kvm;
 
        /* Already there ? */
-       mutex_lock(&kvm->lock);
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;
-       mutex_unlock(&kvm->lock);
 
        if (ret) {
                kfree(xics);
                return ret;
        }
 
-       xics_debugfs_init(xics);
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
@@ -1354,9 +1350,17 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
        return 0;
 }
 
+static void kvmppc_xics_init(struct kvm_device *dev)
+{
+       struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
+
+       xics_debugfs_init(xics);
+}
+
 struct kvm_device_ops kvm_xics_ops = {
        .name = "kvm-xics",
        .create = kvmppc_xics_create,
+       .init = kvmppc_xics_init,
        .destroy = kvmppc_xics_free,
        .set_attr = xics_set_attr,
        .get_attr = xics_get_attr,
index d90870a66b60b4b0820a3044466f70716e1ed610..aa8214f30c920e05c6bf62349c6ea52e0ca68dc9 100644 (file)
@@ -127,17 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
        stw     r7,12(r1)
        stw     r8,8(r1)
 
-       andi.   r0,r4,1                 /* is destination address even ? */
-       cmplwi  cr7,r0,0
        addic   r12,r6,0
        addi    r6,r4,-4
        neg     r0,r4
        addi    r4,r3,-4
        andi.   r0,r0,CACHELINE_MASK    /* # bytes to start of cache line */
+       crset   4*cr7+eq
        beq     58f
 
        cmplw   0,r5,r0                 /* is this more than total to do? */
        blt     63f                     /* if not much to do */
+       rlwinm  r7,r6,3,0x8
+       rlwnm   r12,r12,r7,0,31 /* odd destination address: rotate one byte */
+       cmplwi  cr7,r7,0        /* is destination address even ? */
        andi.   r8,r0,3                 /* get it word-aligned first */
        mtctr   r8
        beq+    61f
@@ -237,7 +239,7 @@ _GLOBAL(csum_partial_copy_generic)
 66:    addze   r3,r12
        addi    r1,r1,16
        beqlr+  cr7
-       rlwinm  r3,r3,8,0,31    /* swap bytes for odd destination */
+       rlwinm  r3,r3,8,0,31    /* odd destination address: rotate one byte */
        blr
 
 /* read fault */
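
The checksum rewrite leans on a property of the ones'-complement sum: byte-swapping every 16-bit input word byte-swaps the result (RFC 1071's byte-order independence), so a copy to an odd-aligned destination can be repaired with a single one-byte rotate of the running checksum. A standalone demonstration:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Ones'-complement sum of 16-bit words, with end-around carry. */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint32_t s = 0;

	while (n--) {
		s += *w++;
		s = (s & 0xffff) + (s >> 16);
	}
	return (uint16_t)s;
}

static uint16_t swap16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint16_t w[64], ws[64];

	srand(1);
	for (int i = 0; i < 64; i++) {
		w[i] = (uint16_t)rand();
		ws[i] = swap16(w[i]);	/* words as seen one byte off */
	}
	/* Swapping every operand swaps the sum, so one rotate at the
	 * end corrects for an odd destination address. */
	assert(csum16(ws, 64) == swap16(csum16(w, 64)));
	return 0;
}
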
index 74145f02ad417b496ceba07b0f114a2bbd77bc75..043415f0bdb1646fa85f7bb26d04f0241c68ff63 100644 (file)
@@ -188,7 +188,10 @@ void __init apply_feature_fixups(void)
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
        do_final_fixups();
+}
 
+void __init setup_feature_keys(void)
+{
        /*
         * Initialise jump label. This causes all the cpu/mmu_has_feature()
         * checks to take on their correct polarity based on the current set of
index a4db22f6502150a25a082769edc54ee58bf7211d..bb1ffc559f38b799b5f93b9aec9a931d0088143f 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
index dfdb90cb44039f364d92cecd2e920eccb25d07e5..9f1983404e1a8fdcf6d9c7c809263d243eadf1e7 100644 (file)
@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        b       slb_finish_load_1T
 
-0:
+0:     /*
+        * For userspace addresses, make sure this is region 0.
+        */
+       cmpdi   r9, 0
+       bne     8f
+
        /* when using slices, we extract the psize off the slice bitmaps
         * and then we need to get the sllp encoding off the mmu_psize_defs
         * array.
index 8eb82b043dd859195179d267b177f2fd2331639c..d93dd4acf40bd277f0af1668d9fed183ffd8dc52 100644 (file)
@@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
        .remove = mpc512x_lpbfifo_remove,
        .driver = {
                .name = DRV_NAME,
-               .owner = THIS_MODULE,
                .of_match_table = mpc512x_lpbfifo_match,
        },
 };
index dbcd0303afed591803e660cea96591fa421eaa81..63c5ab6489c94d4c3e3e14c1f381bcee519c9c29 100644 (file)
@@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
 static struct i2c_driver mcu_driver = {
        .driver = {
                .name = "mcu-mpc8349emitx",
-               .owner = THIS_MODULE,
                .of_match_table = mcu_of_match_table,
        },
        .probe = mcu_probe,
index 5be15cff758df193bd935baa50867a5dc9e3b063..2975754c65ea9514e4d7d10284a2beb42d6dd3cb 100644 (file)
@@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
        gang = alloc_spu_gang();
        SPUFS_I(inode)->i_ctx = NULL;
        SPUFS_I(inode)->i_gang = gang;
-       if (!gang)
+       if (!gang) {
+               ret = -ENOMEM;
                goto out_iput;
+       }
 
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
index dafba1057a477b12d4d13b1a028b4071e9c8bf33..dfd310031549d9007252a0416688a0605ebccd4d 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial_core.h>
 #include <linux/of_platform.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
index 80804f9916eee7f8957dd6d9bda81fd4b086c0df..f97bab8e37a2688e4d3ef46fcf6f8150d1cf0255 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/pci.h>
 #include <linux/kdev_t.h>
 #include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
index 309d9ccccd509c83097386dca03621f4db3b7ca3..c61667e8bb06c51385fbe21c18b288aeb889f2da 100644 (file)
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
        if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
            !firmware_has_feature(FW_FEATURE_LPAR)) {
                dev->dev.archdata.dma_ops = &dma_direct_ops;
+               /*
+                * Set the coherent DMA mask to prevent the iommu
+                * from being used unnecessarily.
+                */
+               dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
                return;
        }
 #endif
index 2ee96431f7360e1d8d63ece45520416860c62428..4c827826c05eb13b09f4e0cd9f9c73d6967ec9a9 100644 (file)
@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
        uint32_t dump_id, dump_size, dump_type;
        struct dump_obj *dump;
        char name[22];
+       struct kobject *kobj;
 
        rc = dump_read_info(&dump_id, &dump_size, &dump_type);
        if (rc != OPAL_SUCCESS)
@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
         * that gracefully and not create two conflicting
         * entries.
         */
-       if (kset_find_obj(dump_kset, name))
+       kobj = kset_find_obj(dump_kset, name);
+       if (kobj) {
+               /* Drop reference added by kset_find_obj() */
+               kobject_put(kobj);
                return 0;
+       }
 
        dump = create_dump_obj(dump_id, dump_size, dump_type);
        if (!dump)
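
Both this hunk and the matching elog change below fix the same leak: kset_find_obj() returns its match with an extra reference held, so code that merely probes for existence must drop that reference or the kobject can never be released. A sketch of the pattern; the helper name is invented:

#include <linux/kobject.h>

/* Does an object with this name already exist in the kset? */
static bool name_already_registered(struct kset *kset, const char *name)
{
	struct kobject *kobj = kset_find_obj(kset, name);

	if (!kobj)
		return false;
	kobject_put(kobj);	/* balance the ref from kset_find_obj() */
	return true;
}
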
index 37f959bf392e72a8a4bc7dee92181846b1383458..f2344cbd2f464cb93a3afbcbf21dc8263069b555 100644 (file)
@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
        uint64_t elog_type;
        int rc;
        char name[2+16+1];
+       struct kobject *kobj;
 
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
         * that gracefully and not create two conflicting
         * entries.
         */
-       if (kset_find_obj(elog_kset, name))
+       kobj = kset_find_obj(elog_kset, name);
+       if (kobj) {
+               /* Drop reference added by kset_find_obj() */
+               kobject_put(kobj);
                return IRQ_HANDLED;
+       }
 
        create_elog_obj(log_id, elog_size, elog_type);
 
index e505223b4ec5ed2d82bcb08b2e718a6b64601f18..ed8bba68a162120d282bfc2286a674b049e44d5c 100644 (file)
@@ -228,7 +228,8 @@ int __init opal_event_init(void)
                }
 
                /* Install interrupt handler */
-               rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+               rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+                                "opal", NULL);
                if (rc) {
                        irq_dispose_mapping(virq);
                        pr_warn("Error %d requesting irq %d (0x%x)\n",
index 8b4fc68cebcb2f4c02a716074cb1b1c1045d0d1f..6c9a65b52e63b589edbe809a4b65851ca2ea2f79 100644 (file)
@@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs,
 
        if (!(regs->msr & MSR_RI)) {
                /* If MSR_RI isn't set, we cannot recover */
+               pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
                recovered = 0;
        } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
                /* Platform corrected itself */
index 6b9528307f620e639be196d3b691e6ac8d159d28..38a5c657ffd399fd31c76509360fe2d79dac2f9c 100644 (file)
@@ -111,10 +111,24 @@ static int __init iommu_setup(char *str)
 }
 early_param("iommu", iommu_setup);
 
-static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
 {
-       return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
-               (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
+       /*
+        * WARNING: We cannot rely on the resource flags. The Linux PCI
+        * allocation code sometimes decides to put a 64-bit prefetchable
+        * BAR in the 32-bit window, so we have to compare the addresses.
+        *
+        * For simplicity we only test resource start.
+        */
+       return (r->start >= phb->ioda.m64_base &&
+               r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
+}
+
+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+       unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+       return (resource_flags & flags) == flags;
 }
 
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
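
pnv_pci_is_m64() now classifies a BAR by where it actually landed rather than by its resource flags, since (per the comment above) a 64-bit prefetchable BAR can be placed in the 32-bit window. A minimal userspace model of the range test, with an invented window geometry:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct window { uint64_t base, size; };

/* Only the start is tested, as in the hunk above. */
static bool in_m64_window(const struct window *w, uint64_t bar_start)
{
	return bar_start >= w->base && bar_start < w->base + w->size;
}

int main(void)
{
	struct window m64 = { .base = 0x100000000ULL, .size = 0x40000000ULL };

	assert(in_m64_window(&m64, 0x180000000ULL));
	/* Flags said 64-bit prefetchable, but it landed below 4 GiB: */
	assert(!in_m64_window(&m64, 0xc0000000ULL));
	return 0;
}
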
@@ -142,7 +156,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
 
 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
-       unsigned long pe = phb->ioda.total_pe_num - 1;
+       long pe;
 
        for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
                if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
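
The counter type change above is the whole fix: with an unsigned counter the loop condition pe >= 0 is always true, so the decrement wraps through ULONG_MAX instead of terminating and the bitmap scan runs off the end. A standalone illustration (total stands in for total_pe_num):

	unsigned long total = 16;

	/* BUG: the condition is always true for an unsigned type, so pe
	 * wraps to ULONG_MAX when decremented past zero. */
	unsigned long pe;
	for (pe = total - 1; pe >= 0; pe--)
		;	/* never exits; bitmap ops see huge indices */

	/* FIX: a signed counter drops out of the loop at -1. */
	long spe;
	for (spe = total - 1; spe >= 0; spe--)
		;
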
@@ -155,11 +169,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
 {
        struct pnv_phb *phb = pe->phb;
+       unsigned int pe_num = pe->pe_number;
 
        WARN_ON(pe->pdev);
 
        memset(pe, 0, sizeof(struct pnv_ioda_pe));
-       clear_bit(pe->pe_number, phb->ioda.pe_alloc);
+       clear_bit(pe_num, phb->ioda.pe_alloc);
 }
 
 /* The default M64 BAR is shared by all PEs */
@@ -229,7 +244,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
        sgsz = phb->ioda.m64_segsize;
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                r = &pdev->resource[i];
-               if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+               if (!r->parent || !pnv_pci_is_m64(phb, r))
                        continue;
 
                start = _ALIGN_DOWN(r->start - base, sgsz);
@@ -1877,7 +1892,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
                                        unsigned shift, unsigned long index,
                                        unsigned long npages)
 {
-       __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+       __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
        unsigned long start, end, inc;
 
        /* We'll invalidate DMA address in PE scope */
@@ -2209,7 +2224,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
 
        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &pe->table_group);
-       pnv_pci_phb3_tce_invalidate_pe(pe);
+       pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        return 0;
 }
@@ -2347,7 +2362,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
        if (ret)
                pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
        else
-               pnv_pci_phb3_tce_invalidate_pe(pe);
+               pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
 
@@ -2863,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || res->parent)
                        continue;
-               if (!pnv_pci_is_mem_pref_64(res->flags)) {
+               if (!pnv_pci_is_m64_flags(res->flags)) {
                        dev_warn(&pdev->dev, "Don't support SR-IOV with"
                                        " non M64 VF BAR%d: %pR. \n",
                                 i, res);
@@ -2958,7 +2973,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
                        index++;
                }
        } else if ((res->flags & IORESOURCE_MEM) &&
-                  !pnv_pci_is_mem_pref_64(res->flags)) {
+                  !pnv_pci_is_m64(phb, res)) {
                region.start = res->start -
                               phb->hose->mem_offset[0] -
                               phb->ioda.m32_pci_base;
@@ -3083,9 +3098,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
                bridge = bridge->bus->self;
        }
 
-       /* We fail back to M32 if M64 isn't supported */
-       if (phb->ioda.m64_segsize &&
-           pnv_pci_is_mem_pref_64(type))
+       /*
+        * We fall back to M32 if M64 isn't supported. We enforce the M64
+        * alignment for any 64-bit resource, PCIe doesn't care and
+        * bridges only do 64-bit prefetchable anyway.
+        */
+       if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
                return phb->ioda.m64_segsize;
        if (type & IORESOURCE_MEM)
                return phb->ioda.m32_segsize;
@@ -3125,7 +3143,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
                w = NULL;
                if (r->flags & type & IORESOURCE_IO)
                        w = &hose->io_resource;
-               else if (pnv_pci_is_mem_pref_64(r->flags) &&
+               else if (pnv_pci_is_m64(phb, r) &&
                         (type & IORESOURCE_PREFETCH) &&
                         phb->ioda.m64_segsize)
                        w = &hose->mem_resources[1];
@@ -3392,12 +3410,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
        struct pnv_phb *phb = pe->phb;
        struct pnv_ioda_pe *slave, *tmp;
 
-       /* Release slave PEs in compound PE */
-       if (pe->flags & PNV_IODA_PE_MASTER) {
-               list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
-                       pnv_ioda_release_pe(slave);
-       }
-
        list_del(&pe->list);
        switch (phb->type) {
        case PNV_PHB_IODA1:
@@ -3412,7 +3424,26 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 
        pnv_ioda_release_pe_seg(pe);
        pnv_ioda_deconfigure_pe(pe->phb, pe);
-       pnv_ioda_free_pe(pe);
+
+       /* Release slave PEs in the compound PE */
+       if (pe->flags & PNV_IODA_PE_MASTER) {
+               list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
+                       list_del(&slave->list);
+                       pnv_ioda_free_pe(slave);
+               }
+       }
+
+       /*
+        * The PE for the root bus can be removed because of hotplug in EEH
+        * recovery for a fenced PHB error. We need to mark the PE dead so
+        * that it can be populated again in the PCI hot add path. The PE
+        * shouldn't be destroyed as it's a globally reserved resource.
+        */
+       if (phb->ioda.root_pe_populated &&
+           phb->ioda.root_pe_idx == pe->pe_number)
+               phb->ioda.root_pe_populated = false;
+       else
+               pnv_ioda_free_pe(pe);
 }
 
 static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3428,7 +3459,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return;
 
+       /*
+        * PCI hotplug can happen as part of EEH error recovery. In that
+        * scenario the @pdn isn't removed and re-added afterwards, so we
+        * must set the PE number in @pdn to an invalid one. Otherwise the
+        * PE's device count is decreased on removing devices but never
+        * increased on adding them, leaving the count unbalanced and
+        * eventually breaking the normal PCI hotplug path.
+        */
        pe = &phb->ioda.pe_array[pdn->pe_number];
+       pdn->pe_number = IODA_INVALID_PE;
+
        WARN_ON(--pe->device_count < 0);
        if (pe->device_count == 0)
                pnv_ioda_release_pe(pe);
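
The release-path rework above boils down to two rules: invalidate any cached handle (the PE number stashed in @pdn) before the PE it names can be reused, and never free the reserved root-bus PE, only mark it unpopulated so a later hot-add can repopulate it. A hedged sketch of that shape with simplified, illustrative field names (not the driver's actual layout):

	pe = &phb->pe_array[pdn->pe_number];
	pdn->pe_number = INVALID_PE;		/* drop the cached handle first */

	if (--pe->device_count == 0) {
		if (pe->pe_number == phb->root_pe_idx)
			phb->root_pe_populated = false;	/* reserved: keep it */
		else
			free_pe(pe);			/* ordinary PE: free it */
	}
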
index 43f7beb2902d0b5d5b1001cff100fdf67c650fb4..76ec104e88beea0e89e3473d988e23fbdb7312c1 100644 (file)
@@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
        return dlpar_update_device_tree_lmb(lmb);
 }
 
-static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
-{
-       unsigned long section_nr;
-       struct mem_section *mem_sect;
-       struct memory_block *mem_block;
-
-       section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
-       mem_sect = __nr_to_section(section_nr);
-
-       mem_block = find_memory_block(mem_sect);
-       return mem_block;
-}
-
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
@@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
 
 static int dlpar_add_lmb(struct of_drconf_cell *);
 
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+       unsigned long section_nr;
+       struct mem_section *mem_sect;
+       struct memory_block *mem_block;
+
+       section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+       mem_sect = __nr_to_section(section_nr);
+
+       mem_block = find_memory_block(mem_sect);
+       return mem_block;
+}
+
 static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
 {
        struct memory_block *mem_block;
index fe16a50700de3d370ac07bad040d6d55cee6b9ef..09eba5a9929afc6818dac60bac41079c89f6330b 100644 (file)
@@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 
        bus = bridge->bus;
 
+       /* Rely on the pcibios_free_controller_deferred() callback. */
+       pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
+                                       (void *) pci_bus_to_host(bus));
+
        dn = pcibios_get_phb_of_node(bus);
        if (!dn)
                return 0;
index 906dbaa97fe2850abba20b597b55b4a2a14627ae..547fd13e4f8e88a1d6d3e49fb270649c349fea5b 100644 (file)
@@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
                release_resource(res);
        }
 
-       /* Free pci_controller data structure */
-       pcibios_free_controller(phb);
+       /*
+        * The pci_controller data structure is freed by
+        * the pcibios_free_controller_deferred() callback;
+        * see pseries_root_bridge_prepare().
+        */
 
        return 0;
 }
index 4ffcaa6f867095cdcabfa51c9a206c0efd138f0c..a39d20e8623d4ae7684a5ebb43fc50f0ba793488 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/root_dev.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
-#include <linux/kexec.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -66,6 +65,7 @@
 #include <asm/eeh.h>
 #include <asm/reg.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 
 #include "pseries.h"
 
index 6c110994d902f858a82006fe18fb46658f3fc2b0..81d49476c47e0aa9d44a0981cb458723b1d760e9 100644 (file)
@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
 
 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm1_gpio16_chip *cpm1_gc =
+               container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
        struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
        cpm1_gc->cpdata = in_be16(&iop->dat);
@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
 
 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm1_gpio32_chip *cpm1_gc =
+               container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
        struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
        cpm1_gc->cpdata = in_be32(&iop->dat);
index 911456d177130f2f325b2879751da82b1ce092cb..947f42007734c1c22d5f6eb02ae72f110cd5f44b 100644 (file)
@@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
 
 static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm2_gpio32_chip *cpm2_gc =
+               container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
        struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
        cpm2_gc->cpdata = in_be32(&iop->dat);
index 68e7c0dd2e45551143b6afc079fd185d2ca89a80..3cc7cace194aecc3645eab70ec585c6c4b0088a9 100644 (file)
@@ -23,7 +23,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
index 0031eda320c3de7d94e3d9198b0cbc9087a7aec4..385e7aa9e2731cabf13cfccef35a2934719cdbc3 100644 (file)
@@ -1,6 +1,7 @@
 config PPC_XICS
        def_bool n
        select PPC_SMP_MUXED_IPI
+       select HARDIRQS_SW_RESEND
 
 config PPC_ICP_NATIVE
        def_bool n
index 57d72f10a97f69de868b6053beccd7533760870e..9114243fa1b5a4752eab8a85469a18e6de2a324d 100644 (file)
 
 static void icp_opal_teardown_cpu(void)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
        /* Clear any pending IPI */
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 }
 
 static void icp_opal_flush_ipi(void)
@@ -101,14 +101,16 @@ static void icp_opal_eoi(struct irq_data *d)
 
 static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
-       opal_int_set_mfrr(cpu, IPI_PRIORITY);
+       int hw_cpu = get_hard_smp_processor_id(cpu);
+
+       opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 
        return smp_ipi_demux();
 }
index 27c936c080a66ffb5523931ac74d6c4c3a01849d..1c6bf4b66f56854d0717a644c75dea70e3fabde7 100644 (file)
@@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = {
        .irq_mask = ics_opal_mask_irq,
        .irq_unmask = ics_opal_unmask_irq,
        .irq_eoi = NULL, /* Patched at init time */
-       .irq_set_affinity = ics_opal_set_affinity
+       .irq_set_affinity = ics_opal_set_affinity,
+       .irq_set_type = xics_set_irq_type,
+       .irq_retrigger = xics_retrigger,
 };
 
 static int ics_opal_map(struct ics *ics, unsigned int virq);
index 3854dd41558d2697e73f9d72f9dffb27327952a8..78ee5c778ef8c7650ccea536aff7fb5cefc0bca3 100644 (file)
@@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = {
        .irq_mask = ics_rtas_mask_irq,
        .irq_unmask = ics_rtas_unmask_irq,
        .irq_eoi = NULL, /* Patched at init time */
-       .irq_set_affinity = ics_rtas_set_affinity
+       .irq_set_affinity = ics_rtas_set_affinity,
+       .irq_set_type = xics_set_irq_type,
+       .irq_retrigger = xics_retrigger,
 };
 
 static int ics_rtas_map(struct ics *ics, unsigned int virq)
index a795a5f0301c482ec8edeb3047464ea709f7326b..9d530f47958857621ad19655ac12f975f17e090d 100644 (file)
@@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
 
        pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
 
-       /* They aren't all level sensitive but we just don't really know */
-       irq_set_status_flags(virq, IRQ_LEVEL);
+       /*
+        * Mark interrupts as edge sensitive by default so that resend
+        * actually works. The device-tree parsing will turn the LSIs
+        * back to level.
+        */
+       irq_clear_status_flags(virq, IRQ_LEVEL);
 
        /* Don't call into ICS for IPIs */
        if (hw == XICS_IPI) {
@@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
 
 {
-       /* Current xics implementation translates everything
-        * to level. It is not technically right for MSIs but this
-        * is irrelevant at this point. We might get smarter in the future
-        */
        *out_hwirq = intspec[0];
-       *out_flags = IRQ_TYPE_LEVEL_LOW;
 
+       /*
+        * If intsize is at least 2, we look for the type in the second cell;
+        * we assume the LSB indicates a level interrupt.
+        */
+       if (intsize > 1) {
+               if (intspec[1] & 1)
+                       *out_flags = IRQ_TYPE_LEVEL_LOW;
+               else
+                       *out_flags = IRQ_TYPE_EDGE_RISING;
+       } else
+               *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+       return 0;
+}
+
+int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+       /*
+        * We only support these. This has no effect other than setting
+        * the corresponding descriptor bits, but those will in turn
+        * affect the resend function when re-enabling an edge interrupt.
+        *
+        * We set the default to edge as explained in map().
+        */
+       if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
+               flow_type = IRQ_TYPE_EDGE_RISING;
+
+       if (flow_type != IRQ_TYPE_EDGE_RISING &&
+           flow_type != IRQ_TYPE_LEVEL_LOW)
+               return -EINVAL;
+
+       irqd_set_trigger_type(d, flow_type);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+int xics_retrigger(struct irq_data *data)
+{
+       /*
+        * We need to push a dummy CPPR when retriggering, since the subsequent
+        * EOI will try to pop it. Passing 0 works, as the function hard codes
+        * the priority value anyway.
+        */
+       xics_push_cppr(0);
+
+       /* Tell the core to do a soft retrigger */
        return 0;
 }
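
Taken together, these xics changes let masked edge interrupts be resent in software: xlate() now honours a two-cell device-tree specifier instead of forcing everything to IRQ_TYPE_LEVEL_LOW, map() defaults descriptors to edge, and set_type()/retrigger() give the core what HARDIRQS_SW_RESEND needs. The specifier decoding on its own, distilled into a sketch:

	/* Cell 0 is the hardware number; bit 0 of cell 1, when present,
	 * selects level (1) versus edge (0). */
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = (intspec[1] & 1) ? IRQ_TYPE_LEVEL_LOW
					      : IRQ_TYPE_EDGE_RISING;
	else
		*out_flags = IRQ_TYPE_LEVEL_LOW;	/* legacy single cell */
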
 
index 9e607bf2d640df1ba7aee69b2cccbc4fd5ff61cb..c109f073d454af9ec0a137dccf4df81a9484bf2b 100644 (file)
@@ -68,7 +68,6 @@ config DEBUG_RODATA
 config S390
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_GCOV_PROFILE_ALL
@@ -123,6 +122,7 @@ config S390
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_EARLY_PFN_TO_NID
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
@@ -871,4 +871,17 @@ config S390_GUEST
          Select this option if you want to run the kernel as a guest under
          the KVM hypervisor.
 
+config S390_GUEST_OLD_TRANSPORT
+       def_bool y
+       prompt "Guest support for old s390 virtio transport (DEPRECATED)"
+       depends on S390_GUEST
+       help
+         Enable this option to add support for the old s390-virtio
+         transport (i.e. virtio devices NOT based on virtio-ccw). This
+         type of virtio device is only available on the experimental
+         kuli userspace or with old (< 2.6) qemu. If you are running
+         with a modern version of qemu (which supports virtio-ccw since
+         1.4 and uses it by default since version 2.4), you probably won't
+         need this.
+
 endmenu
index f86a4eef28a929ebe235576264122c97fb9eb08e..28c4f96a2d9cef6d6ce97abc84890cc0c1f65615 100644 (file)
@@ -21,16 +21,21 @@ ENTRY(startup_continue)
        lg      %r15,.Lstack-.LPG1(%r13)
        aghi    %r15,-160
        brasl   %r14,decompress_kernel
-       # setup registers for memory mover & branch to target
+       # Set up registers for memory mover. We move the decompressed image to
+       # 0x11000, starting at offset 0x11000 in the decompressed image so
+       # that code living at 0x11000 in the image will end up at 0x11000 in
+       # memory.
        lgr     %r4,%r2
        lg      %r2,.Loffset-.LPG1(%r13)
        la      %r4,0(%r2,%r4)
        lg      %r3,.Lmvsize-.LPG1(%r13)
        lgr     %r5,%r3
-       # move the memory mover someplace safe
+       # Move the memory mover someplace safe so it doesn't overwrite itself.
        la      %r1,0x200
        mvc     0(mover_end-mover,%r1),mover-.LPG1(%r13)
-       # decompress image is started at 0x11000
+       # When the memory mover is done we pass control to
+       # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+       # the decompressed image.
        lgr     %r6,%r2
        br      %r1
 mover:
index 889ea345021092ac2bd9fe4b72aa51f6f56aad00..412b1bd21029bb997715e5e5ff7778bc69813ee1 100644 (file)
@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
@@ -678,7 +677,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
index 1bcfd764910a706fd44b9fe813f04f07a0d48e7a..bec279eb4b936b9a84f21f7d8c2de8e8769dffa5 100644 (file)
@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_TRACE_ENUM_MAP_FILE=y
@@ -616,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
index 13ff090139c86a897270ba70f37a5ae1a471e43d..1751446a5bbb8ed9ca519d87d8f18fe70801f3fe 100644 (file)
@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
@@ -615,7 +614,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
index 577ae1d4ae894e58c5e0c37cec32f77341e18735..2bad9d837029924b3e1955bce4da9c170192c0f0 100644 (file)
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
                struct kernel_fpu vxstate;                                  \
                unsigned long prealign, aligned, remaining;                 \
                                                                            \
+               if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)                   \
+                       return ___crc32_sw(crc, data, datalen);             \
+                                                                           \
                if ((unsigned long)data & VX_ALIGN_MASK) {                  \
                        prealign = VX_ALIGNMENT -                           \
                                  ((unsigned long)data & VX_ALIGN_MASK);    \
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
                        data = (void *)((unsigned long)data + prealign);    \
                }                                                           \
                                                                            \
-               if (datalen < VX_MIN_LEN)                                   \
-                       return ___crc32_sw(crc, data, datalen);             \
-                                                                           \
                aligned = datalen & ~VX_ALIGN_MASK;                         \
                remaining = datalen & VX_ALIGN_MASK;                        \
                                                                            \
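
The reordering above is the actual bug fix: the prealign step advances data and shrinks datalen before the old minimum-length test ran, so a short unaligned buffer could underflow the unsigned datalen or miss the software fallback it needed. Testing up front against VX_MIN_LEN + VX_ALIGN_MASK covers the worst case the prealign step can consume. A simplified sketch of the safe ordering (MIN_LEN, ALIGN_MASK, ALIGNMENT and crc32_sw are placeholders for the macros used here):

	/* Bail out to the software CRC before touching data/datalen;
	 * the bound includes the most that prealign can eat. */
	if (datalen < MIN_LEN + ALIGN_MASK)
		return crc32_sw(crc, data, datalen);

	if ((unsigned long)data & ALIGN_MASK) {
		unsigned long prealign =
			ALIGNMENT - ((unsigned long)data & ALIGN_MASK);

		crc = crc32_sw(crc, data, prealign);
		datalen -= prealign;	/* cannot underflow any more */
		data += prealign;
	}
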
index ccccebeeaaf67d7d058a1e0544fe8a50eba9e19a..2d40ef0a6295d9a93c3527d6592eba2780736a2e 100644 (file)
@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
@@ -234,7 +233,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
 # CONFIG_XZ_DEC_X86 is not set
 # CONFIG_XZ_DEC_POWERPC is not set
index 9b49cf1daa8f9ee11d3536343fe76325085efa64..52d7c8709279fcd937add7062ac62910ce40fdcf 100644 (file)
@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
-               unsigned char __x;                              \
+               unsigned char __x = 0;                          \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 2: {                                               \
-               unsigned short __x;                             \
+               unsigned short __x = 0;                         \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 4: {                                               \
-               unsigned int __x;                               \
+               unsigned int __x = 0;                           \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 8: {                                               \
-               unsigned long long __x;                         \
+               unsigned long long __x = 0;                     \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_to_user(to, from, n);
 }
 
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
 /**
  * copy_from_user: - Copy a block of data from user space.
  * @to:   Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
-               copy_from_user_overflow();
+               if (!__builtin_constant_p(n))
+                       copy_user_overflow(sz, n);
+               else
+                       __bad_copy_user();
                return n;
        }
        return __copy_from_user(to, from, n);
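
The replacement above splits overflow handling by whether the copy size is a compile-time constant: a constant that is provably too large becomes a hard build failure through a __compiletime_error-annotated function, while a size only known at run time just warns and refuses the copy. The generic shape of that split, using the underlying GCC error attribute (function name illustrative):

	extern void __attribute__((error("usercopy buffer size is too small")))
	bad_copy(void);

	if (unlikely(sz != -1 && sz < n)) {
		if (__builtin_constant_p(n))
			bad_copy();	/* provably wrong: fail the build */
		else
			WARN(1, "Buffer overflow detected (%d < %lu)!\n",
			     sz, n);	/* runtime detection: warn */
		return n;		/* refuse the copy either way */
	}
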
index 56e4d8234ef2ea1c2970ebcd6d8d5d2d15b5a037..4431905f8cfa2cbac81ada709929201371669d99 100644 (file)
@@ -309,7 +309,9 @@ ENTRY(startup_kdump)
        l       %r15,.Lstack-.LPG0(%r13)
        ahi     %r15,-STACK_FRAME_OVERHEAD
        brasl   %r14,verify_facilities
-       /* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
        jg      startup_continue
 
 .Lstack:
index ba5f456edaa9078261d3ce0b2572aa7eda5de843..7f7ba5f23f130fae0639b44af7e93c84ed85a654 100644 (file)
@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
                }
        } else if (MACHINE_IS_KVM) {
-               if (sclp.has_vt220 &&
-                   config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+               if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
                        SET_CONSOLE_VT220;
-               else if (sclp.has_linemode &&
-                        config_enabled(CONFIG_SCLP_CONSOLE))
+               else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
                        SET_CONSOLE_SCLP;
                else
                        SET_CONSOLE_HVC;
index 3f3ae4865d579e8a9420cc22c6c2b39f37e70a9f..607ec91966c7226babedb064703f13cf9be7daab 100644 (file)
@@ -1672,6 +1672,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
+       kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2230,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
                return -EINVAL;
        current->thread.fpu.fpc = fpu->fpc;
        if (MACHINE_HAS_VX)
-               convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+               convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+                                (freg_t *) fpu->fprs);
        else
-               memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+               memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        return 0;
 }
 
@@ -2241,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        /* make sure we have the latest values */
        save_fpu_regs();
        if (MACHINE_HAS_VX)
-               convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+               convert_vx_to_fp((freg_t *) fpu->fprs,
+                                (__vector128 *) vcpu->run->s.regs.vrs);
        else
-               memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+               memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
        fpu->fpc = current->thread.fpu.fpc;
        return 0;
 }
@@ -2361,8 +2364,10 @@ retry:
                rc = gmap_mprotect_notify(vcpu->arch.gmap,
                                          kvm_s390_get_prefix(vcpu),
                                          PAGE_SIZE * 2, PROT_WRITE);
-               if (rc)
+               if (rc) {
+                       kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        return rc;
+               }
                goto retry;
        }
 
index c106488b41371b2c8cc188df7ba5e8e5d7c06f31..d8673e243f13bf7315e36203890401cae191cea2 100644 (file)
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                /* Validity 0x0044 will be checked by SIE */
                if (rc)
                        goto unpin;
-               scb_s->gvrd = hpa;
+               scb_s->riccbd = hpa;
        }
        return 0;
 unpin:
index e390bbb16443db59d8a3551b2bdb633ae25c8d83..48352bffbc929cdc43cec471e92cc49bb23996fc 100644 (file)
@@ -237,11 +237,10 @@ char * strrchr(const char * s, int c)
 EXPORT_SYMBOL(strrchr);
 
 static inline int clcle(const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2,
-                       int *diff)
+                       const char *s2, unsigned long l2)
 {
        register unsigned long r2 asm("2") = (unsigned long) s1;
-       register unsigned long r3 asm("3") = (unsigned long) l2;
+       register unsigned long r3 asm("3") = (unsigned long) l1;
        register unsigned long r4 asm("4") = (unsigned long) s2;
        register unsigned long r5 asm("5") = (unsigned long) l2;
        int cc;
@@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1,
                      "   srl   %0,28"
                      : "=&d" (cc), "+a" (r2), "+a" (r3),
                        "+a" (r4), "+a" (r5) : : "cc");
-       *diff = *(char *)r2 - *(char *)r4;
        return cc;
 }
 
@@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2)
                return (char *) s1;
        l1 = __strend(s1) - s1;
        while (l1-- >= l2) {
-               int cc, dummy;
+               int cc;
 
-               cc = clcle(s1, l1, s2, l2, &dummy);
+               cc = clcle(s1, l2, s2, l2);
                if (!cc)
                        return (char *) s1;
                s1++;
@@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr);
  */
 int memcmp(const void *cs, const void *ct, size_t n)
 {
-       int ret, diff;
+       int ret;
 
-       ret = clcle(cs, n, ct, n, &diff);
+       ret = clcle(cs, n, ct, n);
        if (ret)
-               ret = diff;
+               ret = ret == 1 ? -1 : 1;
        return ret;
 }
 EXPORT_SYMBOL(memcmp);
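
For readers unfamiliar with CLCLE: it compares two long operands and reports the result in the condition code, 0 for equal, 1 when the first operand compares low, 2 when it compares high (3 means the instruction was interrupted, which the retry loop in the asm absorbs). With the bogus *diff output gone, memcmp() derives its sign purely from that code:

	/* cc: 0 = equal, 1 = first operand low, 2 = first operand high */
	if (cc)
		return (cc == 1) ? -1 : 1;
	return 0;
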
index d96596128e9f2591c4cde57f3bb6b1bf256b30f9..f481fcde067ba145f6cc2ace122d9d0a543ebe00 100644 (file)
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       check_object_size(to, n, false);
        if (static_branch_likely(&have_mvcos))
                return copy_from_user_mvcos(to, from, n);
        return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        if (static_branch_likely(&have_mvcos))
                return copy_to_user_mvcos(to, from, n);
        return copy_to_user_mvcs(to, from, n);
index 7104ffb5a67f8fb311184d3878e4a54f7d72d0cf..af7cf28cf97edcc71551917cf66cfb0214f56946 100644 (file)
@@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end,
        int rc = -EINVAL;
        pgd_t *pgdp;
 
+       if (addr == end)
+               return 0;
        if (end >= MODULES_END)
                return -EINVAL;
        mutex_lock(&cpa_mutex);
index 20a3591225ccea9aa3d93acc11a97a34bf254501..01aec8ccde834119b0cf893c6d8ff4b81a41cb20 100644 (file)
@@ -163,7 +163,7 @@ do {                                                                        \
                __get_user_asm(val, "lw", ptr);                         \
                 break;                                                 \
        case 8:                                                         \
-               if ((copy_from_user((void *)&val, ptr, 8)) == 0)        \
+               if (__copy_from_user((void *)&val, ptr, 8) == 0)        \
                        __gu_err = 0;                                   \
                else                                                    \
                        __gu_err = -EFAULT;                             \
@@ -188,6 +188,8 @@ do {                                                                        \
                                                                        \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))             \
                __get_user_common((x), size, __gu_ptr);                 \
+       else                                                            \
+               (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
 })
@@ -201,6 +203,7 @@ do {                                                                        \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "3:li   %0, %4\n"                                       \
+               "li     %1, 0\n"                                        \
                "j      2b\n"                                           \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long len)
 {
-       unsigned long over;
+       unsigned long res = len;
 
-       if (access_ok(VERIFY_READ, from, len))
-               return __copy_tofrom_user(to, from, len);
+       if (likely(access_ok(VERIFY_READ, from, len)))
+               res = __copy_tofrom_user(to, from, len);
 
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + len - TASK_SIZE;
-               return __copy_tofrom_user(to, from, len - over) + over;
-       }
-       return len;
+       if (unlikely(res))
+               memset(to + (len - res), 0, res);
+
+       return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long len)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, len))
-               return __copy_tofrom_user(to, from, len);
+       if (likely(access_ok(VERIFY_WRITE, to, len)))
+               len = __copy_tofrom_user(to, from, len);
 
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + len - TASK_SIZE;
-               return __copy_tofrom_user(to, from, len - over) + over;
-       }
        return len;
 }
 
-#define __copy_from_user(to, from, len)        \
-               __copy_tofrom_user((to), (from), (len))
+static inline unsigned long
+__copy_from_user(void *to, const void *from, unsigned long len)
+{
+       unsigned long left = __copy_tofrom_user(to, from, len);
+       if (unlikely(left))
+               memset(to + (len - left), 0, left);
+       return left;
+}
 
 #define __copy_to_user(to, from, len)          \
                __copy_tofrom_user((to), (from), (len))
@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
 static inline unsigned long
 __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
 {
-       return __copy_from_user(to, from, len);
+       return __copy_tofrom_user(to, from, len);
 }
 
-#define __copy_in_user(to, from, len)  __copy_from_user(to, from, len)
+#define __copy_in_user(to, from, len)  __copy_tofrom_user(to, from, len)
 
 static inline unsigned long
 copy_in_user(void *to, const void *from, unsigned long len)
 {
        if (access_ok(VERIFY_READ, from, len) &&
                      access_ok(VERIFY_WRITE, to, len))
-               return copy_from_user(to, from, len);
+               return __copy_tofrom_user(to, from, len);
 }
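
Every copy_from_user() change in this series enforces the same hardening rule: when the access check fails or the copy faults partway through, the uncopied tail of the kernel buffer must be zeroed, otherwise stale kernel stack or heap bytes leak to the caller through the untouched region. The generic shape, mirroring the hunk above (raw_copy stands in for the arch's __copy_tofrom_user-style primitive, which returns the number of bytes not copied):

	unsigned long res = len;

	if (likely(access_ok(VERIFY_READ, from, len)))
		res = raw_copy(to, from, len);

	if (unlikely(res))
		memset(to + (len - res), 0, res);	/* zero the tail */

	return res;
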
 
 /*
index caea2c45f6c2ac43ee6c134c1b4c9a7ebc8295e3..1d159ce50f5ad37a5ae5e7bdd5ca2116dfd26158 100644 (file)
@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
 "      movco.l %0, @%3                                 \n"             \
 "      bf      1b                                      \n"             \
 "      synco                                           \n"             \
-       : "=&z" (temp), "=&z" (res)                                     \
+       : "=&z" (temp), "=&r" (res)                                     \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
index a49635c512665ddc6b9e70e5d944c57e26d349d6..92ade79ac4272ea22657b3aac6adfb90b1d4105f 100644 (file)
@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
        __kernel_size_t __copy_size = (__kernel_size_t) n;
 
        if (__copy_size && __access_ok(__copy_from, __copy_size))
-               return __copy_user(to, from, __copy_size);
+               __copy_size = __copy_user(to, from, __copy_size);
+
+       if (unlikely(__copy_size))
+               memset(to + (n - __copy_size), 0, __copy_size);
 
        return __copy_size;
 }
index c01376c76b868727b55bd117551cc1b96b93d8f1..ca5073dd459674d1ad835406fc46baa1957d01ac 100644 (file)
@@ -24,6 +24,7 @@
 #define __get_user_size(x,ptr,size,retval)                     \
 do {                                                           \
        retval = 0;                                             \
+       x = 0;                                                  \
        switch (size) {                                         \
        case 1:                                                 \
                retval = __get_user_asm_b((void *)&x,           \
index 546293d9e6c52906f2c1f72d1a9545749595f994..59b09600dd326b8d0e24853d720d18d73837cd69 100644 (file)
@@ -43,6 +43,7 @@ config SPARC
        select OLD_SIGSUSPEND
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
        def_bool !64BIT
index 57aca2792d29f89203735f892ceab4b73340bd69..ea55f86d7ccd7d54c22236b20300ea8358d45cda 100644 (file)
@@ -248,23 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) to, n))
+       if (n && __access_ok((unsigned long) to, n)) {
+               check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) from, n))
+       if (n && __access_ok((unsigned long) from, n)) {
+               check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       else
+       } else {
+               memset(to, 0, n);
                return n;
+       }
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
index e9a51d64974ddff102017ee8f86be01f7a41361a..37a315d0ddd4b205c8a54d77c354d7cb116aba15 100644 (file)
@@ -210,8 +210,11 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-       unsigned long ret = ___copy_from_user(to, from, size);
+       unsigned long ret;
 
+       check_object_size(to, size, false);
+
+       ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +230,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-       unsigned long ret = ___copy_to_user(to, from, size);
+       unsigned long ret;
+
+       check_object_size(from, size, true);
 
+       ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
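
The sparc hunks wire both copy directions into hardened usercopy: check_object_size() runs before the copy and verifies that the kernel-side buffer stays within its slab object or stack frame, with the third argument indicating the copy direction. The call pattern, sketched (arch_copy stands in for the per-arch primitive):

	/* to_user = true: the kernel buffer is the source. */
	check_object_size(from, size, true);
	ret = arch_copy(to, from, size);

	/* to_user = false: the kernel buffer is the destination. */
	check_object_size(to, size, false);
	ret = arch_copy(to, from, size);
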
index 4820a02838ace2e1d05f8e9ed5e43e18b48a5569..78da75b670bcaa67e1cb42ccf5e53ab63421a863 100644 (file)
@@ -4,7 +4,6 @@
 config TILE
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_WANT_FRAME_POINTERS
index 0a9c4265763bd1a26f5f38aa20460c475670cea5..a77369e91e5416c08ee25eaf04cb2e1df7c4fabe 100644 (file)
@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
        return n;
 }
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
-       __compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 
        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
+       else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               copy_from_user_overflow();
+               __bad_copy_user();
 
        return n;
 }
-#else
-#define copy_from_user _copy_from_user
-#endif
 
 #ifdef __tilegx__
 /**
index 1dd5bd8a8c599360ae1626b66010c1a18b12145b..133055311dce1c1ea0d4e2ae26195cc7da4c5647 100644 (file)
@@ -81,7 +81,7 @@
   .altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
+  .exit.text : { EXIT_TEXT }
   .exit.data : { *(.exit.data) }
 
   .preinit_array : {
index ef4b8f949b516df373b2d016e4a1366b53e1bc75..b783ac87d98a65a9bdafc871b4f0fc40e261567b 100644 (file)
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
        PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
        if (syscall_trace_enter(regs))
-               return;
+               goto out;
 
        /* Do the seccomp check after ptrace; failures should be fast. */
        if (secure_computing(NULL) == -1)
-               return;
+               goto out;
 
-       /* Update the syscall number after orig_ax has potentially been updated
-        * with ptrace.
-        */
-       UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
        syscall = UPT_SYSCALL_NR(r);
-
        if (syscall >= 0 && syscall <= __NR_syscall_max)
                PT_REGS_SET_SYSCALL_RETURN(regs,
                                EXECUTE_SYSCALL(syscall, regs));
 
+out:
        syscall_trace_leave(regs);
 }
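
The goto rework above exists because ptrace pairs entry and exit reports: once syscall_trace_enter() has run, syscall_trace_leave() must run too, even when seccomp or the tracer aborts the syscall. The resulting control-flow shape (dispatch stands in for the EXECUTE_SYSCALL step):

	if (syscall_trace_enter(regs))
		goto out;		/* aborted, but exit must still fire */

	if (secure_computing(NULL) == -1)
		goto out;		/* seccomp denied it; same rule */

	dispatch(regs);
out:
	syscall_trace_leave(regs);
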
index e35632ef23c759a43e4673d222aac430172cdad7..62dfc644c908ab5ffb173b493d5b97b93e8f7ca6 100644 (file)
@@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-               bool write, bool foreign)
+               bool write, bool execute, bool foreign)
 {
        /* by default, allow everything */
        return true;
index 5c6e7471b732335bf0b4272a4f274520d2bb4f4e..2a1f0ce7c59acac6e1a6543eeda61ccb3792f277 100644 (file)
@@ -24,7 +24,6 @@ config X86
        select ARCH_DISCARD_MEMBLOCK
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
@@ -80,6 +79,7 @@ config X86
        select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
        select HAVE_AOUT                        if X86_32
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +91,7 @@ config X86
        select HAVE_ARCH_SOFT_DIRTY             if X86_64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_EBPF_JIT                    if X86_64
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
index ff574dad95ccaf2739d0edfe85c94acea1e15134..94dd4a31f5b39a88f205fbda2a452f1228cd9516 100644 (file)
@@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
        return status;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle, bool is64)
-{
-       struct efi_info *efi = &boot_params->efi_info;
-       unsigned long map_sz, key, desc_size;
-       efi_memory_desc_t *mem_map;
+struct exit_boot_struct {
+       struct boot_params *boot_params;
+       struct efi_info *efi;
        struct setup_data *e820ext;
-       const char *signature;
        __u32 e820ext_size;
-       __u32 nr_desc, prev_nr_desc;
-       efi_status_t status;
-       __u32 desc_version;
-       bool called_exit = false;
-       u8 nr_entries;
-       int i;
-
-       nr_desc = 0;
-       e820ext = NULL;
-       e820ext_size = 0;
-
-get_map:
-       status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
-                                   &desc_version, &key);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       prev_nr_desc = nr_desc;
-       nr_desc = map_sz / desc_size;
-       if (nr_desc > prev_nr_desc &&
-           nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
-               u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
-
-               status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
-               if (status != EFI_SUCCESS)
-                       goto free_mem_map;
+       bool is64;
+};
 
-               efi_call_early(free_pool, mem_map);
-               goto get_map; /* Allocated memory, get map again */
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+                                  struct efi_boot_memmap *map,
+                                  void *priv)
+{
+       static bool first = true;
+       const char *signature;
+       __u32 nr_desc;
+       efi_status_t status;
+       struct exit_boot_struct *p = priv;
+
+       if (first) {
+               nr_desc = *map->buff_size / *map->desc_size;
+               if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
+                       u32 nr_e820ext = nr_desc -
+                                       ARRAY_SIZE(p->boot_params->e820_map);
+
+                       status = alloc_e820ext(nr_e820ext, &p->e820ext,
+                                              &p->e820ext_size);
+                       if (status != EFI_SUCCESS)
+                               return status;
+               }
+               first = false;
        }
 
-       signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
-       memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+       signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       efi->efi_systab = (unsigned long)sys_table;
-       efi->efi_memdesc_size = desc_size;
-       efi->efi_memdesc_version = desc_version;
-       efi->efi_memmap = (unsigned long)mem_map;
-       efi->efi_memmap_size = map_sz;
+       p->efi->efi_systab = (unsigned long)sys_table_arg;
+       p->efi->efi_memdesc_size = *map->desc_size;
+       p->efi->efi_memdesc_version = *map->desc_ver;
+       p->efi->efi_memmap = (unsigned long)*map->map;
+       p->efi->efi_memmap_size = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       efi->efi_systab_hi = (unsigned long)sys_table >> 32;
-       efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+       p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
 #endif
 
+       return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params,
+                             void *handle, bool is64)
+{
+       unsigned long map_sz, key, desc_size, buff_size;
+       efi_memory_desc_t *mem_map;
+       struct setup_data *e820ext;
+       __u32 e820ext_size;
+       efi_status_t status;
+       __u32 desc_version;
+       struct efi_boot_memmap map;
+       struct exit_boot_struct priv;
+
+       map.map =               &mem_map;
+       map.map_size =          &map_sz;
+       map.desc_size =         &desc_size;
+       map.desc_ver =          &desc_version;
+       map.key_ptr =           &key;
+       map.buff_size =         &buff_size;
+       priv.boot_params =      boot_params;
+       priv.efi =              &boot_params->efi_info;
+       priv.e820ext =          NULL;
+       priv.e820ext_size =     0;
+       priv.is64 =             is64;
+
        /* Might as well exit boot services now */
-       status = efi_call_early(exit_boot_services, handle, key);
-       if (status != EFI_SUCCESS) {
-               /*
-                * ExitBootServices() will fail if any of the event
-                * handlers change the memory map. In which case, we
-                * must be prepared to retry, but only once so that
-                * we're guaranteed to exit on repeated failures instead
-                * of spinning forever.
-                */
-               if (called_exit)
-                       goto free_mem_map;
-
-               called_exit = true;
-               efi_call_early(free_pool, mem_map);
-               goto get_map;
-       }
+       status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+                                       exit_boot_func);
+       if (status != EFI_SUCCESS)
+               return status;
 
+       e820ext = priv.e820ext;
+       e820ext_size = priv.e820ext_size;
        /* Historic? */
        boot_params->alt_mem_k = 32 * 1024;
 
@@ -1085,10 +1093,6 @@ get_map:
                return status;
 
        return EFI_SUCCESS;
-
-free_mem_map:
-       efi_call_early(free_pool, mem_map);
-       return status;
 }
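
The refactor above inverts the old retry loop: the generic helper efi_exit_boot_services() now owns the get-map/retry dance around ExitBootServices(), and per-architecture work is injected as a callback that receives the final memory map plus an opaque priv cookie. A generic sketch of that inversion, with simplified placeholder types rather than the real EFI stub API:

	typedef int (*exit_cb_t)(struct memmap *map, void *priv);

	int exit_boot_services(void *handle, struct memmap *map,
			       void *priv, exit_cb_t cb)
	{
		int status;

		status = get_memory_map(map);	/* helper may retry this */
		if (status)
			return status;

		status = cb(map, priv);		/* caller-specific fixups */
		if (status)
			return status;

		return fw_exit_boot_services(handle, map->key);
	}
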
 
 /*
index 4e2ecfa23c15978faf88416ad11ca859e1201718..4b429df40d7a2fb9ee9fce2eb5315b592d1200e9 100644 (file)
@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
index 89fa85e8b10ca3331f48110393747bf44f750c2b..6f97fb33ae216d9129d42261b5a10226868caab0 100644 (file)
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index b691da981cd9cf08e718a4ff81ad3d5dc5d7199d..a78a0694ddef37be737599dbf6ba305eb88b3cba 100644 (file)
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
        vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
        vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
        vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-       movl    _args_digest+4*32(state, idx, 4), tmp2_w
+       vmovd   _args_digest(state , idx, 4) , %xmm0
        vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
        vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
        vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
-       vmovdqu %xmm0, _result_digest(job_rax)
-       movl    tmp2_w, _result_digest+1*16(job_rax)
+        vmovdqu %xmm0, _result_digest(job_rax)
+        offset =  (_result_digest + 1*16)
+        vmovdqu %xmm1, offset(job_rax)
 
        pop     %rbx
 
index f4cf5b78fd360ee0593d9aefa1266e9ffc687325..d210174a52b0e933983f8b253e679eb6b4336632 100644 (file)
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index fe91c25092da2662b67413a154e446843e7d5f7a..77f28ce9c6464e71a942f767082391502c8fa80d 100644 (file)
@@ -5,6 +5,8 @@
 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o   := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 
+CFLAGS_syscall_64.o            += -Wno-override-init
+CFLAGS_syscall_32.o            += -Wno-override-init
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y                          += common.o
 
index b846875aeea6792ce4a0c359f7635112f27e84d5..d172c619c44931b34b01d34b1dd09379ba26a5b4 100644
@@ -288,11 +288,15 @@ return_from_SYSCALL_64:
        jne     opportunistic_sysret_failed
 
        /*
-        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
-        * restoring TF results in a trap from userspace immediately after
-        * SYSRET.  This would cause an infinite loop whenever #DB happens
-        * with register state that satisfies the opportunistic SYSRET
-        * conditions.  For example, single-stepping this user code:
+        * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+        * restore RF properly. If the slowpath sets it for whatever reason, we
+        * need to restore it correctly.
+        *
+        * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+        * trap from userspace immediately after SYSRET.  This would cause an
+        * infinite loop whenever #DB happens with register state that satisfies
+        * the opportunistic SYSRET conditions.  For example, single-stepping
+        * this user code:
         *
         *           movq       $stuck_here, %rcx
         *           pushfq
@@ -601,9 +605,20 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 .endm
 #endif
 
+/* Make sure APIC interrupt handlers end up in the irqentry section: */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
+# define POP_SECTION_IRQENTRY  .popsection
+#else
+# define PUSH_SECTION_IRQENTRY
+# define POP_SECTION_IRQENTRY
+#endif
+
 .macro apicinterrupt num sym do_sym
+PUSH_SECTION_IRQENTRY
 apicinterrupt3 \num \sym \do_sym
 trace_apicinterrupt \num \sym
+POP_SECTION_IRQENTRY
 .endm
 
 #ifdef CONFIG_SMP
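The .irqentry.text placement matters because instrumentation that needs to
special-case interrupt entry identifies it purely by address range, using the
section's linker-provided bounds. A check of roughly this shape is what the
.pushsection/.popsection pair above enables (a sketch of the consumer side,
not the exact call site):

	static bool in_irqentry_text(unsigned long ptr)
	{
		return ptr >= (unsigned long)&__irqentry_text_start &&
		       ptr <  (unsigned long)&__irqentry_text_end;
	}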
index e07a22bb9308d0d030f0052f2f8195a8fbb48df5..f5f4b3fbbbc2924cbac3fe24d45d949e0997dc8e 100644
@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
   [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x0080,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x0081,
+  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
+  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
   [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
   [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
index e6131d4454e67005f91047d9e966243ae86aaf80..65577f081d072c5f774f763adc19a0e7a87e5627 100644
@@ -29,6 +29,8 @@
 
 #define COUNTER_SHIFT          16
 
+static HLIST_HEAD(uncore_unused_list);
+
 struct amd_uncore {
        int id;
        int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
-       struct amd_uncore *free_when_cpu_online;
+       struct hlist_node node;
 };
 
 static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
+               uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }
 
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_l2->active_mask = &amd_l2_active_mask;
                uncore_l2->pmu = &amd_l2_pmu;
+               uncore_l2->id = -1;
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }
 
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
                        continue;
 
                if (this->id == that->id) {
-                       that->free_when_cpu_online = this;
+                       hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
        return 0;
 }
 
+static void uncore_clean_online(void)
+{
+       struct amd_uncore *uncore;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+               hlist_del(&uncore->node);
+               kfree(uncore);
+       }
+}
+
 static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
 {
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
-       kfree(uncore->free_when_cpu_online);
-       uncore->free_when_cpu_online = NULL;
+       uncore_clean_online();
 
        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
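The hlist replaces a one-slot handoff: free_when_cpu_online could remember
only a single superseded structure per uncore, so a second handoff before the
owner came online would overwrite (and leak) the first. With the shared
uncore_unused_list, the starting callback parks losers of the sibling race in
atomic context and the online callback reaps them all where kfree() is safe.
The skeleton of that pair, reduced from the code above:

	/* starting callback (atomic context): park the superseded copy */
	hlist_add_head(&this->node, &uncore_unused_list);

	/* online callback: reap everything parked so far */
	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}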
index 0a6e393a2e6298bb9240831714d5915ab26abc5a..982c9e31daca441cc66b0b7fc652f3b4251be7b0 100644
 struct bts_ctx {
        struct perf_output_handle       handle;
        struct debug_store              ds_back;
-       int                             started;
+       int                             state;
+};
+
+/* BTS context states: */
+enum {
+       /* no ongoing AUX transactions */
+       BTS_STATE_STOPPED = 0,
+       /* AUX transaction is on, BTS tracing is disabled */
+       BTS_STATE_INACTIVE,
+       /* AUX transaction is on, BTS tracing is running */
+       BTS_STATE_ACTIVE,
 };
 
 static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
 static int
 bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
 
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ *  - is set when bts::handle::event is valid, that is, between
+ *    perf_aux_output_begin() and perf_aux_output_end();
+ *  - is zero otherwise;
+ *  - is ordered against bts::handle::event with a compiler barrier.
+ */
+
 static void __bts_event_start(struct perf_event *event)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
 
        /*
         * local barrier to make sure that ds configuration made it
-        * before we enable BTS
+        * before we enable BTS and bts::state goes ACTIVE
         */
        wmb();
 
+       /* INACTIVE/STOPPED -> ACTIVE */
+       WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
        intel_pmu_enable_bts(config);
 
 }
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
 
        __bts_event_start(event);
 
-       /* PMI handler: this counter is running and likely generating PMIs */
-       ACCESS_ONCE(bts->started) = 1;
-
        return;
 
 fail_end_stop:
@@ -263,30 +282,34 @@ fail_stop:
        event->hw.state = PERF_HES_STOPPED;
 }
 
-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
 {
+       struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+       /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+       WRITE_ONCE(bts->state, state);
+
        /*
         * No extra synchronization is mandated by the documentation to have
         * BTS data stores globally visible.
         */
        intel_pmu_disable_bts();
-
-       if (event->hw.state & PERF_HES_STOPPED)
-               return;
-
-       ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
 }
 
 static void bts_event_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-       struct bts_buffer *buf = perf_get_aux(&bts->handle);
+       struct bts_buffer *buf = NULL;
+       int state = READ_ONCE(bts->state);
 
-       /* PMI handler: don't restart this counter */
-       ACCESS_ONCE(bts->started) = 0;
+       if (state == BTS_STATE_ACTIVE)
+               __bts_event_stop(event, BTS_STATE_STOPPED);
 
-       __bts_event_stop(event);
+       if (state != BTS_STATE_STOPPED)
+               buf = perf_get_aux(&bts->handle);
+
+       event->hw.state |= PERF_HES_STOPPED;
 
        if (flags & PERF_EF_UPDATE) {
                bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
                                bts->handle.head =
                                        local_xchg(&buf->data_size,
                                                   buf->nr_pages << PAGE_SHIFT);
+
                        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                                            !!local_xchg(&buf->lost, 0));
                }
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
 void intel_bts_enable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+       int state = READ_ONCE(bts->state);
+
+       /*
+        * Here we transition from INACTIVE to ACTIVE;
+        * if we instead are STOPPED from the interrupt handler,
+        * stay that way. Can't be ACTIVE here though.
+        */
+       if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+               return;
+
+       if (state == BTS_STATE_STOPPED)
+               return;
 
-       if (bts->handle.event && bts->started)
+       if (bts->handle.event)
                __bts_event_start(bts->handle.event);
 }
 
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 
+       /*
+        * Here we transition from ACTIVE to INACTIVE;
+        * do nothing for STOPPED or INACTIVE.
+        */
+       if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+               return;
+
        if (bts->handle.event)
-               __bts_event_stop(bts->handle.event);
+               __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
 }
 
 static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
                return 0;
 
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
-       if (WARN_ON_ONCE(head != local_read(&buf->head)))
-               return -EINVAL;
 
        phys = &buf->buf[buf->cur_buf];
        space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 
 int intel_bts_interrupt(void)
 {
+       struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct perf_event *event = bts->handle.event;
        struct bts_buffer *buf;
        s64 old_head;
-       int err;
+       int err = -ENOSPC, handled = 0;
 
-       if (!event || !bts->started)
-               return 0;
+       /*
+        * The only surefire way of knowing if this NMI is ours is by checking
+        * the write ptr against the PMI threshold.
+        */
+       if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
+               handled = 1;
+
+       /*
+        * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+        * so we can only be INACTIVE or STOPPED
+        */
+       if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+               return handled;
 
        buf = perf_get_aux(&bts->handle);
+       if (!buf)
+               return handled;
+
        /*
         * Skip snapshot counters: they don't use the interrupt, but
         * there's no other way of telling, because the pointer will
         * keep moving
         */
-       if (!buf || buf->snapshot)
+       if (buf->snapshot)
                return 0;
 
        old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
 
        /* no new data */
        if (old_head == local_read(&buf->head))
-               return 0;
+               return handled;
 
        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                            !!local_xchg(&buf->lost, 0));
 
        buf = perf_aux_output_begin(&bts->handle, event);
-       if (!buf)
-               return 1;
+       if (buf)
+               err = bts_buffer_reset(buf, &bts->handle);
+
+       if (err) {
+               WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
 
-       err = bts_buffer_reset(buf, &bts->handle);
-       if (err)
-               perf_aux_output_end(&bts->handle, 0, false);
+               if (buf) {
+                       /*
+                        * BTS_STATE_STOPPED should be visible before
+                        * handle::event is cleared
+                        */
+                       barrier();
+                       perf_aux_output_end(&bts->handle, 0, false);
+               }
+       }
 
        return 1;
 }
@@ -519,7 +584,8 @@ static __init int bts_init(void)
        if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
                return -ENODEV;
 
-       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
+                                 PERF_PMU_CAP_EXCLUSIVE;
        bts_pmu.task_ctx_nr     = perf_sw_context;
        bts_pmu.event_init      = bts_event_init;
        bts_pmu.add             = bts_event_add;
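Condensed, the bts::state machine introduced above permits only these
transitions (a summary of the handlers, not new code):

	/*
	 *   STOPPED  -> ACTIVE    __bts_event_start()        (->add()/->start())
	 *   ACTIVE   -> INACTIVE  intel_bts_disable_local()  (PMI entry)
	 *   INACTIVE -> ACTIVE    intel_bts_enable_local()   (PMI exit)
	 *   ACTIVE   -> STOPPED   bts_event_stop()           (->stop())
	 *   INACTIVE -> STOPPED   intel_bts_interrupt()      (buffer reset failed)
	 *
	 * Every write uses WRITE_ONCE(), every read READ_ONCE(), and a
	 * compiler barrier orders the state against bts::handle::event.
	 */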
index 2cbde2f449aa8ced63adf14b14f9ceb3d464068c..4c9a79b9cd691ad8b44650ca8ea1b3b1295c0d3d 100644
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
-       else
-               intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
                        return;
 
                intel_pmu_enable_bts(event->hw.config);
-       } else
-               intel_bts_enable_local();
+       }
 }
 
 static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         */
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
+       intel_bts_disable_local();
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        if (cpuc->enabled)
                __intel_pmu_enable_all(0, true);
+       intel_bts_enable_local();
 
        /*
         * Only unmask the NMI after the overflow counters
index 783c49ddef29c2336d458d1741fbdde5f69fecec..8f82b02934fa701a451ee5e7d8f76193eaefc2fa 100644
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
 static void __intel_mbm_event_count(void *info);
 
+static bool is_cqm_event(int e)
+{
+       return (e == QOS_L3_OCCUP_EVENT_ID);
+}
+
 static bool is_mbm_event(int e)
 {
        return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
             (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
                return -EINVAL;
 
+       if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
+           (is_mbm_event(event->attr.config) && !mbm_enabled))
+               return -EINVAL;
+
        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
index 7ce9f3f669e63d2bd4bbf15db801cf8bc5fa7565..9b983a474253768d172c5db3dee8a1d50098bf4f 100644
@@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                struct pebs_record_nhm *p = at;
                u64 pebs_status;
 
-               /* PEBS v3 has accurate status bits */
+               pebs_status = p->status & cpuc->pebs_enabled;
+               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+               /* PEBS v3 has more accurate status bits */
                if (x86_pmu.intel_cap.pebs_format >= 3) {
-                       for_each_set_bit(bit, (unsigned long *)&p->status,
-                                        MAX_PEBS_EVENTS)
+                       for_each_set_bit(bit, (unsigned long *)&pebs_status,
+                                        x86_pmu.max_pebs_events)
                                counts[bit]++;
 
                        continue;
                }
 
-               pebs_status = p->status & cpuc->pebs_enabled;
-               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
-
                /*
                 * On some CPUs the PEBS status can be zero when PEBS is
                 * racing with clearing of GLOBAL_STATUS.
@@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        continue;
 
                event = cpuc->events[bit];
-               WARN_ON_ONCE(!event);
-               WARN_ON_ONCE(!event->attr.precise_ip);
+               if (WARN_ON_ONCE(!event))
+                       continue;
+
+               if (WARN_ON_ONCE(!event->attr.precise_ip))
+                       continue;
 
                /* log dropped samples number */
                if (error[bit])
index 04bb5fb5a8d7a13308fdcc20d2c1a5930146f07f..861a7d9cb60f6c1c19b560a4912c7eef1effd8b3 100644
@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
        event->hw.addr_filters = NULL;
 }
 
+static inline bool valid_kernel_ip(unsigned long ip)
+{
+       return virt_addr_valid(ip) && kernel_ip(ip);
+}
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
        struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
 
        list_for_each_entry(filter, filters, entry) {
                /* PT doesn't support single address triggers */
-               if (!filter->range)
+               if (!filter->range || !filter->size)
                        return -EOPNOTSUPP;
 
-               if (!filter->inode && !kernel_ip(filter->offset))
-                       return -EINVAL;
+               if (!filter->inode) {
+                       if (!valid_kernel_ip(filter->offset))
+                               return -EINVAL;
+
+                       if (!valid_kernel_ip(filter->offset + filter->size))
+                               return -EINVAL;
+               }
 
                if (++range > pt_cap_get(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                } else {
                        /* apply the offset */
                        msr_a = filter->offset + offs[range];
-                       msr_b = filter->size + msr_a;
+                       msr_b = filter->size + msr_a - 1;
                }
 
                filters->filter[range].msr_a  = msr_a;
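The "- 1" reflects that the PT address-range MSR pair describes an inclusive
[msr_a, msr_b] window. A worked example under that assumption: a filter with
offset 0xffffffff81000000 and size 0x1000 programs
msr_a = 0xffffffff81000000 and
msr_b = 0xffffffff81000000 + 0x1000 - 1 = 0xffffffff81000fff; without the
subtraction the window would spill one byte past the intended 4 KiB. The
tightened validation above matches: zero-size ranges are rejected, and for
kernel filters both filter->offset and filter->offset + filter->size must be
valid kernel addresses.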
index 97a69dbba649b6bb7c6dd936440f572c5d02ed6f..9d35ec0cb8fc916ba3b4b63f5bdb1b6ebda5de55 100644
@@ -100,6 +100,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
        }
 }
 
+static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+               SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
 {
        if (box->pmu->pmu_idx == 0)
@@ -127,6 +133,7 @@ static struct attribute_group snb_uncore_format_group = {
 
 static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
+       .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
@@ -192,6 +199,12 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
        }
 }
 
+static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+               SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
 {
        if (box->pmu->pmu_idx == 0)
@@ -200,6 +213,7 @@ static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
 
 static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
+       .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
index 824e54086e071456b170380c52e561dbbac62cf7..8aee83bcf71f2dc5a380009957d0858a7b4a2507 100644
@@ -2626,7 +2626,7 @@ void hswep_uncore_cpu_init(void)
 
 static struct intel_uncore_type hswep_uncore_ha = {
        .name           = "ha",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
@@ -2645,7 +2645,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
 
 static struct intel_uncore_type hswep_uncore_imc = {
        .name           = "imc",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
@@ -2691,7 +2691,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
 
 static struct intel_uncore_type hswep_uncore_qpi = {
        .name                   = "qpi",
-       .num_counters           = 5,
+       .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
@@ -2773,7 +2773,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
 
 static struct intel_uncore_type hswep_uncore_r3qpi = {
        .name           = "r3qpi",
-       .num_counters   = 4,
+       .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 44,
        .constraints    = hswep_uncore_r3qpi_constraints,
@@ -2972,7 +2972,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
 
 static struct intel_uncore_type bdx_uncore_imc = {
        .name           = "imc",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
index f5befd4945f2a84d4123ce47d2c68d5bab1a0a88..124357773ffade4a78766e3cd021d928fa4f5866 100644
@@ -135,6 +135,7 @@ extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
+extern void lapic_update_tsc_freq(void);
 extern int APIC_init_uniprocessor(void);
 
 #ifdef CONFIG_X86_64
@@ -170,6 +171,7 @@ static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
 # define setup_boot_APIC_clock x86_init_noop
 # define setup_secondary_APIC_clock x86_init_noop
+static inline void lapic_update_tsc_freq(void) { }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_X2APIC
index 7178043b0e1dd69d20a6ff5ddaa37ee6c32841f8..59405a248fc2488c66259e5669c1f857b13954aa 100644
@@ -22,10 +22,6 @@ typedef struct {
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
-       /*
-        * irq_tlb_count is double-counted in irq_call_count, so it must be
-        * subtracted from irq_call_count when displaying irq_call_count
-        */
        unsigned int irq_tlb_count;
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
index 223042086f4e9aa29498d3f159aaa2b757fa766a..737da62bfeb095e875912b18e5732b7ce04cb18e 100644
@@ -5,10 +5,10 @@ struct x86_mapping_info {
        void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
        void *context;                   /* context for alloc_pgt_page */
        unsigned long pmd_flag;          /* page flag for PMD entry */
-       bool kernel_mapping;             /* kernel mapping or ident mapping */
+       unsigned long offset;            /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                               unsigned long addr, unsigned long end);
+                               unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
index 7e8ec7ae10faff67a1c6fd26314ed53be3ed598f..1cc82ece9ac1819b92ec82aca72805c0e966af97 100644
@@ -145,7 +145,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
  *
  * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
  * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
- * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
+ * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
  *
  * G (8) is aliased and used as a PROT_NONE indicator for
  * !present ptes.  We need to start storing swap entries above
@@ -156,7 +156,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
 #define SWP_TYPE_BITS 5
 /* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
 
index 9c6b890d5e7a0733ed7e92f1d3f1b1aa81bd349f..b2988c0ed82961c043b682d44b829c6db00a1e46 100644
@@ -58,7 +58,15 @@ extern unsigned char boot_gdt[];
 extern unsigned char secondary_startup_64[];
 #endif
 
+static inline size_t real_mode_size_needed(void)
+{
+       if (real_mode_header)
+               return 0;       /* already allocated. */
+
+       return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
+}
+
+void set_real_mode_mem(phys_addr_t mem, size_t size);
 void reserve_real_mode(void);
-void setup_real_mode(void);
 
 #endif /* _ARCH_X86_REALMODE_H */
index 84b59846154a92be76ebd630a79283c9d99b2a39..8b7c8d8e0852cf50d5a03e24f89ae9c693a1891c 100644
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *              1 if within a frame
+ *             -1 if placed across a frame boundary (or outside stack)
+ *              0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+       const void *frame = NULL;
+       const void *oldframe;
+
+       oldframe = __builtin_frame_address(1);
+       if (oldframe)
+               frame = __builtin_frame_address(2);
+       /*
+        * low ----------------------------------------------> high
+        * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+        *                     ^----------------^
+        *               allow copies only within here
+        */
+       while (stack <= frame && frame < stackend) {
+               /*
+                * If obj + len extends past the last frame, this
+                * check won't pass and the next frame will be 0,
+                * causing us to bail out and correctly report
+                * the copy as invalid.
+                */
+               if (obj + len <= frame)
+                       return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+               oldframe = frame;
+               frame = *(const void * const *)frame;
+       }
+       return -1;
+#else
+       return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
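A sketch of how a hardened-usercopy caller might consume
arch_within_stack_frames() (illustrative only; the real caller lives in the
generic usercopy code and its naming differs):

	static inline int check_stack_object_sketch(const void *obj,
						    unsigned long len)
	{
		const void *stack    = task_stack_page(current);
		const void *stackend = stack + THREAD_SIZE;

		if (obj + len <= stack || stackend <= obj)
			return 0;	/* not a stack object at all */

		if (obj < stack || obj + len > stackend)
			return -1;	/* straddles the stack boundary */

		return arch_within_stack_frames(stack, stackend, obj, len);
	}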
index 4e5be94e079a6c64353bd327c9fe4ef9796e16b1..6fa85944af83d8ddbbad3a344a31a7920e64e6d0 100644
@@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 static inline void __native_flush_tlb(void)
 {
+       /*
+        * If current->mm == NULL then we borrow a mm which may change during a
+        * task switch and therefore we must not be preempted while we write CR3
+        * back:
+        */
+       preempt_disable();
        native_write_cr3(native_read_cr3());
+       preempt_enable();
 }
 
 static inline void __native_flush_tlb_global_irq_disabled(void)
index c03bfb68c50352df52d6ae4e36fa54bdc22dbc50..2131c4ce7d8a10698c45626e2781dd8f1cb2f9b9 100644
@@ -433,7 +433,11 @@ do {                                                                       \
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
-                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    ".section .fixup,\"ax\"\n"                         \
+                     "3:xor"itype" %"rtype"0,%"rtype"0\n"              \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))
 
 #define __put_user_nocheck(x, ptr, size)                       \
@@ -697,44 +701,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        int sz = __compiletime_object_size(to);
@@ -743,35 +718,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        kasan_check_write(to, n);
 
-       /*
-        * While we would like to have the compiler do the checking for us
-        * even in the non-constant size case, any false positives there are
-        * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-        * without - the [hopefully] dangerous looking nature of the warning
-        * would make people go look at the respecitive call sites over and
-        * over again just to find that there's no problem).
-        *
-        * And there are cases where it's just not realistic for the compiler
-        * to prove the count to be in range. For example when multiple call
-        * sites of a helper function - perhaps in different source files -
-        * all doing proper range checking, yet the helper function not doing
-        * so again.
-        *
-        * Therefore limit the compile time checking to the constant size
-        * case, and do only runtime checking for non-constant sizes.
-        */
-
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       else if(__builtin_constant_p(n))
-               copy_from_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_from_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        int sz = __compiletime_object_size(from);
@@ -780,20 +738,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
        might_fault();
 
-       /* See the comment in copy_from_user() above. */
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       else if(__builtin_constant_p(n))
-               copy_to_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_to_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -812,21 +767,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 #define user_access_begin()    __uaccess_begin()
 #define user_access_end()      __uaccess_end()
 
-#define unsafe_put_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_put_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
-       __builtin_expect(__pu_err, 0);                                          \
-})
+       if (unlikely(__pu_err)) goto err_label;                                 \
+} while (0)
 
-#define unsafe_get_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_get_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
-       __builtin_expect(__gu_err, 0);                                          \
-})
+       if (unlikely(__gu_err)) goto err_label;                                 \
+} while (0)
 
 #endif /* _ASM_X86_UACCESS_H */
 
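The unsafe_get_user()/unsafe_put_user() rework trades the returned error code
for a caller-supplied goto label, so a run of accesses shares one fault path
with no per-access branching. A usage sketch (hypothetical caller; the
access_ok() check that must precede user_access_begin() is elided):

	static int read_pair(u32 __user *uptr, u32 *a, u32 *b)
	{
		user_access_begin();
		unsafe_get_user(*a, uptr, efault);
		unsafe_get_user(*b, uptr + 1, efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}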
index 4b32da24faaf1cb21fe3d4450a425c5193ee4b97..7d3bdd1ed6977b5e1f69dc8ba3e3d6cfa8f861a3 100644
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        return __copy_to_user_ll(to, from, n);
 }
 
@@ -95,6 +96,7 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_fault();
+       check_object_size(to, n, false);
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
index 2eac2aa3e37f30318f6e20ed1753ae29d472f251..673059a109fee067a6739704470f947fd7acae57 100644
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(dst, size, false);
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(src, size, true);
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
index c852590254d5f4191609f92fce8ee488896efe8b..e652a7cc61863667fad8ff2baa0a7242485600df 100644
@@ -79,7 +79,7 @@ struct uv_gam_range_entry {
        u16     nasid;          /* HNasid */
        u16     sockid;         /* Socket ID, high bits of APIC ID */
        u16     pnode;          /* Index to MMR and GRU spaces */
-       u32     pxm;            /* ACPI proximity domain number */
+       u32     unused2;
        u32     limit;          /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
 };
 
@@ -88,7 +88,8 @@ struct uv_gam_range_entry {
 #define        UV_SYSTAB_VERSION_UV4           0x400   /* UV4 BIOS base version */
 #define        UV_SYSTAB_VERSION_UV4_1         0x401   /* + gpa_shift */
 #define        UV_SYSTAB_VERSION_UV4_2         0x402   /* + TYPE_NVRAM/WINDOW/MBOX */
-#define        UV_SYSTAB_VERSION_UV4_LATEST    UV_SYSTAB_VERSION_UV4_2
+#define        UV_SYSTAB_VERSION_UV4_3         0x403   /* - GAM Range PXM Value */
+#define        UV_SYSTAB_VERSION_UV4_LATEST    UV_SYSTAB_VERSION_UV4_3
 
 #define        UV_SYSTAB_TYPE_UNUSED           0       /* End of table (offset == 0) */
 #define        UV_SYSTAB_TYPE_GAM_PARAMS       1       /* GAM PARAM conversions */
index 20abd912f0e4a6bbaa364dd97b388d0dc3da136a..f3e9b2df4b165d9d1a31322960778a2e9ef68bf3 100644
@@ -313,7 +313,7 @@ int lapic_get_maxlvt(void)
 
 /* Clock divisor */
 #define APIC_DIVISOR 16
-#define TSC_DIVISOR  32
+#define TSC_DIVISOR  8
 
 /*
  * This function sets up the local APIC timer, with a timeout of
@@ -565,12 +565,36 @@ static void setup_APIC_timer(void)
                                    CLOCK_EVT_FEAT_DUMMY);
                levt->set_next_event = lapic_next_deadline;
                clockevents_config_and_register(levt,
-                                               (tsc_khz / TSC_DIVISOR) * 1000,
+                                               tsc_khz * (1000 / TSC_DIVISOR),
                                                0xF, ~0UL);
        } else
                clockevents_register_device(levt);
 }
 
+/*
+ * Install the updated TSC frequency from recalibration at the TSC
+ * deadline clockevent devices.
+ */
+static void __lapic_update_tsc_freq(void *info)
+{
+       struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
+
+       if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+               return;
+
+       clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
+}
+
+void lapic_update_tsc_freq(void)
+{
+       /*
+        * The clockevent device's ->mult and ->shift can both be
+        * changed. In order to avoid races, schedule the frequency
+        * update code on each CPU.
+        */
+       on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
+}
+
 /*
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
@@ -1599,6 +1623,9 @@ void __init enable_IR_x2apic(void)
        unsigned long flags;
        int ret, ir_stat;
 
+       if (skip_ioapic_setup)
+               return;
+
        ir_stat = irq_remapping_prepare();
        if (ir_stat < 0 && !x2apic_supported())
                return;
@@ -2066,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
                return -EINVAL;
        }
 
-       num_processors++;
        if (apicid == boot_cpu_physical_apicid) {
                /*
                 * x86_bios_cpu_apicid is required to have processors listed
@@ -2089,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
 
                pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
                           thiscpu, apicid);
+
                disabled_cpus++;
                return -ENOSPC;
        }
 
+       num_processors++;
+
        /*
         * Validate version
         */
index 6368fa69d2afa0eb44c5e90fe5293c4ceeafa93c..54f35d988025b6d867f4915d5d4aeecf9461f352 100644
@@ -155,7 +155,7 @@ static void init_x2apic_ldr(void)
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-int x2apic_prepare_cpu(unsigned int cpu)
+static int x2apic_prepare_cpu(unsigned int cpu)
 {
        if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
                return -ENOMEM;
@@ -168,7 +168,7 @@ int x2apic_prepare_cpu(unsigned int cpu)
        return 0;
 }
 
-int x2apic_dead_cpu(unsigned int this_cpu)
+static int x2apic_dead_cpu(unsigned int this_cpu)
 {
        int cpu;
 
@@ -186,13 +186,18 @@ int x2apic_dead_cpu(unsigned int this_cpu)
 static int x2apic_cluster_probe(void)
 {
        int cpu = smp_processor_id();
+       int ret;
 
        if (!x2apic_mode)
                return 0;
 
+       ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+                               x2apic_prepare_cpu, x2apic_dead_cpu);
+       if (ret < 0) {
+               pr_err("Failed to register X2APIC_PREPARE\n");
+               return 0;
+       }
        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
-       cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
-                         x2apic_prepare_cpu, x2apic_dead_cpu);
        return 1;
 }
 
index 09b59adaea3f5499c1c3da0473e79f8f40695037..cb0673c1e940a2fe0e92f04304972f83aad3bacb 100644
@@ -223,6 +223,11 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
        if (strncmp(oem_id, "SGI", 3) != 0)
                return 0;
 
+       if (numa_off) {
+               pr_err("UV: NUMA is off, disabling UV support\n");
+               return 0;
+       }
+
        /* Setup early hub type field in uv_hub_info for Node 0 */
        uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
 
@@ -325,7 +330,7 @@ static __init void build_uv_gr_table(void)
        struct uv_gam_range_entry *gre = uv_gre_table;
        struct uv_gam_range_s *grt;
        unsigned long last_limit = 0, ram_limit = 0;
-       int bytes, i, sid, lsid = -1;
+       int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;
 
        if (!gre)
                return;
@@ -356,11 +361,12 @@ static __init void build_uv_gr_table(void)
                }
                sid = gre->sockid - _min_socket;
                if (lsid < sid) {               /* new range */
-                       grt = &_gr_table[sid];
-                       grt->base = lsid;
+                       grt = &_gr_table[indx];
+                       grt->base = lindx;
                        grt->nasid = gre->nasid;
                        grt->limit = last_limit = gre->limit;
                        lsid = sid;
+                       lindx = indx++;
                        continue;
                }
                if (lsid == sid && !ram_limit) {        /* update range */
@@ -371,7 +377,7 @@ static __init void build_uv_gr_table(void)
                }
                if (!ram_limit) {               /* non-contiguous ram range */
                        grt++;
-                       grt->base = sid - 1;
+                       grt->base = lindx;
                        grt->nasid = gre->nasid;
                        grt->limit = last_limit = gre->limit;
                        continue;
@@ -1155,19 +1161,18 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s %3s\n",
+                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n",
                                "Range", "", "Size", "Type", "NASID",
-                               "SID", "PN", "PXM");
+                               "SID", "PN");
                }
                pr_info(
-               "UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x %3d\n",
+               "UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
                        ((unsigned long)(gre->limit - lgre)) >>
                                (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
-                       gre->type, gre->nasid, gre->sockid,
-                       gre->pnode, gre->pxm);
+                       gre->type, gre->nasid, gre->sockid, gre->pnode);
 
                lgre = gre->limit;
                if (sock_min > gre->sockid)
@@ -1286,7 +1291,7 @@ static void __init build_socket_tables(void)
                _pnode_to_socket[i] = SOCK_EMPTY;
 
        /* fill in pnode/node/addr conversion list values */
-       pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n");
+       pr_info("UV: GAM Building socket/pnode conversion tables\n");
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
                        continue;
@@ -1294,20 +1299,18 @@ static void __init build_socket_tables(void)
                if (_socket_to_pnode[i] != SOCK_EMPTY)
                        continue;       /* duplicate */
                _socket_to_pnode[i] = gre->pnode;
-               _socket_to_node[i] = gre->pxm;
 
                i = gre->pnode - minpnode;
                _pnode_to_socket[i] = gre->sockid;
 
                pr_info(
-               "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n",
+               "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
                        gre->sockid, gre->type, gre->nasid,
                        _socket_to_pnode[gre->sockid - minsock],
-                       _socket_to_node[gre->sockid - minsock],
                        _pnode_to_socket[gre->pnode - minpnode]);
        }
 
-       /* check socket -> node values */
+       /* Set socket -> node values */
        lnid = -1;
        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
@@ -1318,14 +1321,9 @@ static void __init build_socket_tables(void)
                lnid = nid;
                apicid = per_cpu(x86_cpu_to_apicid, cpu);
                sockid = apicid >> uv_cpuid.socketid_shift;
-               i = sockid - minsock;
-
-               if (nid != _socket_to_node[i]) {
-                       pr_warn(
-                       "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
-                               i, sockid, gre->type, _socket_to_node[i], nid);
-                       _socket_to_node[i] = nid;
-               }
+               _socket_to_node[sockid - minsock] = nid;
+               pr_info("UV: sid:%02x: apicid:%04x node:%2d\n",
+                       sockid, apicid, nid);
        }
 
        /* Setup physical blade to pnode translation from GAM Range Table */
index f5c69d8974e176e44a995bd6f91512c7d030b001..b81fe2d63e15751c2cb7e61fd10dc85cc7f906b0 100644
@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
+#define MSR_AMD64_DE_CFG       0xC0011029
+
+static void init_amd_ln(struct cpuinfo_x86 *c)
+{
+       /*
+        * Apply erratum 665 fix unconditionally so machines without a BIOS
+        * fix work.
+        */
+       msr_set_bit(MSR_AMD64_DE_CFG, 31);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
        u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
+       case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        }
 
index 27a0228c9cae0dd32d2d5aff092a5b39162d6593..620ab06bcf4571c8841b70e35a57657dda2e9985 100644
@@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
  */
 static u8 *container;
 static size_t container_size;
+static bool ucode_builtin;
 
 static u32 ucode_new_rev;
 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
 void __init load_ucode_amd_bsp(unsigned int family)
 {
        struct cpio_data cp;
+       bool *builtin;
        void **data;
        size_t *size;
 
 #ifdef CONFIG_X86_32
        data =  (void **)__pa_nodebug(&ucode_cpio.data);
        size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+       builtin = (bool *)__pa_nodebug(&ucode_builtin);
 #else
        data = &ucode_cpio.data;
        size = &ucode_cpio.size;
+       builtin = &ucode_builtin;
 #endif
 
-       if (!load_builtin_amd_microcode(&cp, family))
+       *builtin = load_builtin_amd_microcode(&cp, family);
+       if (!*builtin)
                cp = find_ucode_in_initrd();
 
        if (!(cp.data && cp.size))
@@ -355,6 +360,7 @@ void load_ucode_amd_ap(void)
        unsigned int cpu = smp_processor_id();
        struct equiv_cpu_entry *eq;
        struct microcode_amd *mc;
+       u8 *cont = container;
        u32 rev, eax;
        u16 eq_id;
 
@@ -371,8 +377,12 @@ void load_ucode_amd_ap(void)
        if (check_current_patch_level(&rev, false))
                return;
 
+       /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+       if (!ucode_builtin)
+               cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
        eax = cpuid_eax(0x00000001);
-       eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+       eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
 
        eq_id = find_equiv_id(eq, eax);
        if (!eq_id)
@@ -434,6 +444,10 @@ int __init save_microcode_in_initrd_amd(void)
        else
                container = cont_va;
 
+       /* Add CONFIG_RANDOMIZE_MEMORY offset. */
+       if (!ucode_builtin)
+               container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
        eax   = cpuid_eax(0x00000001);
        eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 
index de7501edb21ce5de48db107191cd87a7c2515c65..f306698a4cb4594137e471c4e85c3eb767ea1af0 100644
@@ -317,16 +317,11 @@ static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
 static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
                                           size_t stolen_size)
 {
-       u16 toud;
+       u16 toud = 0;
 
-       /*
-        * FIXME is the graphics stolen memory region
-        * always at TOUD? Ie. is it always the last
-        * one to be allocated by the BIOS?
-        */
        toud = read_pci_config_16(0, 0, 0, I865_TOUD);
 
-       return (phys_addr_t)toud << 16;
+       return ((phys_addr_t)toud << 16) + i845_tseg_size();
 }
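	/*
	 * The cast must be applied to toud before the shift: toud is u16,
	 * so a bare (toud << 16) is computed as 32-bit int and sign-extends
	 * for TOUD values >= 0x8000 (0x8000 << 16 would widen to
	 * 0xffffffff80000000 as a 64-bit phys_addr_t).
	 */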
 
 static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
@@ -512,8 +507,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_I915GM_IDS(&gen3_early_ops),
        INTEL_I945G_IDS(&gen3_early_ops),
        INTEL_I945GM_IDS(&gen3_early_ops),
-       INTEL_VLV_M_IDS(&gen6_early_ops),
-       INTEL_VLV_D_IDS(&gen6_early_ops),
+       INTEL_VLV_IDS(&gen6_early_ops),
        INTEL_PINEVIEW_IDS(&gen3_early_ops),
        INTEL_I965G_IDS(&gen3_early_ops),
        INTEL_G33_IDS(&gen3_early_ops),
@@ -526,10 +520,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_SNB_M_IDS(&gen6_early_ops),
        INTEL_IVB_M_IDS(&gen6_early_ops),
        INTEL_IVB_D_IDS(&gen6_early_ops),
-       INTEL_HSW_D_IDS(&gen6_early_ops),
-       INTEL_HSW_M_IDS(&gen6_early_ops),
-       INTEL_BDW_M_IDS(&gen8_early_ops),
-       INTEL_BDW_D_IDS(&gen8_early_ops),
+       INTEL_HSW_IDS(&gen6_early_ops),
+       INTEL_BDW_IDS(&gen8_early_ops),
        INTEL_CHV_IDS(&chv_early_ops),
        INTEL_SKL_IDS(&gen9_early_ops),
        INTEL_BXT_IDS(&gen9_early_ops),
index 680049aa4593ca773d9860a2b8af77eab3839f31..01567aa87503f021cfb3b4104520842b1ec87179 100644
@@ -866,105 +866,17 @@ const void *get_xsave_field_ptr(int xsave_state)
        return get_xsave_addr(&fpu->state.xsave, xsave_state);
 }
 
-
-/*
- * Set xfeatures (aka XSTATE_BV) bit for a feature that we want
- * to take out of its "init state".  This will ensure that an
- * XRSTOR actually restores the state.
- */
-static void fpu__xfeature_set_non_init(struct xregs_state *xsave,
-               int xstate_feature_mask)
-{
-       xsave->header.xfeatures |= xstate_feature_mask;
-}
-
-/*
- * This function is safe to call whether the FPU is in use or not.
- *
- * Note that this only works on the current task.
- *
- * Inputs:
- *     @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
- *     XFEATURE_MASK_SSE, etc...)
- *     @xsave_state_ptr: a pointer to a copy of the state that you would
- *     like written in to the current task's FPU xsave state.  This pointer
- *     must not be located in the current tasks's xsave area.
- * Output:
- *     address of the state in the xsave area or NULL if the state
- *     is not present or is in its 'init state'.
- */
-static void fpu__xfeature_set_state(int xstate_feature_mask,
-               void *xstate_feature_src, size_t len)
-{
-       struct xregs_state *xsave = &current->thread.fpu.state.xsave;
-       struct fpu *fpu = &current->thread.fpu;
-       void *dst;
-
-       if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
-               WARN_ONCE(1, "%s() attempted with no xsave support", __func__);
-               return;
-       }
-
-       /*
-        * Tell the FPU code that we need the FPU state to be in
-        * 'fpu' (not in the registers), and that we need it to
-        * be stable while we write to it.
-        */
-       fpu__current_fpstate_write_begin();
-
-       /*
-        * This method *WILL* *NOT* work for compact-format
-        * buffers.  If the 'xstate_feature_mask' is unset in
-        * xcomp_bv then we may need to move other feature state
-        * "up" in the buffer.
-        */
-       if (xsave->header.xcomp_bv & xstate_feature_mask) {
-               WARN_ON_ONCE(1);
-               goto out;
-       }
-
-       /* find the location in the xsave buffer of the desired state */
-       dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask);
-
-       /*
-        * Make sure that the pointer being passed in did not
-        * come from the xsave buffer itself.
-        */
-       WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself");
-
-       /* put the caller-provided data in the location */
-       memcpy(dst, xstate_feature_src, len);
-
-       /*
-        * Mark the xfeature so that the CPU knows there is state
-        * in the buffer now.
-        */
-       fpu__xfeature_set_non_init(xsave, xstate_feature_mask);
-out:
-       /*
-        * We are done writing to the 'fpu'.  Reenable preeption
-        * and (possibly) move the fpstate back in to the fpregs.
-        */
-       fpu__current_fpstate_write_end();
-}
-
 #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
 #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
 
 /*
- * This will go out and modify the XSAVE buffer so that PKRU is
- * set to a particular state for access to 'pkey'.
- *
- * PKRU state does affect kernel access to user memory.  We do
- * not modify PKRU *itself* here, only the XSAVE state that will
- * be restored in to PKRU when we return back to userspace.
+ * This will go out and modify PKRU register to set the access
+ * rights for @pkey to @init_val.
  */
 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
                unsigned long init_val)
 {
-       struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
-       struct pkru_state *old_pkru_state;
-       struct pkru_state new_pkru_state;
+       u32 old_pkru;
        int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
        u32 new_pkru_bits = 0;
 
@@ -974,6 +886,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
         */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return -EINVAL;
+       /*
+        * For most XSAVE components, this would be an arduous task:
+        * bringing fpstate up to date with fpregs, updating fpstate,
+        * then re-populating fpregs.  But, for components that are
+        * never lazily managed, we can just access the fpregs
+        * directly.  PKRU is never managed lazily, so we can just
+        * manipulate it directly.  Make sure it stays that way.
+        */
+       WARN_ON_ONCE(!use_eager_fpu());
 
        /* Set the bits we need in PKRU:  */
        if (init_val & PKEY_DISABLE_ACCESS)
@@ -984,37 +905,12 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
        /* Shift the bits in to the correct place in PKRU for pkey: */
        new_pkru_bits <<= pkey_shift;
 
-       /* Locate old copy of the state in the xsave buffer: */
-       old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
-
-       /*
-        * When state is not in the buffer, it is in the init
-        * state, set it manually.  Otherwise, copy out the old
-        * state.
-        */
-       if (!old_pkru_state)
-               new_pkru_state.pkru = 0;
-       else
-               new_pkru_state.pkru = old_pkru_state->pkru;
-
-       /* Mask off any old bits in place: */
-       new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
-
-       /* Set the newly-requested bits: */
-       new_pkru_state.pkru |= new_pkru_bits;
-
-       /*
-        * We could theoretically live without zeroing pkru.pad.
-        * The current XSAVE feature state definition says that
-        * only bytes 0->3 are used.  But we do not want to
-        * chance leaking kernel stack out to userspace in case a
-        * memcpy() of the whole xsave buffer was done.
-        *
-        * They're in the same cacheline anyway.
-        */
-       new_pkru_state.pad = 0;
+       /* Get old PKRU and mask off any old bits in place: */
+       old_pkru = read_pkru();
+       old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
 
-       fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+       /* Write old part along with new part: */
+       write_pkru(old_pkru | new_pkru_bits);
 
        return 0;
 }
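For reference, a minimal standalone sketch of the PKRU bit arithmetic the rewritten arch_set_user_pkey_access() performs. The constants mirror the kernel's definitions, but read_pkru()/write_pkru() are modeled with a plain variable, so this is an illustration rather than kernel code:

#include <stdio.h>
#include <stdint.h>

/* Assumed kernel constants: 2 bits per pkey, AD = bit 0, WD = bit 1. */
#define PKRU_BITS_PER_PKEY 2
#define PKRU_AD_BIT 0x1u
#define PKRU_WD_BIT 0x2u
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE  0x2

static uint32_t set_pkey_bits(uint32_t pkru, int pkey, unsigned long init_val)
{
        int pkey_shift = pkey * PKRU_BITS_PER_PKEY;
        uint32_t new_bits = 0;

        if (init_val & PKEY_DISABLE_ACCESS)
                new_bits |= PKRU_AD_BIT;
        if (init_val & PKEY_DISABLE_WRITE)
                new_bits |= PKRU_WD_BIT;
        new_bits <<= pkey_shift;

        /* Mask off the old two-bit field, then merge the new bits. */
        pkru &= ~((PKRU_AD_BIT | PKRU_WD_BIT) << pkey_shift);
        return pkru | new_bits;
}

int main(void)
{
        /* Disable writes for pkey 3: bits 7:6 of PKRU become 10b. */
        printf("%#x\n", (unsigned)set_pkey_bits(0, 3, PKEY_DISABLE_WRITE));
        return 0;
}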
index 2dda0bc4576ebf7a6940056e621d0bdb4850aa79..f16c55bfc0907bc4a3b1f35174ca5f5cbc53ce99 100644 (file)
@@ -25,8 +25,6 @@ static void __init i386_default_early_setup(void)
        /* Initialize 32bit specific setup functions */
        x86_init.resources.reserve_resources = i386_reserve_resources;
        x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
-
-       reserve_bios_regions();
 }
 
 asmlinkage __visible void __init i386_start_kernel(void)
index 99d48e7d2974c64fe7b3fe9c1dd59e7f3b189cbb..54a2372f5dbb1eb0598788e944ad28708b638671 100644 (file)
@@ -183,7 +183,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
                copy_bootdata(__va(real_mode_data));
 
        x86_early_init_platform_quirks();
-       reserve_bios_regions();
 
        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
index ed16e58658a4201184fd0b1d8c470f5eb3e32c1e..c6dfd801df973039e8c00f1d48ad4058713c914d 100644 (file)
@@ -1242,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
        memset(&curr_time, 0, sizeof(struct rtc_time));
 
        if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
-               mc146818_set_time(&curr_time);
+               mc146818_get_time(&curr_time);
 
        if (hpet_rtc_flags & RTC_UIE &&
            curr_time.tm_sec != hpet_prev_update_sec) {
index 61521dc19c102114e177cbc21a4f5da9d94c20cd..9f669fdd20106cbb53b404e7085d31b4244bd7c7 100644 (file)
@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
-                                       irq_stats(j)->irq_tlb_count);
+               seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
index 1d39bfbd26bb3d0918271f13af86c94385983150..3692249a70f1edde4dbe52eae56c46db29b82748 100644 (file)
@@ -289,6 +289,7 @@ void __init kvmclock_init(void)
        put_cpu();
 
        x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+       x86_platform.calibrate_cpu = kvm_get_tsc_khz;
        x86_platform.get_wallclock = kvm_get_wallclock;
        x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
index ad5bc9578a7336f031bf580b415fe8354780d060..1acfd76e3e26b73d5e874d3e48fb2e0b9efa9ca8 100644 (file)
@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".popsection");
 
 /* identity function, which can be inlined */
-u32 _paravirt_ident_32(u32 x)
+u32 notrace _paravirt_ident_32(u32 x)
 {
        return x;
 }
 
-u64 _paravirt_ident_64(u64 x)
+u64 notrace _paravirt_ident_64(u64 x)
 {
        return x;
 }
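A quick note on what notrace buys here: the kernel defines it as the compiler's no-instrument attribute, which keeps these identity helpers out of the function-entry profiling hooks and so avoids recursion when tracing code itself ends up calling them. A compilable sketch of the same construct:

/* "notrace" as the kernel defines it; the attribute suppresses the
 * compiler-inserted entry hooks for this one function. */
#define notrace __attribute__((no_instrument_function))

static notrace unsigned long long ident64(unsigned long long x)
{
        return x;
}

int main(void)
{
        return (int)ident64(0);
}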
index 991b77986d57529bc3102506b398b37217e66287..0fa60f5f5a1641dc8eb2c82968dbadd650ed6e37 100644 (file)
@@ -936,8 +936,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
-       kernel_randomize_memory();
-
        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        setup_memory_map();
        parse_setup_data();
@@ -1055,6 +1053,12 @@ void __init setup_arch(char **cmdline_p)
 
        max_possible_pfn = max_pfn;
 
+       /*
+        * Define random base addresses for memory sections after max_pfn is
+        * defined and before each memory section base is used.
+        */
+       kernel_randomize_memory();
+
 #ifdef CONFIG_X86_32
        /* max_low_pfn get updated here */
        find_low_pfn_range();
@@ -1097,6 +1101,8 @@ void __init setup_arch(char **cmdline_p)
                efi_find_mirror();
        }
 
+       reserve_bios_regions();
+
        /*
         * The EFI specification says that boot service code won't be called
         * after ExitBootServices(). This is, in fact, a lie.
@@ -1125,7 +1131,15 @@ void __init setup_arch(char **cmdline_p)
 
        early_trap_pf_init();
 
-       setup_real_mode();
+       /*
+        * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
+        * with the current CR4 value.  This may not be necessary, but
+        * auditing all the early-boot CR4 manipulation would be needed to
+        * rule it out.
+        */
+       if (boot_cpu_data.cpuid_level >= 0)
+               /* A CPU has %cr4 if and only if it has CPUID. */
+               mmu_cr4_features = __read_cr4();
 
        memblock_set_current_limit(get_max_mapped());
 
@@ -1174,13 +1188,6 @@ void __init setup_arch(char **cmdline_p)
 
        kasan_init();
 
-       if (boot_cpu_data.cpuid_level >= 0) {
-               /* A CPU has %cr4 if and only if it has CPUID */
-               mmu_cr4_features = __read_cr4();
-               if (trampoline_cr4_features)
-                       *trampoline_cr4_features = mmu_cr4_features;
-       }
-
 #ifdef CONFIG_X86_32
        /* sync back kernel address range */
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
index 2a6e84a30a546f022331c653ec0725bd73086bc5..4296beb8fdd3b2d91a9644c9843d66dd2132c2ad 100644 (file)
@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 /* Logical package management. We might want to allocate that dynamically */
 static int *physical_to_logical_pkg __read_mostly;
 static unsigned long *physical_package_map __read_mostly;
-static unsigned long *logical_package_map  __read_mostly;
 static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
+static unsigned int logical_packages __read_mostly;
+static bool logical_packages_frozen __read_mostly;
 
 /* Maximum number of SMT threads on any online core */
 int __max_smt_threads __read_mostly;
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
        if (test_and_set_bit(pkg, physical_package_map))
                goto found;
 
-       new = find_first_zero_bit(logical_package_map, __max_logical_packages);
-       if (new >= __max_logical_packages) {
+       if (logical_packages_frozen) {
                physical_to_logical_pkg[pkg] = -1;
-               pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+               pr_warn("APIC(%x) Package %u exceeds logical package max\n",
                        apicid, pkg);
                return -ENOSPC;
        }
-       set_bit(new, logical_package_map);
+
+       new = logical_packages++;
        pr_info("APIC(%x) Converting physical %u to logical package %u\n",
                apicid, pkg, new);
        physical_to_logical_pkg[pkg] = new;
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
        }
 
        __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+       logical_packages = 0;
 
        /*
         * Possibly larger than what we need as the number of apic ids per
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
        memset(physical_to_logical_pkg, 0xff, size);
        size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
        physical_package_map = kzalloc(size, GFP_KERNEL);
-       size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
-       logical_package_map = kzalloc(size, GFP_KERNEL);
-
-       pr_info("Max logical packages: %u\n", __max_logical_packages);
 
        for_each_present_cpu(cpu) {
                unsigned int apicid = apic->cpu_present_to_apicid(cpu);
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
                set_cpu_possible(cpu, false);
                set_cpu_present(cpu, false);
        }
+
+       if (logical_packages > __max_logical_packages) {
+               pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
+                       logical_packages, __max_logical_packages);
+               logical_packages_frozen = true;
+               __max_logical_packages  = logical_packages;
+       }
+
+       pr_info("Max logical packages: %u\n", __max_logical_packages);
 }
 
 void __init smp_store_boot_cpu_info(void)
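A userspace model of the new allocation scheme may help: logical package ids now come from a plain counter, and a frozen flag stops new assignments once the BIOS-derived estimate has proved too small (MAX_PKGS and the freeze trigger are simplified in this sketch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PKGS 64

static int physical_to_logical[MAX_PKGS];
static bool seen[MAX_PKGS];
static unsigned int logical_packages;
static unsigned int max_logical = 2;    /* stand-in for the BIOS estimate */
static bool frozen;

static int update_package_map(unsigned int pkg)
{
        if (seen[pkg])
                return physical_to_logical[pkg];
        seen[pkg] = true;

        if (frozen) {
                physical_to_logical[pkg] = -1;  /* -ENOSPC in the kernel */
                return -1;
        }
        physical_to_logical[pkg] = (int)logical_packages++;

        if (logical_packages > max_logical) {   /* more than estimated */
                max_logical = logical_packages;
                frozen = true;
        }
        return physical_to_logical[pkg];
}

int main(void)
{
        printf("%d %d %d\n", update_package_map(5),
               update_package_map(9), update_package_map(5)); /* 0 1 0 */
        return 0;
}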
index 1ef87e887051e950071aa746f68ca9736f3c7e4a..78b9cb5a26af31559625f3a4e40e62fdc7d53512 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
 #include <asm/geode.h>
+#include <asm/apic.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1249,6 +1250,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
                (unsigned long)tsc_khz / 1000,
                (unsigned long)tsc_khz % 1000);
 
+       /* Inform the TSC deadline clockevent devices about the recalibration */
+       lapic_update_tsc_freq();
+
 out:
        if (boot_cpu_has(X86_FEATURE_ART))
                art_related_clocksource = &clocksource_tsc;
index 6c1ff31d99ffeb0d0a28c5ee472bb1865ff23df3..495c776de4b470f8eb53236a0ddeb2ca8f043b6b 100644 (file)
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
                *cursor &= 0xfe;
        }
        /*
-        * Similar treatment for VEX3 prefix.
-        * TODO: add XOP/EVEX treatment when insn decoder supports them
+        * Similar treatment for VEX3/EVEX prefix.
+        * TODO: add XOP treatment when insn decoder supports it
         */
-       if (insn->vex_prefix.nbytes == 3) {
+       if (insn->vex_prefix.nbytes >= 3) {
                /*
                 * vex2:     c5    rvvvvLpp   (has no b bit)
                 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
                 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
-                *   (evex will need setting of both b and x since
-                *   in non-sib encoding evex.x is 4th bit of MODRM.rm)
-                * Setting VEX3.b (setting because it has inverted meaning):
+                * Setting VEX3.b (setting because it has inverted meaning).
+                * Setting EVEX.x since (in non-SIB encoding) EVEX.x
+                * is the 4th bit of MODRM.rm, and needs the same treatment.
+                * For VEX3-encoded insns, VEX3.x value has no effect in
+                * non-SIB encoding, the change is superfluous but harmless.
                 */
                cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
-               *cursor |= 0x20;
+               *cursor |= 0x60;
        }
 
        /*
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 
        reg = MODRM_REG(insn);  /* Fetch modrm.reg */
        reg2 = 0xff;            /* Fetch vex.vvvv */
-       if (insn->vex_prefix.nbytes == 2)
-               reg2 = insn->vex_prefix.bytes[1];
-       else if (insn->vex_prefix.nbytes == 3)
+       if (insn->vex_prefix.nbytes)
                reg2 = insn->vex_prefix.bytes[2];
        /*
-        * TODO: add XOP, EXEV vvvv reading.
+        * TODO: add XOP vvvv reading.
         *
         * vex.vvvv field is in bits 6-3, bits are inverted.
         * But in 32-bit mode, high-order bit may be ignored.
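To make the 0x60 constant above concrete, a small standalone demo of the prefix-byte layout (values taken from the comment in the hunk; this is an illustration, not kernel code). In the byte following the C4/62 escape, bit 7 is ~R, bit 6 is ~X and bit 5 is ~B; the bits are stored inverted, so ORing them in selects the low register bank, which is what the RIP-relative rewrite needs:

#include <stdio.h>

#define VEX_NOT_B 0x20  /* inverted B bit */
#define VEX_NOT_X 0x40  /* inverted X bit: 4th MODRM.rm bit in EVEX */

int main(void)
{
        unsigned char byte1 = 0x81;     /* example: ~R=1, ~X=0, ~B=0 */

        byte1 |= VEX_NOT_B | VEX_NOT_X; /* the 0x60 from the patch */
        printf("%#x\n", byte1);         /* 0xe1 */
        return 0;
}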
index 5f42d038fcb4cf501455c29e0014f8d1e1e0dacf..c7220ba94aa776dceb3db4413ea9dec4ebace324 100644 (file)
@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 {
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+       struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;
 
        e = &ioapic->redirtbl[RTC_GSI];
@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
                return;
 
        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-       old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+       old_val = test_bit(vcpu->vcpu_id, dest_map->map);
 
        if (new_val == old_val)
                return;
 
        if (new_val) {
-               __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+               __set_bit(vcpu->vcpu_id, dest_map->map);
+               dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
                ioapic->rtc_status.pending_eoi++;
        } else {
-               __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+               __clear_bit(vcpu->vcpu_id, dest_map->map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
index 39b91127ef07a48a3cf4313d2d0340088bf8b7c6..cd944435dfbd73583a4b2d532f6f47aae022c84e 100644 (file)
@@ -23,8 +23,8 @@
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-       [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
-       [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+       [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
+       [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
index a45d8580f91e7e8459666d7c1f0076ab1803b821..5cede40e255241a1d8db2cc52f24505a5ced65ca 100644 (file)
@@ -422,6 +422,7 @@ struct nested_vmx {
        struct list_head vmcs02_pool;
        int vmcs02_num;
        u64 vmcs01_tsc_offset;
+       bool change_vmcs01_virtual_x2apic_mode;
        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;
        /*
@@ -435,6 +436,8 @@ struct nested_vmx {
        bool pi_pending;
        u16 posted_intr_nv;
 
+       unsigned long *msr_bitmap;
+
        struct hrtimer preemption_timer;
        bool preemption_timer_expired;
 
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;
 
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
                        new.control) != old.control);
 }
 
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+       vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+       vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        /* Setup TSC multiplier */
        if (kvm_has_tsc_control &&
-           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
-               vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-               vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-       }
+           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+               decache_tsc_multiplier(vmx);
 
        vmx_vcpu_pi_load(vcpu, cpu);
        vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
        unsigned long *msr_bitmap;
 
        if (is_guest_mode(vcpu))
-               msr_bitmap = vmx_msr_bitmap_nested;
+               msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
        else if (cpu_has_secondary_exec_ctrls() &&
                 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
                  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
        if (!vmx_msr_bitmap_longmode_x2apic)
                goto out4;
 
-       if (nested) {
-               vmx_msr_bitmap_nested =
-                       (unsigned long *)__get_free_page(GFP_KERNEL);
-               if (!vmx_msr_bitmap_nested)
-                       goto out5;
-       }
-
        vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_vmread_bitmap)
                goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
 
        memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
        memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-       if (nested)
-               memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
 
        if (setup_vmcs_config(&vmcs_config) < 0) {
                r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
 out7:
        free_page((unsigned long)vmx_vmread_bitmap);
 out6:
-       if (nested)
-               free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
        free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
        free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
        free_page((unsigned long)vmx_io_bitmap_a);
        free_page((unsigned long)vmx_vmwrite_bitmap);
        free_page((unsigned long)vmx_vmread_bitmap);
-       if (nested)
-               free_page((unsigned long)vmx_msr_bitmap_nested);
 
        free_kvm_area();
 }
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
+       if (cpu_has_vmx_msr_bitmap()) {
+               vmx->nested.msr_bitmap =
+                               (unsigned long *)__get_free_page(GFP_KERNEL);
+               if (!vmx->nested.msr_bitmap)
+                       goto out_msr_bitmap;
+       }
+
        vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
        if (!vmx->nested.cached_vmcs12)
-               return -ENOMEM;
+               goto out_cached_vmcs12;
 
        if (enable_shadow_vmcs) {
                shadow_vmcs = alloc_vmcs();
-               if (!shadow_vmcs) {
-                       kfree(vmx->nested.cached_vmcs12);
-                       return -ENOMEM;
-               }
+               if (!shadow_vmcs)
+                       goto out_shadow_vmcs;
                /* mark vmcs as shadow */
                shadow_vmcs->revision_id |= (1u << 31);
                /* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
        return 1;
+
+out_shadow_vmcs:
+       kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+       free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+       return -ENOMEM;
 }
 
 /*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
        vmx->nested.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        nested_release_vmcs12(vmx);
+       if (vmx->nested.msr_bitmap) {
+               free_page((unsigned long)vmx->nested.msr_bitmap);
+               vmx->nested.msr_bitmap = NULL;
+       }
        if (enable_shadow_vmcs)
                free_vmcs(vmx->nested.current_shadow_vmcs);
        kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
        u32 sec_exec_control;
 
+       /* Postpone execution until vmcs01 is the current VMCS. */
+       if (is_guest_mode(vcpu)) {
+               to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+               return;
+       }
+
        /*
         * There is no point in enabling virtualized x2apic without
         * enabling apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 {
        int msr;
        struct page *page;
-       unsigned long *msr_bitmap;
+       unsigned long *msr_bitmap_l1;
+       unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
 
+       /* This shortcut is ok because we support only x2APIC MSRs so far. */
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
                return false;
 
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
                WARN_ON(1);
                return false;
        }
-       msr_bitmap = (unsigned long *)kmap(page);
-       if (!msr_bitmap) {
+       msr_bitmap_l1 = (unsigned long *)kmap(page);
+       if (!msr_bitmap_l1) {
                nested_release_page_clean(page);
                WARN_ON(1);
                return false;
        }
 
+       memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
        if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
                if (nested_cpu_has_apic_reg_virt(vmcs12))
                        for (msr = 0x800; msr <= 0x8ff; msr++)
                                nested_vmx_disable_intercept_for_msr(
-                                       msr_bitmap,
-                                       vmx_msr_bitmap_nested,
+                                       msr_bitmap_l1, msr_bitmap_l0,
                                        msr, MSR_TYPE_R);
-               /* TPR is allowed */
-               nested_vmx_disable_intercept_for_msr(msr_bitmap,
-                               vmx_msr_bitmap_nested,
+
+               nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_TASKPRI >> 4),
                                MSR_TYPE_R | MSR_TYPE_W);
+
                if (nested_cpu_has_vid(vmcs12)) {
-                       /* EOI and self-IPI are allowed */
                        nested_vmx_disable_intercept_for_msr(
-                               msr_bitmap,
-                               vmx_msr_bitmap_nested,
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_EOI >> 4),
                                MSR_TYPE_W);
                        nested_vmx_disable_intercept_for_msr(
-                               msr_bitmap,
-                               vmx_msr_bitmap_nested,
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
                                MSR_TYPE_W);
                }
-       } else {
-               /*
-                * Enable reading intercept of all the x2apic
-                * MSRs. We should not rely on vmcs12 to do any
-                * optimizations here, it may have been modified
-                * by L1.
-                */
-               for (msr = 0x800; msr <= 0x8ff; msr++)
-                       __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               msr,
-                               MSR_TYPE_R);
-
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-                               MSR_TYPE_W);
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_EOI >> 4),
-                               MSR_TYPE_W);
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
-                               MSR_TYPE_W);
        }
        kunmap(page);
        nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        }
 
        if (cpu_has_vmx_msr_bitmap() &&
-           exec_control & CPU_BASED_USE_MSR_BITMAPS) {
-               nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
-               /* MSR_BITMAP will be set by following vmx_set_efer. */
-       } else
+           exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+           nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+               ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+       else
                exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
        /*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                        vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
        else
                vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+       if (kvm_has_tsc_control)
+               decache_tsc_multiplier(vmx);
 
        if (enable_vpid) {
                /*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        else
                vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
                              PIN_BASED_VMX_PREEMPTION_TIMER);
+       if (kvm_has_tsc_control)
+               decache_tsc_multiplier(vmx);
+
+       if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+               vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+               vmx_set_virtual_x2apic_mode(vcpu,
+                               vcpu->arch.apic_base & X2APIC_ENABLE);
+       }
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */
        vmx->host_rsp = 0;
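The handle_vmon() change above converts ad-hoc frees into the kernel's usual reverse-order goto unwind. A self-contained sketch of that pattern, with three malloc()ed buffers standing in for the msr bitmap, cached vmcs12 and shadow vmcs:

#include <stdlib.h>

/* Each allocation gets a label; a failure jumps to the label that
 * frees everything allocated *before* the failing step, in reverse
 * order. */
static int setup_three_buffers(void **a, void **b, void **c)
{
        *a = malloc(4096);
        if (!*a)
                goto out_a;
        *b = malloc(4096);
        if (!*b)
                goto out_b;
        *c = malloc(4096);
        if (!*c)
                goto out_c;
        return 0;

out_c:
        free(*b);
out_b:
        free(*a);
out_a:
        return -1;      /* -ENOMEM in the kernel */
}

int main(void)
{
        void *a, *b, *c;

        if (setup_three_buffers(&a, &b, &c) == 0) {
                free(c);
                free(b);
                free(a);
        }
        return 0;
}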
index 19f9f9e05c2a812fd07d07ce0e69223ec2accc25..699f8726539a2044616b82bd9cf792a8617f8001 100644 (file)
@@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
 
-               if (kvm_lapic_hv_timer_in_use(vcpu) &&
-                               kvm_x86_ops->set_hv_timer(vcpu,
-                                       kvm_get_lapic_tscdeadline_msr(vcpu)))
-                       kvm_lapic_switch_to_sw_timer(vcpu);
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
+               if (kvm_lapic_hv_timer_in_use(vcpu) &&
+                               kvm_x86_ops->set_hv_timer(vcpu,
+                                       kvm_get_lapic_tscdeadline_msr(vcpu)))
+                       kvm_lapic_switch_to_sw_timer(vcpu);
                /*
                 * On a host with synchronized TSC, there is no need to update
                 * kvmclock on vcpu->cpu migration
index 02de3d74d2c5bb319d48503371f451672d66dc80..8a602a1e404a262f32fbe708e926e986636dcfca 100644 (file)
@@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32)
 
 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
+       pushq   %rdi
        pushq   %rdx
 
        movq    %rdi, %rdx                      # w -> t
@@ -60,6 +61,7 @@ ENTRY(__sw_hweight64)
        shrq    $56, %rax                       # w = w_tmp >> 56
 
        popq    %rdx
+       popq    %rdi
        ret
 #else /* CONFIG_X86_32 */
        /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
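The fix itself only saves and restores %rdi, which callers of this special-convention helper expect to survive the call. For readers who want the algorithm the assembly implements, here is a C rendering of the same SWAR population count (a sketch for illustration, not the kernel's code path):

#include <stdio.h>
#include <stdint.h>

static unsigned int sw_hweight64(uint64_t w)
{
        w -= (w >> 1) & 0x5555555555555555ULL;          /* 2-bit sums */
        w = (w & 0x3333333333333333ULL) +
            ((w >> 2) & 0x3333333333333333ULL);         /* nibble sums */
        w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0fULL;     /* byte sums */
        return (unsigned int)((w * 0x0101010101010101ULL) >> 56);
}

int main(void)
{
        printf("%u\n", sw_hweight64(0xffULL));  /* 8 */
        return 0;
}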
index f7dfeda83e5c444c85ec0527e2dba9fa86dc207a..121f59c6ee54e0159aab44889274f3bdf0b1041a 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/cpufeature.h>
 #include <asm/setup.h>
 
-#define debug_putstr(v) early_printk(v)
+#define debug_putstr(v) early_printk("%s", v)
 #define has_cpuflag(f) boot_cpu_has(f)
 #define get_boot_seed() kaslr_offset()
 #endif
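The one-liner above is a classic format-string fix: passing an arbitrary buffer as the format argument lets any '%' in it be parsed as a conversion. A tiny userspace illustration:

#include <stdio.h>

int main(void)
{
        const char *v = "100% done";

        /* printf(v); */        /* wrong: '%' in v is parsed as a conversion */
        printf("%s\n", v);      /* safe: v is treated as data, not a format */
        return 0;
}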
index ec21796ac5fd5a95f5e386af57a67d2e4ba51f61..4473cb4f8b906dcae083a4b29c38b72d9b7d56d3 100644 (file)
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
 {
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);
 
-               if (!pmd_present(*pmd))
-                       set_pmd(pmd, __pmd(addr | pmd_flag));
+               if (pmd_present(*pmd))
+                       continue;
+
+               set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
        }
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
-                       ident_pmd_init(info->pmd_flag, pmd, addr, next);
+                       ident_pmd_init(info, pmd, addr, next);
                        continue;
                }
                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
-               ident_pmd_init(info->pmd_flag, pmd, addr, next);
+               ident_pmd_init(info, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                             unsigned long addr, unsigned long end)
+                             unsigned long pstart, unsigned long pend)
 {
+       unsigned long addr = pstart + info->offset;
+       unsigned long end = pend + info->offset;
        unsigned long next;
        int result;
-       int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
        for (; addr < end; addr = next) {
-               pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+               pgd_t *pgd = pgd_page + pgd_index(addr);
                pud_t *pud;
 
                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
index 620928903be3eead494b3f3e290f0bdb2d8325ab..d28a2d741f9e9e74e989272101b4a50e1ffa3050 100644 (file)
@@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num)
        return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 3 4k for initial PMD_SIZE,  3 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE      (6 * PAGE_SIZE)
+/*
+ * By default we need 3 4k pages for the initial PMD_SIZE and 3 4k
+ * pages for 0-ISA_END_ADDRESS.  With KASLR memory randomization
+ * enabled, we may need up to twice as many pages, depending on the
+ * machine's e820 memory map and the PUD alignment.
+ */
+#ifndef CONFIG_RANDOMIZE_MEMORY
+#define INIT_PGD_PAGE_COUNT      6
+#else
+#define INIT_PGD_PAGE_COUNT      12
+#endif
+#define INIT_PGT_BUF_SIZE      (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void  __init early_alloc_pgt_buf(void)
 {
index 26dccd6c0df189a5b3a8df554af50801df8e3afc..bda8d5eef04d5594630114541d4b1ce043ff890a 100644 (file)
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-       return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+       return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */
@@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void)
         * add padding if needed (especially for memory hotplug support).
         */
        BUG_ON(kaslr_regions[0].base != &page_offset_base);
-       memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+       memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
                CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
 
        /* Adapt physical memory region size based on available memory */
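The DIV_ROUND_UP change matters because the old shift truncated the terabyte count downward, under-sizing the randomization range on machines with memory just past a TiB boundary. A small demo of the difference (TB_SHIFT value assumed from the kernel headers):

#include <stdio.h>
#include <stdint.h>

#define TB_SHIFT 40
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t bytes = (1ULL << TB_SHIFT) + 1;        /* 1 TiB + 1 byte */

        printf("shift: %llu TB\n",
               (unsigned long long)(bytes >> TB_SHIFT));                /* 1 */
        printf("round up: %llu TB\n",
               (unsigned long long)DIV_ROUND_UP(bytes, 1ULL << TB_SHIFT)); /* 2 */
        return 0;
}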
index 849dc09fa4f0b803b7139015de6b98f7778165bd..e3353c97d0862d2a20bdd060fc229af2de8324bb 100644 (file)
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
        }
 }
 
-static int populate_pmd(struct cpa_data *cpa,
-                       unsigned long start, unsigned long end,
-                       unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+                        unsigned long start, unsigned long end,
+                        unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-       unsigned int cur_pages = 0;
+       long cur_pages = 0;
        pmd_t *pmd;
        pgprot_t pmd_pgprot;
 
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
        return num_pages;
 }
 
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-                       pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+                        pgprot_t pgprot)
 {
        pud_t *pud;
        unsigned long end;
-       int cur_pages = 0;
+       long cur_pages = 0;
        pgprot_t pud_pgprot;
 
        end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 
        /* Map trailing leftover */
        if (start < end) {
-               int tmp;
+               long tmp;
 
                pud = pud_offset(pgd, start);
                if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
        pud_t *pud = NULL;      /* shut up gcc */
        pgd_t *pgd_entry;
-       int ret;
+       long ret;
 
        pgd_entry = cpa->pgd + pgd_index(addr);
 
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-       int ret, numpages = cpa->numpages;
+       unsigned long numpages = cpa->numpages;
+       int ret;
 
        while (numpages) {
                /*
index ecb1b69c1651323ea412914c1701712ea77d41ca..170cc4ff057b398382bef3dd635d6a9115460d88 100644 (file)
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
 }
 
 /*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with a single
+ * reserve_pfn_range call.
  */
 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
        enum page_cache_mode pcm;
 
        /* reserve the whole chunk starting from paddr */
-       if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+       if (!vma || (addr == vma->vm_start
+                               && size == (vma->vm_end - vma->vm_start))) {
                int ret;
 
                ret = reserve_pfn_range(paddr, size, prot, 0);
-               if (!ret)
+               if (ret == 0 && vma)
                        vma->vm_flags |= VM_PAT;
                return ret;
        }
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
        resource_size_t paddr;
        unsigned long prot;
 
-       if (!(vma->vm_flags & VM_PAT))
+       if (vma && !(vma->vm_flags & VM_PAT))
                return;
 
        /* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);
-       vma->vm_flags &= ~VM_PAT;
+       if (vma)
+               vma->vm_flags &= ~VM_PAT;
 }
 
 /*
index 837ea36a837d3063754c34c7fbb38be662b91226..6d52b94f4bb915d119e16da1628743b93fa7af39 100644 (file)
@@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
 
 /*
- * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ * Device [8086:2fc0]
+ * Erratum HSE43
+ * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
  *
- * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
- * entry BDF2.
+ * Devices [8086:6f60,6fa0,6fc0]
+ * Erratum BDF2
+ * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
  */
-static void pci_bdwep_bar(struct pci_dev *dev)
+static void pci_invalid_bar(struct pci_dev *dev)
 {
        dev->non_compliant_bars = 1;
 }
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
index b814ca675131ec1bf647a91290cff0a76a308814..7948be342ee990fe0e7c9358a2b639483be4eac7 100644 (file)
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:      list item for parent traversal.
  * @rcu:       RCU callback item for freeing.
  * @irq:       back pointer to parent.
+ * @enabled:   true if driver enabled IRQ
  * @virq:      the virtual IRQ value provided to the requesting driver.
  *
  * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
        struct list_head        node;
        struct rcu_head         rcu;
        struct vmd_irq_list     *irq;
+       bool                    enabled;
        unsigned int            virq;
 };
 
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&list_lock, flags);
+       WARN_ON(vmdirq->enabled);
        list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+       vmdirq->enabled = true;
        raw_spin_unlock_irqrestore(&list_lock, flags);
 
        data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
        data->chip->irq_mask(data);
 
        raw_spin_lock_irqsave(&list_lock, flags);
-       list_del_rcu(&vmdirq->node);
-       INIT_LIST_HEAD_RCU(&vmdirq->node);
+       if (vmdirq->enabled) {
+               list_del_rcu(&vmdirq->node);
+               vmdirq->enabled = false;
+       }
        raw_spin_unlock_irqrestore(&list_lock, flags);
 }
 
index 677e29e294732560e2a1e66edbaa57a94b5d5cd9..8dd3784eb0752ea6ff4cc0d21f249ca084a323f6 100644 (file)
@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
-       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+       if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
                return 0;
 
        /*
index 4480c06cade78d663f18db8ef09ff3db723dc1fe..89d1146f5a6f76424a0045266862eee566c2b930 100644 (file)
@@ -254,6 +254,7 @@ void __init efi_free_boot_services(void)
        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+               size_t rm_size;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
@@ -263,6 +264,26 @@ void __init efi_free_boot_services(void)
                if (md->attribute & EFI_MEMORY_RUNTIME)
                        continue;
 
+               /*
+                * Nasty quirk: if all sub-1MB memory is used for boot
+                * services, we can get here without having allocated the
+                * real mode trampoline.  It's too late to hand boot services
+                * memory back to the memblock allocator, so instead
+                * try to manually allocate the trampoline if needed.
+                *
+                * I've seen this on a Dell XPS 13 9350 with firmware
+                * 1.4.4 with SGX enabled booting Linux via Fedora 24's
+                * grub2-efi on a hard disk.  (And no, I don't know why
+                * this happened, but Linux should still try to boot
+                * rather than panicking early.)
+                */
+               rm_size = real_mode_size_needed();
+               if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
+                       set_real_mode_mem(start, rm_size);
+                       start += rm_size;
+                       size -= rm_size;
+               }
+
                free_bootmem_late(start, size);
        }
 
index 66b2166ea4a1c715a0362ed99cbeb3692a031476..23f2f3e41c7f48a60d3bd35fef9ef7025ca3e389 100644 (file)
@@ -187,7 +187,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
 void uv_bios_init(void)
 {
        uv_systab = NULL;
-       if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
+       if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+           !efi.uv_systab || efi_runtime_disabled()) {
                pr_crit("UV: UVsystab: missing\n");
                return;
        }
@@ -199,12 +200,14 @@ void uv_bios_init(void)
                return;
        }
 
+       /* Starting with UV4 the UV systab size is variable */
        if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
+               int size = uv_systab->size;
+
                iounmap(uv_systab);
-               uv_systab = ioremap(efi.uv_systab, uv_systab->size);
+               uv_systab = ioremap(efi.uv_systab, size);
                if (!uv_systab) {
-                       pr_err("UV: UVsystab: ioremap(%d) failed!\n",
-                               uv_systab->size);
+                       pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
                        return;
                }
        }
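The bug fixed above was a use-after-unmap: uv_systab->size was read after iounmap() had already torn the mapping down. A hedged sketch of the corrected pattern, with malloc()/free() standing in for ioremap()/iounmap():

#include <stdio.h>
#include <stdlib.h>

struct systab { int size; };

/* Copy out every field still needed *before* tearing down the
 * mapping, then use the saved copy to build the bigger one. */
static struct systab *grow_mapping(struct systab *st)
{
        int size = st->size;    /* read while the mapping is still live */
        struct systab *bigger;

        free(st);               /* st is dangling from here on */
        bigger = malloc((size_t)size);  /* safe: uses the saved copy */
        if (bigger)
                bigger->size = size;
        return bigger;
}

int main(void)
{
        struct systab *st = malloc(sizeof(*st));

        st->size = 4096;
        st = grow_mapping(st);
        printf("%d\n", st ? st->size : -1);     /* 4096 */
        free(st);
        return 0;
}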
index f0b5f2d402afb15f639be87f9581c44c7298bdef..9634557a544478fdde15eff06d8fa120ffce894d 100644 (file)
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
-               .kernel_mapping = true,
+               .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
                        return result;
        }
 
-       temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
+       temp_level4_pgt = __pa(pgd);
        return 0;
 }
 
index 705e3fffb4a1a3296ac5745603681cc434c44d92..5db706f14111c7c12a55fc0731a86641fe2c4741 100644 (file)
@@ -1,9 +1,11 @@
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/realmode.h>
+#include <asm/tlbflush.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -11,25 +13,37 @@ u32 *trampoline_cr4_features;
 /* Hold the pgd entry used on booting additional CPUs */
 pgd_t trampoline_pgd_entry;
 
+void __init set_real_mode_mem(phys_addr_t mem, size_t size)
+{
+       void *base = __va(mem);
+
+       real_mode_header = (struct real_mode_header *) base;
+       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+              base, (unsigned long long)mem, size);
+}
+
 void __init reserve_real_mode(void)
 {
        phys_addr_t mem;
-       unsigned char *base;
-       size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+       size_t size = real_mode_size_needed();
+
+       if (!size)
+               return;
+
+       WARN_ON(slab_is_available());
 
        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem)
-               panic("Cannot allocate trampoline\n");
+       if (!mem) {
+               pr_info("No sub-1M memory is available for the trampoline\n");
+               return;
+       }
 
-       base = __va(mem);
        memblock_reserve(mem, size);
-       real_mode_header = (struct real_mode_header *) base;
-       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-              base, (unsigned long long)mem, size);
+       set_real_mode_mem(mem, size);
 }
 
-void __init setup_real_mode(void)
+static void __init setup_real_mode(void)
 {
        u16 real_mode_seg;
        const u32 *rel;
@@ -84,7 +98,7 @@ void __init setup_real_mode(void)
 
        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
-       *trampoline_cr4_features = __read_cr4();
+       *trampoline_cr4_features = mmu_cr4_features;
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
@@ -100,7 +114,7 @@ void __init setup_real_mode(void)
  * need to mark it executable at do_pre_smp_initcalls() at least,
  * thus run it as an early_initcall().
  */
-static int __init set_real_mode_permissions(void)
+static void __init set_real_mode_permissions(void)
 {
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
@@ -119,7 +133,16 @@ static int __init set_real_mode_permissions(void)
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+}
+
+static int __init init_real_mode(void)
+{
+       if (!real_mode_header)
+               panic("Real mode trampoline was not allocated");
+
+       setup_real_mode();
+       set_real_mode_permissions();
 
        return 0;
 }
-early_initcall(set_real_mode_permissions);
+early_initcall(init_real_mode);
index ebd4dd6ef73b0e09a7fc70d8e605354be67a61cb..a7ef7b131e2564fd50d1614e8047f95841335c46 100644 (file)
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case EAX:
        case EIP:
        case UESP:
+               break;
        case ORIG_EAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
        case FS:
                if (value && (value & 3) != 3)
index faab418876ce7deb2e4889a9ff3ab30efd7ac852..0b5c184dd5b3b80894c3db60ead6432ad20dca57 100644 (file)
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case RSI:
        case RDI:
        case RBP:
+               break;
+
        case ORIG_RAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
 
        case FS:
index 8ffb089b19a5690596637d4eb9d0d8026eb88255..b86ebb1a9a7f45cff133b5e99b90e34734252d16 100644 (file)
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
index f39477538fef0a8759ce966b40e5034c0cb43bba..aa7354088008ba5f73d3ba1af83eb902b0629ec6 100644 (file)
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
 
-       if (bio_op(bio) == REQ_OP_DISCARD)
-               goto integrity_clone;
-
-       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
+               break;
+       case REQ_OP_WRITE_SAME:
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-               goto integrity_clone;
+               break;
+       default:
+               bio_for_each_segment(bv, bio_src, iter)
+                       bio->bi_io_vec[bio->bi_vcnt++] = bv;
+               break;
        }
 
-       bio_for_each_segment(bv, bio_src, iter)
-               bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
        if (bio_integrity(bio_src)) {
                int ret;
 
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
         * Discards need a mutable bio_vec to accommodate the payload
         * required by the DSM TRIM and UNMAP commands.
         */
-       if (bio_op(bio) == REQ_OP_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
                split = bio_clone_bioset(bio, gfp, bs);
        else
                split = bio_clone_fast(bio, gfp, bs);
index 999442ec4601487a35c76dad156dfa438a2a35b3..36c7ac328d8c17bd1967f898c91ff49f7a9b050d 100644 (file)
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
+       spin_unlock_irq(q->queue_lock);
 
        if (q->mq_ops)
                blk_mq_wake_waiters(q);
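Taking queue_lock here presumably serializes the flag update with other queue-flag writers; the flags word is updated with a plain read-modify-write, so an unlocked setter can lose a concurrent update. A deterministic simulation of that lost update (flag values are illustrative, not the kernel's):

#include <stdio.h>

#define QUEUE_FLAG_DYING   (1u << 0)
#define QUEUE_FLAG_STOPPED (1u << 1)

int main(void)
{
        unsigned int flags = 0;

        /* Simulate the race the lock prevents: both updaters read the
         * same stale value before either writes back. */
        unsigned int cpu0 = flags, cpu1 = flags;        /* both read 0 */

        cpu0 |= QUEUE_FLAG_DYING;
        cpu1 |= QUEUE_FLAG_STOPPED;
        flags = cpu0;           /* CPU0 writes 0x1 */
        flags = cpu1;           /* CPU1 writes 0x2: DYING bit is lost */

        printf("%#x\n", flags); /* 0x2, not the expected 0x3 */
        return 0;
}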
index 3eec75a9e91d8e172cfc0a413f74e52a8004aee3..2642e5fc8b69a03494b62638d4eca98ee07b7edc 100644 (file)
@@ -94,8 +94,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
+       unsigned bvecs = 0;
 
        bio_for_each_segment(bv, bio, iter) {
+               /*
+                * With arbitrary bio size, the incoming bio may be very
+                * big. We have to split the bio into small bios so that
+                * each holds at most BIO_MAX_PAGES bvecs because
+                * bio_clone() can fail to allocate big bvecs.
+                *
+                * It would be better to apply the limit per request
+                * queue in which bio_clone() is involved, instead of
+                * globally.  The biggest blocker is the bio_clone()
+                * in bio bounce.
+                *
+                * If a bio is split for this reason, we should be
+                * allowed to continue merging bios, but we don't do
+                * that for now, to keep the change simple.
+                *
+                * TODO: deal with bio bounce's bio_clone() gracefully
+                * and convert the global limit into a per-queue limit.
+                */
+               if (bvecs++ >= BIO_MAX_PAGES)
+                       goto split;
+
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
@@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
        struct bio *split, *res;
        unsigned nsegs;
 
-       if (bio_op(*bio) == REQ_OP_DISCARD)
+       switch (bio_op(*bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-       else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+               break;
+       case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-       else
+               break;
+       default:
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+               break;
+       }
 
        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
@@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
-       if (bio_op(bio) == REQ_OP_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
                return 1;
 
        if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       if (bio_op(bio) == REQ_OP_DISCARD) {
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */
-
-               if (bio->bi_vcnt)
-                       goto single_segment;
-
-               return 0;
-       }
-
-       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+               if (!bio->bi_vcnt)
+                       return 0;
+               /* Fall through */
+       case REQ_OP_WRITE_SAME:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
+       default:
+               break;
        }
 
        for_each_bio(bio)
index e931a0e8e73dfb23761a0fa2f2819e43207dc07c..c207fa9870ebcaaed6bfb02efa70b5f7333954f8 100644 (file)
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
+       /*
+        * Check if the hardware context is actually mapped to anything.
+        * If not tell the caller that it should skip this queue.
+        */
        hctx = q->queue_hw_ctx[hctx_idx];
+       if (!blk_mq_hw_queue_mapped(hctx)) {
+               ret = -EXDEV;
+               goto out_queue_exit;
+       }
        ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        if (!rq) {
-               blk_queue_exit(q);
-               return ERR_PTR(-EWOULDBLOCK);
+               ret = -EWOULDBLOCK;
+               goto out_queue_exit;
        }
 
        return rq;
+
+out_queue_exit:
+       blk_queue_exit(q);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
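
The error paths above are unified behind an out_queue_exit label so blk_queue_exit() runs exactly once however the function fails. A compilable userspace sketch of that single-exit unwind idiom (the resource and error values are made up):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_setup(int fail_step)
{
        int ret = 0;
        char *buf = malloc(64);

        if (!buf)
                return -ENOMEM;

        if (fail_step == 1) {
                ret = -EXDEV;           /* e.g. queue not mapped */
                goto out_free;
        }
        if (fail_step == 2) {
                ret = -EWOULDBLOCK;     /* e.g. allocation failed */
                goto out_free;
        }
        /* success path would use buf here */
out_free:
        free(buf);                      /* one cleanup site for all paths */
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", do_setup(0), do_setup(1), do_setup(2));
        return 0;
}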
 
@@ -793,11 +805,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        struct list_head *dptr;
        int queued;
 
-       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;
 
+       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+               cpu_online(hctx->next_cpu));
+
        hctx->run++;
 
        /*
@@ -1036,10 +1049,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-                                           struct blk_mq_ctx *ctx,
                                            struct request *rq,
                                            bool at_head)
 {
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
@@ -1053,20 +1067,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+       __blk_mq_insert_req_list(hctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
-               bool async)
+                          bool async)
 {
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-       current_ctx = blk_mq_get_ctx(q);
-       if (!cpu_online(ctx->cpu))
-               rq->mq_ctx = ctx = current_ctx;
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1086,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
-
-       blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1096,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *current_ctx;
 
        trace_block_unplug(q, depth, !from_schedule);
 
-       current_ctx = blk_mq_get_ctx(q);
-
-       if (!cpu_online(ctx->cpu))
-               ctx = current_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
        /*
@@ -1107,15 +1110,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
                struct request *rq;
 
                rq = list_first_entry(list, struct request, queuelist);
+               BUG_ON(rq->mq_ctx != ctx);
                list_del_init(&rq->queuelist);
-               rq->mq_ctx = ctx;
-               __blk_mq_insert_req_list(hctx, ctx, rq, false);
+               __blk_mq_insert_req_list(hctx, rq, false);
        }
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);
-       blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1632,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
        return 0;
 }
 
+/*
+ * 'cpu' is going away. Splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that the
+ * hw queue gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-       struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);
 
-       /*
-        * Move ctx entries to new CPU, if this one is going away.
-        */
-       ctx = __blk_mq_get_ctx(q, cpu);
+       ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1654,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
        if (list_empty(&tmp))
                return NOTIFY_OK;
 
-       ctx = blk_mq_get_ctx(q);
-       spin_lock(&ctx->lock);
-
-       while (!list_empty(&tmp)) {
-               struct request *rq;
-
-               rq = list_first_entry(&tmp, struct request, queuelist);
-               rq->mq_ctx = ctx;
-               list_move_tail(&rq->queuelist, &ctx->rq_list);
-       }
-
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-       blk_mq_hctx_mark_pending(hctx, ctx);
-
-       spin_unlock(&ctx->lock);
+       spin_lock(&hctx->lock);
+       list_splice_tail_init(&tmp, &hctx->dispatch);
+       spin_unlock(&hctx->lock);
 
        blk_mq_run_hw_queue(hctx, true);
-       blk_mq_put_ctx(ctx);
        return NOTIFY_OK;
 }
 
index f1aba26f47194d2d107717ac769c4e7e7b42bc72..a3ea8260c94c89236f938fb255158be8cb880a73 100644 (file)
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
-        * long since now.
+        * long since now. A new slice is started only for an empty
+        * throttle group. If there are queued bios, there should be an
+        * active slice and it should be extended instead.
         */
-       if (throtl_slice_used(tg, rw))
+       if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
index 7096c22041e7e6680b320ca08e4be831591b6477..f7d973a56fd75076d816d8a52bc1169b7dc2ae36 100644 (file)
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
-               if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+               if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
index a9377bef25e3ed0b3d3b2bf82b1cc456ab371920..84d71482bf080288d2c379b03dbb02894ba3b4c5 100644 (file)
@@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL
 
 config CRYPT_CRC32C_VPMSUM
        tristate "CRC32c CRC algorithm (powerpc64)"
-       depends on PPC64
+       depends on PPC64 && ALTIVEC
        select CRYPTO_HASH
        select CRC32
        help
index 3699995301084f4a14261e76ec0e27c5ff32a6dc..a832426820e8ba5e912324153cea821083cd2b6a 100644 (file)
@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }
 
+       bsize = min(walk->walk_blocksize, n);
+
        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
                }
        }
 
-       bsize = min(walk->walk_blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);
 
index cf8037a87b2d124259df01378df3ac1b43973336..0c654e59f2157bc13e387a3ebfdcdc8b07b8170a 100644 (file)
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
 
 static int cryptd_hash_import(struct ahash_request *req, const void *in)
 {
-       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct shash_desc *desc = cryptd_shash_desc(req);
+
+       desc->tfm = ctx->child;
+       desc->flags = req->base.flags;
 
-       return crypto_shash_import(&rctx->desc, in);
+       return crypto_shash_import(desc, in);
 }
 
 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
        rctx = aead_request_ctx(req);
        compl = rctx->complete;
 
+       tfm = crypto_aead_reqtfm(req);
+
        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);
 
 out:
-       tfm = crypto_aead_reqtfm(req);
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);
 
index 1b01fe98e91f8cc19ec6e33c8198d715e64d5415..e3d889b122e0f5c3e7b7646b704436ee5ac5910c 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * echainiv: Encrypted Chain IV Generator
  *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
  * the plain text.  This algorithm requires that the block size be equal
  * to the IV size.  It is mainly useful for CBC.
  *
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-       u32 *a = (u32 *)dst;
-       u32 __percpu *b = echainiv_iv;
-
-       for (; size >= 4; size -= 4) {
-               *a++ = this_cpu_read(*b);
-               b++;
-       }
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-       const u32 *a = (const u32 *)src;
-       u32 __percpu *b = echainiv_iv;
-
-       for (; size >= 4; size -= 4) {
-               this_cpu_write(*b, *a);
-               a++;
-               b++;
-       }
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-       struct aead_request *subreq = aead_request_ctx(req);
-       struct crypto_aead *geniv;
-       unsigned int ivsize;
-
-       if (err == -EINPROGRESS)
-               return;
-
-       if (err)
-               goto out;
-
-       geniv = crypto_aead_reqtfm(req);
-       ivsize = crypto_aead_ivsize(geniv);
-
-       echainiv_write_iv(subreq->iv, ivsize);
-
-       if (req->iv != subreq->iv)
-               memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-       if (req->iv != subreq->iv)
-               kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-                                        int err)
-{
-       struct aead_request *req = base->data;
-
-       echainiv_encrypt_complete2(req, err);
-       aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
-       crypto_completion_t compl;
-       void *data;
+       __be64 nseqno;
+       u64 seqno;
        u8 *info;
        unsigned int ivsize = crypto_aead_ivsize(geniv);
        int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
 
        aead_request_set_tfm(subreq, ctx->child);
 
-       compl = echainiv_encrypt_complete;
-       data = req;
        info = req->iv;
 
        if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
                        return err;
        }
 
-       if (unlikely(!IS_ALIGNED((unsigned long)info,
-                                crypto_aead_alignmask(geniv) + 1))) {
-               info = kmalloc(ivsize, req->base.flags &
-                                      CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-                                                                 GFP_ATOMIC);
-               if (!info)
-                       return -ENOMEM;
-
-               memcpy(info, req->iv, ivsize);
-       }
-
-       aead_request_set_callback(subreq, req->base.flags, compl, data);
+       aead_request_set_callback(subreq, req->base.flags,
+                                 req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen, info);
        aead_request_set_ad(subreq, req->assoclen);
 
-       crypto_xor(info, ctx->salt, ivsize);
+       memcpy(&nseqno, info + ivsize - 8, 8);
+       seqno = be64_to_cpu(nseqno);
+       memset(info, 0, ivsize);
+
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-       echainiv_read_iv(info, ivsize);
 
-       err = crypto_aead_encrypt(subreq);
-       echainiv_encrypt_complete2(req, err);
-       return err;
+       do {
+               u64 a;
+
+               memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+               a |= 1;
+               a *= seqno;
+
+               memcpy(info + ivsize - 8, &a, 8);
+       } while ((ivsize -= 8));
+
+       return crypto_aead_encrypt(subreq);
 }
 
 static int echainiv_decrypt(struct aead_request *req)
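
The rewritten echainiv_encrypt() above derives each 64-bit IV word as (salt | 1) * seqno; forcing the multiplier odd makes it a unit modulo 2^64, so distinct sequence numbers map to distinct IV words. A self-contained userspace sketch of that construction (the salt bytes and seqno are arbitrary test values):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t salt[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
                             9, 10, 11, 12, 13, 14, 15, 16 };
        uint8_t iv[16] = { 0 };
        uint64_t seqno = 42;
        size_t ivsize = sizeof(iv);

        do {
                uint64_t a;

                memcpy(&a, salt + ivsize - 8, 8);
                a |= 1;         /* odd multiplier: invertible mod 2^64 */
                a *= seqno;
                memcpy(iv + ivsize - 8, &a, 8);
        } while ((ivsize -= 8));

        for (size_t i = 0; i < sizeof(iv); i++)
                printf("%02x", iv[i]);
        printf("\n");
        return 0;
}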
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        alg = crypto_spawn_aead_alg(spawn);
 
        err = -EINVAL;
-       if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-           inst->alg.ivsize > MAX_IV_SIZE)
+       if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
                goto free_inst;
 
        inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        inst->alg.init = aead_init_geniv;
        inst->alg.exit = aead_exit_geniv;
 
-       inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
        inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
index 877019a6d3ea81c3e130deb882e5ceccb1fe62ae..8baab4307f7b9a9064fa1234825fca29a34380c6 100644 (file)
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+       unsigned int dst_len;
        unsigned int pos;
-
-       if (err == -EOVERFLOW)
-               /* Decrypted value had no leading 0 byte */
-               err = -EINVAL;
+       u8 *out_buf;
 
        if (err)
                goto done;
 
-       if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-               err = -EINVAL;
+       err = -EINVAL;
+       dst_len = req_ctx->child_req.dst_len;
+       if (dst_len < ctx->key_size - 1)
                goto done;
+
+       out_buf = req_ctx->out_buf;
+       if (dst_len == ctx->key_size) {
+               if (out_buf[0] != 0x00)
+                       /* Decrypted value had no leading 0 byte */
+                       goto done;
+
+               dst_len--;
+               out_buf++;
        }
 
-       if (req_ctx->out_buf[0] != 0x02) {
-               err = -EINVAL;
+       if (out_buf[0] != 0x02)
                goto done;
-       }
-       for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-               if (req_ctx->out_buf[pos] == 0x00)
+
+       for (pos = 1; pos < dst_len; pos++)
+               if (out_buf[pos] == 0x00)
                        break;
-       if (pos < 9 || pos == req_ctx->child_req.dst_len) {
-               err = -EINVAL;
+       if (pos < 9 || pos == dst_len)
                goto done;
-       }
        pos++;
 
-       if (req->dst_len < req_ctx->child_req.dst_len - pos)
+       err = 0;
+
+       if (req->dst_len < dst_len - pos)
                err = -EOVERFLOW;
-       req->dst_len = req_ctx->child_req.dst_len - pos;
+       req->dst_len = dst_len - pos;
 
        if (!err)
                sg_copy_from_buffer(req->dst,
                                sg_nents_for_len(req->dst, req->dst_len),
-                               req_ctx->out_buf + pos, req->dst_len);
+                               out_buf + pos, req->dst_len);
 
 done:
        kzfree(req_ctx->out_buf);
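
The rewritten completion above walks an EME-PKCS1-v1_5 block: an optional leading 0x00, then 0x02, at least eight nonzero padding bytes, a 0x00 separator, and finally the message. A simplified userspace rendering of that decode (it keeps the leading zero byte, so the minimum separator position is 10 rather than the kernel's 9):

#include <stddef.h>
#include <stdio.h>

static int pkcs1_v15_unpad(const unsigned char *em, size_t len,
                           size_t *msg_off)
{
        size_t pos;

        if (len < 11 || em[0] != 0x00 || em[1] != 0x02)
                return -1;
        for (pos = 2; pos < len; pos++)
                if (em[pos] == 0x00)
                        break;
        if (pos < 10 || pos == len)     /* PS must be >= 8 bytes */
                return -1;
        *msg_off = pos + 1;
        return 0;
}

int main(void)
{
        unsigned char em[] = { 0x00, 0x02, 1, 2, 3, 4, 5, 6, 7, 8,
                               0x00, 'h', 'i' };
        size_t off;

        if (!pkcs1_v15_unpad(em, sizeof(em), &off))
                printf("message at offset %zu\n", off); /* 11 */
        return 0;
}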
index 62264397a2d2863680d7cec283aeec59dda02448..7e8ed96236cefa794ca39684c9c2592ca49f7976 100644 (file)
 #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
 
 static const u64 keccakf_rndc[24] = {
-       0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
-       0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
-       0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
-       0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
-       0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
-       0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
-       0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
-       0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+       0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
+       0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
+       0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
+       0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+       0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
+       0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
+       0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
+       0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
 };
 
 static const int keccakf_rotc[24] = {
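
The ULL suffixes added above matter on 32-bit builds: under gnu89, an unsuffixed constant wider than long draws an "integer constant is too large for 'long' type" warning, while the ULL form is unambiguously unsigned long long on every target. A trivial check:

#include <stdio.h>

int main(void)
{
        unsigned long long a = 0x800000000000808aULL;   /* portable */

        printf("%zu %llx\n", sizeof(a), a);     /* 8 800000000000808a */
        return 0;
}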
index 8c234dd9b8bc595c21d9d5abaa7327e02b9cc572..80cc7c089a15e908ae070d95ad4879e5b6309555 100644 (file)
@@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
        u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+       const u32 STATUS_MASK = 0x80000037;
 
        if (mmio->num_lines)
                offset = to_interleave_offset(offset, mmio);
 
-       return readl(mmio->addr.base + offset);
+       return readl(mmio->addr.base + offset) & STATUS_MASK;
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
index 4c745bf389fea19b0cd049533183087be8873a5d..161f91539ae62b5d05547a939825321015580943 100644 (file)
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
                list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                        struct acpi_nfit_system_address *spa = nfit_spa->spa;
 
-                       if (nfit_spa_type(spa) == NFIT_SPA_PM)
+                       if (nfit_spa_type(spa) != NFIT_SPA_PM)
                                continue;
                        /* find the spa that covers the mce addr */
                        if (spa->address > mce->addr)
index ad9fc84a8601206cec6cc1fb15cb36e5c23250cb..e878fc799af792aa7ebd77076d57306eea2d4d0b 100644 (file)
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void)
 
 static struct acpi_probe_entry *ape;
 static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
 
 static int __init acpi_match_madt(struct acpi_subtable_header *header,
                                  const unsigned long end)
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
        if (acpi_disabled)
                return 0;
 
-       spin_lock(&acpi_probe_lock);
+       mutex_lock(&acpi_probe_mutex);
        for (ape = ap_head; nr; ape++, nr--) {
                if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
                        acpi_probe_count = 0;
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
                                count++;
                }
        }
-       spin_unlock(&acpi_probe_lock);
+       mutex_unlock(&acpi_probe_mutex);
 
        return count;
 }
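
acpi_probe_lock becomes a mutex above because the probe callbacks invoked under it can sleep, which is illegal under a spinlock. A userspace analogy using pthreads (the names and the sleep stand in for the kernel's blocking probe work):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;

static int probe_table(int nr)
{
        int count = 0;

        pthread_mutex_lock(&probe_mutex);       /* sleeping lock */
        while (nr--) {
                usleep(1000);   /* may block: forbidden under a spinlock */
                count++;
        }
        pthread_mutex_unlock(&probe_mutex);
        return count;
}

int main(void)
{
        printf("%d\n", probe_table(3));
        return 0;
}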
index 7461a587b39b4e9d953e3a0063b9f1fb8cad0319..dcf2c724fd066c33cf74a3f502d2ae6ba3d276bc 100644 (file)
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
 
                /* Do not receive interrupts sent by dummy ports */
                if (!pp) {
-                       disable_irq(irq + i);
+                       disable_irq(irq);
                        continue;
                }
 
index 633aa2934a18f5c05f5e03cdaf5742a9b0735f99..44f97ad3c88d5234ca78e6fd348402393e4ab47f 100644 (file)
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
        ap->ioaddr.altstatus_addr = base + 0x1E;
        ap->ioaddr.bmdma_addr = base;
        ata_sff_std_ports(&ap->ioaddr);
-       ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+       ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 
        ninja32_program(base);
        /* FIXME: Should we disable them at remove ? */
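
The one-character pata_ninja32 fix above is the classic assign-versus-OR-in flags bug: '=' silently clears any bits set earlier, '|=' preserves them. With illustrative flag values:

#include <stdio.h>

#define FLAG_FROZEN     0x01    /* set earlier during port init */
#define FLAG_PIO32      0x10

int main(void)
{
        unsigned int pflags = FLAG_FROZEN;

        pflags |= FLAG_PIO32;           /* keeps FLAG_FROZEN */
        printf("%#x\n", pflags);        /* 0x11 */
        return 0;
}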
index e097d355cc04a2040a129b2ffbea13c404169182..82a081ea43176ca8e1d8accd886ad7065287a8aa 100644 (file)
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        int (*callback)(struct device *);
        int retval;
 
-       trace_rpm_idle(dev, rpmflags);
+       trace_rpm_idle_rcuidle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
-               trace_rpm_return_int(dev, _THIS_IP_, 0);
+               trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
                return 0;
        }
 
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        wake_up_all(&dev->power.wait_queue);
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval;
 
-       trace_rpm_suspend(dev, rpmflags);
+       trace_rpm_suspend_rcuidle(dev, rpmflags);
 
  repeat:
        retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        }
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval = 0;
 
-       trace_rpm_resume(dev, rpmflags);
+       trace_rpm_resume_rcuidle(dev, rpmflags);
 
  repeat:
        if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
                spin_lock_irq(&dev->power.lock);
        }
 
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 }
index aa56af87d94118338befbbf62aa7f7b64cfd6570..b11af3f2c1dbf2e1d6faf31cb7bc38e663b2285c 100644 (file)
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
+               unsigned int dist, best_dist = UINT_MAX;
 
                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                &base_reg, &top_reg);
 
                        if (base_reg <= max && top_reg >= min) {
-                               new_base_reg = min(reg, base_reg);
-                               new_top_reg = max(reg, top_reg);
-                       } else {
-                               if (max < base_reg)
-                                       node = node->rb_left;
+                               if (reg < base_reg)
+                                       dist = base_reg - reg;
+                               else if (reg > top_reg)
+                                       dist = reg - top_reg;
                                else
-                                       node = node->rb_right;
-
-                               continue;
+                                       dist = 0;
+                               if (dist < best_dist) {
+                                       rbnode = rbnode_tmp;
+                                       best_dist = dist;
+                                       new_base_reg = min(reg, base_reg);
+                                       new_top_reg = max(reg, top_reg);
+                               }
                        }
 
-                       ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+                       /*
+                        * Keep looking, we want to choose the closest block,
+                        * otherwise we might end up creating overlapping
+                        * blocks, which breaks the rbtree.
+                        */
+                       if (reg < base_reg)
+                               node = node->rb_left;
+                       else if (reg > top_reg)
+                               node = node->rb_right;
+                       else
+                               break;
+               }
+
+               if (rbnode) {
+                       ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
-                       rbtree_ctx->cached_rbnode = rbnode_tmp;
+                       rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }
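
The rbtree hunk above stops inserting into the first overlapping block it finds and instead scores every candidate by distance, picking the closest one so that extending a block can never create overlapping ranges. A compilable sketch of the distance metric it uses:

#include <stdio.h>

static unsigned int block_dist(unsigned int reg, unsigned int base,
                               unsigned int top)
{
        if (reg < base)
                return base - reg;
        if (reg > top)
                return reg - top;
        return 0;       /* reg already falls inside [base, top] */
}

int main(void)
{
        printf("%u %u %u\n", block_dist(5, 10, 20),
               block_dist(25, 10, 20), block_dist(15, 10, 20));
        return 0;       /* prints: 5 5 0 */
}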
 
index df7ff72908216f5d7a97f03af3f06ae4fd5adefd..4e582561e1e7a0cdffeaf8b3901d3a56d66a37ad 100644 (file)
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
 
        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
-               if (!regmap_volatile(map, i * map->reg_stride))
+               if (regmap_readable(map, i * map->reg_stride) &&
+                   !regmap_volatile(map, i * map->reg_stride))
                        count++;
 
-       /* all registers are volatile, so just bypass */
+       /* all registers are unreadable or volatile, so just bypass */
        if (!count) {
                map->cache_bypass = true;
                return 0;
index 51fa7d66a393bf5bee6818c1432211dfd76582ce..e964d068874dea77f0282d58e0dd97501f7481a6 100644 (file)
@@ -1474,6 +1474,12 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                ret = map->bus->write(map->bus_context, buf, len);
 
                kfree(buf);
+       } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+               /* regcache_drop_region() takes a lock that we already
+                * hold, so call map->cache_ops->drop() directly.
+                */
+               if (map->cache_ops && map->cache_ops->drop)
+                       map->cache_ops->drop(map, reg, reg + 1);
        }
 
        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
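
The new branch above drops the cached value when the bus write fails, so the cache never claims a write that did not reach the hardware. A toy model of that invalidate-on-failure policy (the register map, error value, and failing register are invented):

#include <stdio.h>

#define NREGS 8
static int cache[NREGS];
static int cache_valid[NREGS];

static int hw_write(unsigned int reg, int val)
{
        (void)val;
        return reg == 3 ? -5 : 0;       /* pretend reg 3 is broken */
}

static int cached_write(unsigned int reg, int val)
{
        int ret;

        cache[reg] = val;
        cache_valid[reg] = 1;
        ret = hw_write(reg, val);
        if (ret)
                cache_valid[reg] = 0;   /* like ->drop(): forget stale entry */
        return ret;
}

int main(void)
{
        cached_write(2, 7);
        cached_write(3, 9);
        printf("%d %d\n", cache_valid[2], cache_valid[3]);      /* 1 0 */
        return 0;
}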
index b71a9c76700956e1ddfda4e742a2053b0f0769fe..e3d8e4ced4a23c57d1f36da593e80a64d8b1d142 100644 (file)
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (UFDCS->rawcmd == 1)
                UFDCS->rawcmd = 2;
 
-       if (mode & (FMODE_READ|FMODE_WRITE)) {
-               UDRS->last_checked = 0;
-               clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-               check_disk_change(bdev);
-               if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-                       goto out;
-               if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+       if (!(mode & FMODE_NDELAY)) {
+               if (mode & (FMODE_READ|FMODE_WRITE)) {
+                       UDRS->last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+                       check_disk_change(bdev);
+                       if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+                               goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+                               goto out;
+               }
+               res = -EROFS;
+               if ((mode & FMODE_WRITE) &&
+                   !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
                        goto out;
        }
-
-       res = -EROFS;
-
-       if ((mode & FMODE_WRITE) &&
-                       !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-               goto out;
-
        mutex_unlock(&open_lock);
        mutex_unlock(&floppy_mutex);
        return 0;
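
The floppy hunk above restores O_NDELAY semantics: a non-blocking open skips the media-change and write-protect checks instead of failing. From userspace the distinction looks like this (the device path is just an example, and the open may legitimately fail on machines without a drive):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* O_NONBLOCK (aka O_NDELAY) asks the driver not to touch media */
        int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);

        if (fd < 0)
                perror("open");
        else
                close(fd);
        return 0;
}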
index 1a04af6d24212cdd16e5c8091d01f16e2b409384..6c6519f6492a4198c78cae1eaad5e33e03efd2d9 100644 (file)
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
        bool need_put = !!rbd_dev->opts;
 
        ceph_oid_destroy(&rbd_dev->header_oid);
+       ceph_oloc_destroy(&rbd_dev->header_oloc);
 
        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        }
        spec->pool_id = (u64)rc;
 
-       /* The ceph file layout needs to fit pool id in 32 bits */
-
-       if (spec->pool_id > (u64)U32_MAX) {
-               rbd_warn(NULL, "pool id too large (%llu > %u)",
-                               (unsigned long long)spec->pool_id, U32_MAX);
-               rc = -EIO;
-               goto err_out_client;
-       }
-
        rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
        if (!rbd_dev) {
                rc = -ENOMEM;
index 1523e05c46fc95b29c47af3b51ebdc9f93af9029..93b1aaa5ba3be26d5de4d0a7b461ecc2fe7beb61 100644 (file)
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
                num_vqs = 1;
 
        vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
-       if (!vblk->vqs) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!vblk->vqs)
+               return -ENOMEM;
 
        names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-       if (!names)
-               goto err_names;
-
        callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-       if (!callbacks)
-               goto err_callbacks;
-
        vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
-       if (!vqs)
-               goto err_vqs;
+       if (!names || !callbacks || !vqs) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
        /* Discover virtqueues and write information to configuration.  */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
-               goto err_find_vqs;
+               goto out;
 
        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
        }
        vblk->num_vqs = num_vqs;
 
- err_find_vqs:
+out:
        kfree(vqs);
- err_vqs:
        kfree(callbacks);
- err_callbacks:
        kfree(names);
- err_names:
        if (err)
                kfree(vblk->vqs);
- out:
        return err;
 }
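
init_vq() above collapses three allocation-failure branches into one check, because kfree(NULL) is a no-op, and a single label now frees the temporary arrays on success and failure alike. The same shape in portable C (free(NULL) is equally harmless):

#include <stdlib.h>

static int alloc_three(void)
{
        int err = 0;
        char *a = malloc(16), *b = malloc(16), *c = malloc(16);

        if (!a || !b || !c) {
                err = -1;       /* free(NULL) below makes this safe */
                goto out;
        }

        /* ... set up using a, b and c ... */

out:
        free(a);
        free(b);
        free(c);
        return err;
}

int main(void)
{
        return alloc_three() ? 1 : 0;
}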
 
index be4fea6a5dd33695df30f87a1fea5341eadbd709..88ef6d4729b46594b8542fa81467a45763f2f343 100644 (file)
@@ -189,6 +189,8 @@ struct blkfront_info
        struct mutex mutex;
        struct xenbus_device *xbdev;
        struct gendisk *gd;
+       u16 sector_size;
+       unsigned int physical_sector_size;
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
        .map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+       struct request_queue *rq = info->rq;
+       struct gendisk *gd = info->gd;
+       unsigned int segments = info->max_indirect_segments ? :
+                               BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+       if (info->feature_discard) {
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+               blk_queue_max_discard_sectors(rq, get_capacity(gd));
+               rq->limits.discard_granularity = info->discard_granularity;
+               rq->limits.discard_alignment = info->discard_alignment;
+               if (info->feature_secdiscard)
+                       queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+       }
+
+       /* Hard sector size and max sectors impersonate the equiv. hardware. */
+       blk_queue_logical_block_size(rq, info->sector_size);
+       blk_queue_physical_block_size(rq, info->physical_sector_size);
+       blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+       /* Each segment in a request is up to an aligned page in size. */
+       blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+       blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+       /* Ensure a merged request will fit in a single I/O ring slot. */
+       blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+       /* Make sure buffer addresses are sector-aligned. */
+       blk_queue_dma_alignment(rq, 511);
+
+       /* Make sure we don't use bounce buffers. */
+       blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-                               unsigned int physical_sector_size,
-                               unsigned int segments)
+                               unsigned int physical_sector_size)
 {
        struct request_queue *rq;
        struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
        }
 
        rq->queuedata = info;
-       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-       if (info->feature_discard) {
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-               blk_queue_max_discard_sectors(rq, get_capacity(gd));
-               rq->limits.discard_granularity = info->discard_granularity;
-               rq->limits.discard_alignment = info->discard_alignment;
-               if (info->feature_secdiscard)
-                       queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-       }
-
-       /* Hard sector size and max sectors impersonate the equiv. hardware. */
-       blk_queue_logical_block_size(rq, sector_size);
-       blk_queue_physical_block_size(rq, physical_sector_size);
-       blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-       /* Each segment in a request is up to an aligned page in size. */
-       blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-       blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-       /* Ensure a merged request will fit in a single I/O ring slot. */
-       blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-       /* Make sure buffer addresses are sector-aligned. */
-       blk_queue_dma_alignment(rq, 511);
-
-       /* Make sure we don't use bounce buffers. */
-       blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-       gd->queue = rq;
+       info->rq = gd->queue = rq;
+       info->gd = gd;
+       info->sector_size = sector_size;
+       info->physical_sector_size = physical_sector_size;
+       blkif_set_queue_limits(info);
 
        return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        gd->private_data = info;
        set_capacity(gd, capacity);
 
-       if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-                                info->max_indirect_segments ? :
-                                BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+       if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
                del_gendisk(gd);
                goto release;
        }
 
-       info->rq = gd->queue;
-       info->gd = gd;
-
        xlvbd_flush(info);
 
        if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
                        rinfo->ring_ref[i] = GRANT_INVALID_REF;
                }
        }
-       free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+       free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
        rinfo->ring.sring = NULL;
 
        if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
        struct split_bio *split_bio;
 
        blkfront_gather_backend_features(info);
+       /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+       blkif_set_queue_limits(info);
        segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       blk_queue_max_segments(info->rq, segs);
+       blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
        for (r_index = 0; r_index < info->nr_rings; r_index++) {
                struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
-               return;
+               goto fail;
        }
 
        xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
        device_add_disk(&info->xbdev->dev, info->gd);
 
        info->is_ready = 1;
+       return;
+
+fail:
+       blkif_free(info, 0);
+       return;
 }
 
 /**
index 5755907f836f8d7a6aa5c892e8eef542e6e9c678..ffa7c9dcbd7a1be41ea07e4bf7e976fb494f65d6 100644 (file)
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
-       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
        NULL
 };
index 97a9185af433f052f315ae99e12f41232a1b1a3d..884c0305e29033085ff4131434f29f07bd5d78ae 100644 (file)
@@ -187,6 +187,7 @@ struct arm_ccn {
        struct arm_ccn_component *xp;
 
        struct arm_ccn_dt dt;
+       int mn_id;
 };
 
 static DEFINE_MUTEX(arm_ccn_mutex);
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
 #define CCN_CONFIG_TYPE(_config)       (((_config) >> 8) & 0xff)
 #define CCN_CONFIG_EVENT(_config)      (((_config) >> 16) & 0xff)
 #define CCN_CONFIG_PORT(_config)       (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config)                (((_config) >> 24) & 0x3)
 #define CCN_CONFIG_VC(_config)         (((_config) >> 26) & 0x7)
 #define CCN_CONFIG_DIR(_config)                (((_config) >> 29) & 0x1)
 #define CCN_CONFIG_MASK(_config)       (((_config) >> 30) & 0xf)
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
 static CCN_FORMAT_ATTR(type, "config:8-15");
 static CCN_FORMAT_ATTR(event, "config:16-23");
 static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
 static CCN_FORMAT_ATTR(vc, "config:26-28");
 static CCN_FORMAT_ATTR(dir, "config:29-29");
 static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
        &arm_ccn_pmu_format_attr_type.attr.attr,
        &arm_ccn_pmu_format_attr_event.attr.attr,
        &arm_ccn_pmu_format_attr_port.attr.attr,
+       &arm_ccn_pmu_format_attr_bus.attr.attr,
        &arm_ccn_pmu_format_attr_vc.attr.attr,
        &arm_ccn_pmu_format_attr_dir.attr.attr,
        &arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event {
 static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct arm_ccn_pmu_event *event = container_of(attr,
                        struct arm_ccn_pmu_event, attr);
        ssize_t res;
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                break;
        case CCN_TYPE_XP:
                res += snprintf(buf + res, PAGE_SIZE - res,
-                               ",xp=?,port=?,vc=?,dir=?");
+                               ",xp=?,vc=?");
                if (event->event == CCN_EVENT_WATCHPOINT)
                        res += snprintf(buf + res, PAGE_SIZE - res,
-                                       ",cmp_l=?,cmp_h=?,mask=?");
+                                       ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+               else
+                       res += snprintf(buf + res, PAGE_SIZE - res,
+                                       ",bus=?");
+
+               break;
+       case CCN_TYPE_MN:
+               res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
                break;
        default:
                res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 }
 
 static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
-       CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        if (has_branch_stack(event) || event->attr.exclude_user ||
                        event->attr.exclude_kernel || event->attr.exclude_hv ||
-                       event->attr.exclude_idle) {
+                       event->attr.exclude_idle || event->attr.exclude_host ||
+                       event->attr.exclude_guest) {
                dev_warn(ccn->dev, "Can't exclude execution levels!\n");
-               return -EOPNOTSUPP;
+               return -EINVAL;
        }
 
        if (event->cpu < 0) {
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        /* Validate node/xp vs topology */
        switch (type) {
+       case CCN_TYPE_MN:
+               if (node_xp != ccn->mn_id) {
+                       dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+                       return -EINVAL;
+               }
+               break;
        case CCN_TYPE_XP:
                if (node_xp >= ccn->num_xps) {
                        dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
        struct arm_ccn_component *xp;
        u32 val, dt_cfg;
 
+       /* Nothing to do for the cycle counter */
+       if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+               return;
+
        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
                xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
        else
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
                        arm_ccn_pmu_read_counter(ccn, hw->idx));
        hw->state = 0;
 
-       /*
-        * Pin the timer, so that the overflows are handled by the chosen
-        * event->cpu (this is the same one as presented in "cpumask"
-        * attribute).
-        */
-       if (!ccn->irq)
-               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
-                               HRTIMER_MODE_REL_PINNED);
-
        /* Set the DT bus input, engaging the counter */
        arm_ccn_pmu_xp_dt_config(event, 1);
 }
 
 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 {
-       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
-       u64 timeout;
 
        /* Disable counting, setting the DT bus to pass-through mode */
        arm_ccn_pmu_xp_dt_config(event, 0);
 
-       if (!ccn->irq)
-               hrtimer_cancel(&ccn->dt.hrtimer);
-
-       /* Let the DT bus drain */
-       timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
-                       ccn->num_xps;
-       while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
-                       timeout)
-               cpu_relax();
-
        if (flags & PERF_EF_UPDATE)
                arm_ccn_pmu_event_update(event);
 
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Comparison values */
        writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
-       writel((cmp_l >> 32) & 0xefffffff,
+       writel((cmp_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
        writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
        writel((cmp_h >> 32) & 0x0fffffff,
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Mask */
        writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
-       writel((mask_l >> 32) & 0xefffffff,
+       writel((mask_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
        writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
        writel((mask_h >> 32) & 0x0fffffff,
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
 
        id = (CCN_CONFIG_VC(event->attr.config) << 4) |
-                       (CCN_CONFIG_PORT(event->attr.config) << 3) |
+                       (CCN_CONFIG_BUS(event->attr.config) << 3) |
                        (CCN_CONFIG_EVENT(event->attr.config) << 0);
 
        val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
        spin_unlock(&ccn->dt.config_lock);
 }
 
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+       return bitmap_weight(ccn->dt.pmu_counters_mask,
+                            CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 {
        int err;
        struct hw_perf_event *hw = &event->hw;
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 
        err = arm_ccn_pmu_event_alloc(event);
        if (err)
                return err;
 
+       /*
+        * Pin the timer, so that the overflows are handled by the chosen
+        * event->cpu (this is the same one as presented in "cpumask"
+        * attribute).
+        */
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+                             HRTIMER_MODE_REL_PINNED);
+
        arm_ccn_pmu_event_config(event);
 
        hw->state = PERF_HES_STOPPED;
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
        arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
 
        arm_ccn_pmu_event_release(event);
+
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+               hrtimer_cancel(&ccn->dt.hrtimer);
 }
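
Together, the event_add()/event_del() hunks above move the polling hrtimer to first-user-starts, last-user-stops semantics driven by the active-counter count. The pattern in miniature:

#include <stdio.h>

static int active;

static void event_add(void)
{
        if (++active == 1)
                puts("timer start");    /* first active counter */
}

static void event_del(void)
{
        if (--active == 0)
                puts("timer stop");     /* last counter gone */
}

int main(void)
{
        event_add();
        event_add();    /* timer already running, nothing to do */
        event_del();
        event_del();    /* prints "timer stop" */
        return 0;
}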
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
        arm_ccn_pmu_event_update(event);
 }
 
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val |= CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val &= ~CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
 static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
 {
        u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
                .start = arm_ccn_pmu_event_start,
                .stop = arm_ccn_pmu_event_stop,
                .read = arm_ccn_pmu_event_read,
+               .pmu_enable = arm_ccn_pmu_enable,
+               .pmu_disable = arm_ccn_pmu_disable,
        };
 
        /* No overflow interrupt? Have to use a timer instead. */
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
 
        switch (type) {
        case CCN_TYPE_MN:
+               ccn->mn_id = id;
+               return 0;
        case CCN_TYPE_DT:
                return 0;
        case CCN_TYPE_XP:
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
                /* Can set 'disable' bits, so can acknowledge interrupts */
                writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
                                ccn->base + CCN_MN_ERRINT_STATUS);
-               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
-                               dev_name(ccn->dev), ccn);
+               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+                                      IRQF_NOBALANCING | IRQF_NO_THREAD,
+                                      dev_name(ccn->dev), ccn);
                if (err)
                        return err;
 
index c3cb76b363c63c54d67343dacf4c9ec20032a95f..9efdf1de4035e226825c8435f72f07e8055584d2 100644 (file)
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
 
        parent = class_find_device(vexpress_config_class, NULL, bridge,
                        vexpress_config_node_match);
+       of_node_put(bridge);
        if (WARN_ON(!parent))
                return -ENODEV;
 
index 44311296ec021350c7a9598ee32010e17463da3e..0f7d28a98b9a0ad926a99d82221b5cff4b031508 100644 (file)
@@ -845,6 +845,8 @@ void intel_gtt_insert_page(dma_addr_t addr,
                           unsigned int flags)
 {
        intel_private.driver->write_entry(addr, pg, flags);
+       if (intel_private.driver->chipset_flush)
+               intel_private.driver->chipset_flush();
 }
 EXPORT_SYMBOL(intel_gtt_insert_page);
 
index 56ad5a5936a9a3fb17f6480dceb7acd37e3c8be6..8c0770bf8881351c1d956d310c5d670684090770 100644 (file)
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
 
 config HW_RANDOM_MXC_RNGA
        tristate "Freescale i.MX RNGA Random Number Generator"
-       depends on ARCH_HAS_RNGA
+       depends on SOC_IMX31
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
index 08c7e23ed535bd665d3a9a6db68293d2ae93860b..0c75c3f1689fc2a053daeaab7f6e862f451c331b 100644 (file)
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                goto out;
 
        rc = tpm2_do_selftest(chip);
-       if (rc != TPM2_RC_INITIALIZE) {
+       if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
                dev_err(&chip->dev, "TPM self test failed\n");
                goto out;
        }
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                }
        }
 
-       return rc;
 out:
        if (rc > 0)
                rc = -ENODEV;
index d2406fe2553396a807a169ddd93b656a4876a537..5da47e26a0124262734357853eb8290d6e817712 100644 (file)
@@ -165,6 +165,12 @@ struct ports_device {
         */
        struct virtqueue *c_ivq, *c_ovq;
 
+       /*
+        * A control packet buffer for guest->host requests, protected
+        * by c_ovq_lock.
+        */
+       struct virtio_console_control cpkt;
+
        /* Array of per-port IO virtqueues */
        struct virtqueue **in_vqs, **out_vqs;
 
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
                                  unsigned int event, unsigned int value)
 {
        struct scatterlist sg[1];
-       struct virtio_console_control cpkt;
        struct virtqueue *vq;
        unsigned int len;
 
        if (!use_multiport(portdev))
                return 0;
 
-       cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
-       cpkt.event = cpu_to_virtio16(portdev->vdev, event);
-       cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
        vq = portdev->c_ovq;
 
-       sg_init_one(sg, &cpkt, sizeof(cpkt));
-
        spin_lock(&portdev->c_ovq_lock);
-       if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+       portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+       portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+       portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+       sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+       if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
                virtqueue_kick(vq);
                while (!virtqueue_get_buf(vq, &len)
                        && !virtqueue_is_broken(vq))
                        cpu_relax();
        }
+
        spin_unlock(&portdev->c_ovq_lock);
        return 0;
 }
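
The virtio_console change above moves the control packet out of __send_control_msg()'s stack frame into the long-lived ports_device: a buffer handed to the virtqueue may be DMA-mapped, and on-stack memory (notably with virtually mapped stacks) is not safe for that. A userspace sketch of the lifetime bug being avoided (struct and field names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ctrl_msg { unsigned int id, event, value; };

struct device_state {
        struct ctrl_msg cpkt;           /* long-lived, like portdev->cpkt */
        const struct ctrl_msg *queued;  /* what the consumer will read */
};

static void send_msg_bad(struct device_state *s, unsigned int id)
{
        struct ctrl_msg m = { .id = id };

        s->queued = &m;         /* BUG: m dies when this function returns */
}

static void send_msg_good(struct device_state *s, unsigned int id)
{
        s->cpkt.id = id;
        s->queued = &s->cpkt;   /* valid for as long as *s lives */
}

int main(void)
{
        struct device_state *s = calloc(1, sizeof(*s));

        if (!s)
                return 1;
        send_msg_bad(s, 1);     /* s->queued now dangles */
        send_msg_good(s, 2);
        printf("%u\n", s->queued->id);
        free(s);
        return 0;
}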
index d359c92e13a65c00725f3a1178a1096babc0e226..e38bf60c0ff4e28cfd0005ba888f3280962619a7 100644 (file)
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED(".s1",        CLK_S1,            CLK_PLL1_DIV2,  3, 1),
        DEF_FIXED(".s2",        CLK_S2,            CLK_PLL1_DIV2,  4, 1),
        DEF_FIXED(".s3",        CLK_S3,            CLK_PLL1_DIV2,  6, 1),
+       DEF_FIXED(".sdsrc",     CLK_SDSRC,         CLK_PLL1_DIV2,  2, 1),
 
        /* Core Clock Outputs */
        DEF_FIXED("ztr",        R8A7795_CLK_ZTR,   CLK_PLL1_DIV2,  6, 1),
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED("s3d2",       R8A7795_CLK_S3D2,  CLK_S3,         2, 1),
        DEF_FIXED("s3d4",       R8A7795_CLK_S3D4,  CLK_S3,         4, 1),
 
-       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_PLL1_DIV2, 0x0074),
-       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_PLL1_DIV2, 0x0078),
-       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_PLL1_DIV2, 0x0268),
-       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_PLL1_DIV2, 0x026c),
+       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_SDSRC,     0x0074),
+       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_SDSRC,     0x0078),
+       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_SDSRC,     0x0268),
+       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_SDSRC,     0x026c),
 
        DEF_FIXED("cl",         R8A7795_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
        DEF_FIXED("cp",         R8A7795_CLK_CP,    CLK_EXTAL,      2, 1),
index c109d80e7a8a96340f187a81b3699ae115928a71..cdfabeb9a034c5fec840dc3c952a385b36536df5 100644 (file)
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 
        /* perihp */
        GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 1, GFLAGS),
+       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
        COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
                        RK3399_CLKGATE_CON(6), 14, GFLAGS),
 
        GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
        COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
        GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
        /* vio */
        COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
-                       RK3399_CLKGATE_CON(11), 10, GFLAGS),
+                       RK3399_CLKGATE_CON(11), 0, GFLAGS),
        COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
                        RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
        "hclk_perilp1",
        "hclk_perilp1_noc",
        "aclk_dmac0_perilp",
+       "aclk_emmc_noc",
        "gpll_hclk_perilp1_src",
        "gpll_aclk_perilp0_src",
        "gpll_aclk_perihp_src",
index 9af359544110b59fd61dca76e69a7b922107fb15..267f99523fbe176d890e17a43a19c84abe733fef 100644 (file)
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
        [RST_BUS_I2S1]          =  { 0x2d0, BIT(13) },
        [RST_BUS_I2S2]          =  { 0x2d0, BIT(14) },
 
-       [RST_BUS_I2C0]          =  { 0x2d4, BIT(0) },
-       [RST_BUS_I2C1]          =  { 0x2d4, BIT(1) },
-       [RST_BUS_I2C2]          =  { 0x2d4, BIT(2) },
-       [RST_BUS_UART0]         =  { 0x2d4, BIT(16) },
-       [RST_BUS_UART1]         =  { 0x2d4, BIT(17) },
-       [RST_BUS_UART2]         =  { 0x2d4, BIT(18) },
-       [RST_BUS_UART3]         =  { 0x2d4, BIT(19) },
-       [RST_BUS_SCR]           =  { 0x2d4, BIT(20) },
+       [RST_BUS_I2C0]          =  { 0x2d8, BIT(0) },
+       [RST_BUS_I2C1]          =  { 0x2d8, BIT(1) },
+       [RST_BUS_I2C2]          =  { 0x2d8, BIT(2) },
+       [RST_BUS_UART0]         =  { 0x2d8, BIT(16) },
+       [RST_BUS_UART1]         =  { 0x2d8, BIT(17) },
+       [RST_BUS_UART2]         =  { 0x2d8, BIT(18) },
+       [RST_BUS_UART3]         =  { 0x2d8, BIT(19) },
+       [RST_BUS_SCR]           =  { 0x2d8, BIT(20) },
 };
 
 static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
index fc17b5295e1694629a2c48d5647ea03f8879433f..51d4bac97ab301f9d0213ff40327325622fd2109 100644 (file)
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
                return;
 
        WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
-                                          !(reg & lock), 100, 70000));
+                                          reg & lock, 100, 70000));
 }
 
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
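
The one-character fix above inverts the polling condition: readl_relaxed_poll_timeout() (from linux/iopoll.h) spins until its condition argument becomes true, so waiting for a PLL lock bit must poll for `reg & lock`; the old negated form returned immediately while the PLL was still unlocked. Roughly open-coded, under those assumptions:

    /* Rough equivalent of
     * readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000) */
    s64 left = 70000;                  /* timeout in microseconds */
    u32 reg;

    do {
            reg = readl_relaxed(addr);
            if (reg & lock)
                    return 0;          /* condition true: PLL locked */
            usleep_range(100, 200);    /* sleep_us step */
            left -= 100;
    } while (left > 0);
    return -ETIMEDOUT;
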
index 4470ffc8cf0d3f2cd68e0587eb2f873e87dac008..d6fafb397489caaebee47a7c086bdccda7c1244f 100644 (file)
@@ -14,9 +14,9 @@
 #include "ccu_gate.h"
 #include "ccu_nk.h"
 
-void ccu_nk_find_best(unsigned long parent, unsigned long rate,
-                     unsigned int max_n, unsigned int max_k,
-                     unsigned int *n, unsigned int *k)
+static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+                            unsigned int max_n, unsigned int max_k,
+                            unsigned int *n, unsigned int *k)
 {
        unsigned long best_rate = 0;
        unsigned int best_k = 0, best_n = 0;
index 0ee1f363e4be154749d4b37a07e215dbd705d76e..d8eab90ae661c245507fc31d11139ff23ff28745 100644 (file)
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
                                          SUN4I_PLL2_PRE_DIV_WIDTH,
                                          CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
                                          &sun4i_a10_pll2_lock);
-       if (!prediv_clk) {
+       if (IS_ERR(prediv_clk)) {
                pr_err("Couldn't register the prediv clock\n");
                goto err_free_array;
        }
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
                                          &mult->hw, &clk_multiplier_ops,
                                          &gate->hw, &clk_gate_ops,
                                          CLK_SET_RATE_PARENT);
-       if (!base_clk) {
+       if (IS_ERR(base_clk)) {
                pr_err("Couldn't register the base multiplier clock\n");
                goto err_free_multiplier;
        }
index 411d3033a96e9a729fd9b289cf390344a17f415f..b200ebf159eec42b3c46970c318fef053cfa468e 100644 (file)
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
                return;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for sun8i-mbus-clk\n");
                goto err_free_parents;
        }
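
The three IS_ERR() fixes above (this one and the two in sun4i-pll2 just before) share one root cause: the clk registration helpers and of_io_request_and_map() report failure through ERR_PTR()-encoded pointers, never NULL, so a `!ptr` check silently accepts an error pointer. The canonical pattern, sketched as a fragment:

    #include <linux/err.h>

    void __iomem *reg = of_io_request_and_map(node, 0, of_node_full_name(node));
    if (IS_ERR(reg)) {
            pr_err("Could not map registers: %ld\n", PTR_ERR(reg));
            return PTR_ERR(reg);    /* propagate the encoded errno */
    }
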
index 64da7b79a6e47fd4acf9098f1ff6c17ad07741da..933b5dd698b8cc86ddc924fd65aee483b53a2aec 100644 (file)
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static struct tegra_clk_pll_params pll_d2_params = {
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static const struct pdiv_map pllu_p[] = {
index 28bce3f4f81d6aa56840657169c0842461f9f515..57700541f95129e6f8f194ede23afeeda35842da 100644 (file)
@@ -8,6 +8,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt)    "arm_arch_timer: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void)
                arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
+static u32 check_ppi_trigger(int irq)
+{
+       u32 flags = irq_get_trigger_type(irq);
+
+       if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+               pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+               pr_warn("WARNING: Please fix your firmware\n");
+               flags = IRQF_TRIGGER_LOW;
+       }
+
+       return flags;
+}
+
 static int arch_timer_starting_cpu(unsigned int cpu)
 {
        struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+       u32 flags;
 
        __arch_timer_setup(ARCH_CP15_TIMER, clk);
 
-       enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+       flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+       enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
-       if (arch_timer_has_nonsecure_ppi())
-               enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+       if (arch_timer_has_nonsecure_ppi()) {
+               flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+               enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+       }
 
        arch_counter_set_user_access();
        if (evtstrm_enable)
index 7e3fd375a6278f17d26fc7b584bb98fa70459a02..92f6e4deee74a00499f0c51ede613818086adca7 100644 (file)
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-       int loop_limit = 4;
+       int loop_limit = 3;
 
        /*
         * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
         *      if new hi-word is equal to previously read hi-word then stop.
         */
 
-       while (--loop_limit) {
+       do {
                *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
                *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
                if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
                        break;
-       }
+       } while (--loop_limit);
        if (!loop_limit) {
                pr_err("bcm_kona_timer: getting counter failed.\n");
                pr_err(" Timer will be impacted\n");
+               return -ETIMEDOUT;
        }
 
-       return;
+       return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
 
        uint32_t lsw, msw;
        uint32_t reg;
+       int ret;
 
-       kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+       ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+       if (ret)
+               return ret;
 
        /* Load the "next" event tick value */
        writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
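
The rewritten loop is the standard lock-free way to sample a 64-bit counter exposed as two 32-bit registers: read the high word, then the low word, then re-read the high word; if the two high-word reads differ, the low word rolled over in between and the sample must be retried. Returning -ETIMEDOUT (instead of void) lets kona_timer_set_next_event() bail out rather than program a garbage match value. A generic sketch:

    /* Sketch: coherent 64-bit sample from split hi/lo registers. */
    static u64 read_counter64(void __iomem *hi, void __iomem *lo)
    {
            u32 h, l;
            int retries = 3;

            do {
                    h = readl(hi);
                    l = readl(lo);
            } while (h != readl(hi) && --retries); /* retry on rollover */

            return ((u64)h << 32) | l;   /* error handling omitted */
    }
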
index d91e8725917c615c8e3390d356dd9e3d133a666e..b4b3ab5a11ad006cd0bf2bd4db50627057ea27c8 100644 (file)
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
        gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
        struct clk *clk;
        int ret;
index 937e10b84d5850311d878988780fb7aefee069e6..3e1cb512f3ce962a81b32b5c5ea7ddc1ea5d1b0b 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <clocksource/pxa.h>
+
 #include <asm/div64.h>
 
 #define OSMR0          0x00    /* OS Timer 0 Match Register */
index 97669ee4df2a6625f9e69b83d6d479898aee8515..c83452cacb41223638e6075e206345c11f655240 100644 (file)
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
        .set_next_event = sun4i_clkevt_next_event,
 };
 
+static void sun4i_timer_clear_interrupt(void)
+{
+       writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
 
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = (struct clock_event_device *)dev_id;
 
-       writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+       sun4i_timer_clear_interrupt();
        evt->event_handler(evt);
 
        return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
        /* Make sure timer is stopped before playing with interrupts */
        sun4i_clkevt_time_stop(0);
 
+       /* clear timer0 interrupt */
+       sun4i_timer_clear_interrupt();
+
        sun4i_clockevent.cpumask = cpu_possible_mask;
        sun4i_clockevent.irq = irq;
 
index 719b478d136e7dbc8be409847c786a17df4cd971..3c39e6f459714e5b725cfdd10663fb04dd62063b 100644 (file)
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
        struct clk *clk = of_clk_get_by_name(np, "fixed");
        int ret;
 
-       clk = of_clk_get(np, 0);
        if (IS_ERR(clk)) {
                pr_err("Failed to get clock");
                return PTR_ERR(clk);
index a7d9a08e4b0e31b24640d4e188746aa25139bc1b..a8e6c7df853d2c64914c04af5785f61a51d56880 100644 (file)
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
        rate = clk_get_rate(fast_clk);
 
        /* Disable irq's for clocksource usage */
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
 
        /* Enable timer block */
        writel(TIMER_ME_GLOBAL, pcs_gpt.base);
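
The fix above drops a stray address-of operator: pcs_gpt.base is already a `void __iomem *`, so `&pcs_gpt.base` handed the helper the address of the pointer variable itself rather than the mapped timer block. In miniature (gpt_writel is the driver's own wrapper; the offset use here is illustrative):

    void __iomem *base = pcs_gpt.base;

    writel(0, base + TIMER_IRQ_MASK);      /* correct: device register */
    /* writel(0, (void __iomem *)&base + TIMER_IRQ_MASK);
     *         ^ the bug class: arithmetic on the address of the local
     *           pointer, nowhere near the hardware */
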
index 1ffac0cb0cb78496d684e987d7b17f3b99a28775..7f0f5b26d8c5ddc5a953d6f12a4b75fb23058b27 100644 (file)
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
 static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
        struct pit_data *data;
+       int ret;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -261,6 +262,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                return PTR_ERR(data->mck);
        }
 
+       ret = clk_prepare_enable(data->mck);
+       if (ret) {
+               pr_err("Unable to enable mck\n");
+               return ret;
+       }
+
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
index 0bb44d5b5df49d385b5bcfd43297b750417461a7..2ee40fd360caaa1ffd3a60700b1bc31a3a533322 100644 (file)
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "ti,omap5", },
 
        { .compatible = "xlnx,zynq-7000", },
+
+       { }
 };
 
 static int __init cpufreq_dt_platdev_init(void)
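
The added `{ }` entry matters because of_device_id tables are walked until an all-zero sentinel entry; without it, the match loop reads past the end of the array. For example (entries hypothetical):

    static const struct of_device_id machines[] __initconst = {
            { .compatible = "vendor,board-a", },
            { .compatible = "vendor,board-b", },
            { } /* sentinel: terminates the match loop */
    };
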
index 87796e0864e945e41725be06106234ca8a32a133..d3ffde8066298ff48d64c990eefe1dec0bf34cb6 100644 (file)
@@ -145,11 +145,30 @@ static struct powernv_pstate_info {
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+       if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+               pr_warn_once("index %u is out of bounds\n", i);
+               return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+       }
+
        return powernv_freqs[i].driver_data;
 }
 
 static inline unsigned int pstate_to_idx(int pstate)
 {
+       int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+       int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+       if (min > 0) {
+               if (unlikely((pstate < max) || (pstate > min))) {
+                       pr_warn_once("pstate %d is out of bounds\n", pstate);
+                       return powernv_pstate_info.nominal;
+               }
+       } else {
+               if (unlikely((pstate > max) || (pstate < min))) {
+                       pr_warn_once("pstate %d is out of bounds\n", pstate);
+                       return powernv_pstate_info.nominal;
+               }
+       }
        /*
         * abs() is deliberately used so that it works with
         * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
        } else {
                gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
                                                 gpstates->highest_lpstate_idx,
-                                                freq_data.pstate_id);
+                                                gpstates->last_lpstate_idx);
        }
 
        /*
index ea8189f4b0212cc038f5f4363cf10f5e8a54099a..b3044219772cd7ac57e0bf2559eb7dea8caeca08 100644 (file)
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;
 
+       if (!ctx->authsize)
+               return 0;
+
        /* NULL encryption / decryption */
        if (!ctx->enckeylen)
                return aead_null_set_sh_desc(aead);
@@ -553,7 +556,10 @@ skip_enc:
 
        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+       if (alg->caam.geniv)
+               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+       else
+               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -562,6 +568,14 @@ skip_enc:
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
 
+       if (alg->caam.geniv) {
+               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+                               LDST_SRCDST_BYTE_CONTEXT |
+                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
+               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+       }
+
        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -614,7 +628,7 @@ skip_enc:
                keys_fit_inline = true;
 
        /* aead_givencrypt shared descriptor */
-       desc = ctx->sh_desc_givenc;
+       desc = ctx->sh_desc_enc;
 
        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +659,13 @@ copy_iv:
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-       /* ivsize + cryptlen = seqoutlen - authsize */
-       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
+       /* ivsize + cryptlen = seqoutlen - authsize */
+       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
@@ -697,7 +711,7 @@ copy_iv:
        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
        init_aead_job(req, edesc, all_contig, encrypt);
 
-       if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+       if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
                append_load_as_imm(desc, req->iv, ivsize,
                                   LDST_CLASS_1_CCB |
                                   LDST_SRCDST_BYTE_CONTEXT |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
        return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       unsigned int ivsize = crypto_aead_ivsize(aead);
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       req->cryptlen -= ivsize;
-       req->assoclen += ivsize;
-
-       return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
index f1ecc8df8d41e40dff086bd0382f98f364ad4bb1..36365b3efdfdce5ffb6bc19a8b72fe31a3a463b3 100644 (file)
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
                         template->name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->driver_name);
+               t_alg->ahash_alg.setkey = NULL;
        }
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_hash_cra_init;
index 769148dbaeb34689e20f44fba4ab5c788648a5a3..20f35df8a01fafefbb6f5da84903fa4a2eefc3a7 100644 (file)
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
+                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
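
The doubled limits reflect how XTS keys are laid out: the caller supplies two independent AES keys concatenated, one for data encryption and one for computing the tweak, so with AES_MIN_KEY_SIZE = 16 and AES_MAX_KEY_SIZE = 32 the valid XTS key sizes are 32 and 64 bytes. A hedged sketch of how such a setkey splits the material (helper names hypothetical):

    static int xts_setkey_sketch(const u8 *key, unsigned int keylen)
    {
            if (keylen != 32 && keylen != 64)  /* 2x AES-128 or 2x AES-256 */
                    return -EINVAL;

            expand_data_key(key, keylen / 2);                /* first half */
            expand_tweak_key(key + keylen / 2, keylen / 2);  /* second half */
            return 0;
    }
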
index cfb25413917c380e997d497fe82343cab03a88fc..24353ec336c5bc815e499fb1a38b08d21ec12ebd 100644 (file)
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
 
-               iv = (u8 *)walk.iv;
                ret = blkcipher_walk_virt(desc, &walk);
+               iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
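
The reorder matters because blkcipher_walk_virt() is what initializes the walk state; walk.iv only points at usable IV memory after that call returns, so the old code captured a stale pointer. The safe ordering, in short:

    blkcipher_walk_init(&walk, dst, src, nbytes);
    ret = blkcipher_walk_virt(desc, &walk);  /* sets up walk.iv et al. */
    iv = walk.iv;                            /* only now is walk.iv valid */
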
 
index 803f3953b341a42aa47adcc4f8405f5dfa501a06..29f600f2c447159e84b17bc43bdcd66ca8bbcc7b 100644 (file)
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
        }
 
        pgoff = linear_page_index(vma, pmd_addr);
-       phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+       phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                pgoff);
index dfb168568af1a6d2ba163a7010d611ec0ad3a3f0..1f01e98c83c7ddcccecd50e11671e3b519b2c136 100644 (file)
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
        if (rc)
                return rc;
 
+       /* adjust the dax_region resource to the start of data */
+       res.start += le64_to_cpu(pfn_sb->dataoff);
+
        nd_region = to_nd_region(dev->parent);
        dax_region = alloc_dax_region(dev, nd_region->id, &res,
                        le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
index ddaee60ae52a5595063c4fac1389eef10ffcb3c0..cf04d249a6a43b26b5168c3a7a55d54eff4ffcde 100644 (file)
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                     enum dma_data_direction direction)
+{
+       bool write = (direction == DMA_BIDIRECTIONAL ||
+                     direction == DMA_TO_DEVICE);
+       struct reservation_object *resv = dmabuf->resv;
+       long ret;
+
+       /* Wait on any implicit rendering fences */
+       ret = reservation_object_wait_timeout_rcu(resv, write, true,
+                                                 MAX_SCHEDULE_TIMEOUT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
 
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+       /* Ensure that all fences are waited upon - but we first allow
+        * the native handler the chance to do so more efficiently if it
+        * chooses. A double invocation here will be a reasonably cheap no-op.
+        */
+       if (ret == 0)
+               ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
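
With this change dma_buf_begin_cpu_access() also waits on any fences attached to the buffer's reservation object, so CPU access no longer races pending device writes even when the exporter's begin_cpu_access hook does not wait itself. A typical caller, sketched (the consumer function is hypothetical):

    int ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
    if (ret)
            return ret;                 /* fence wait failed or interrupted */

    process_buffer_on_cpu(dmabuf);      /* hypothetical CPU-side consumer */

    dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
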
index a8731c853da6a3551f405db83c2a9469fc235be3..f1989fcaf3540bc037fa8330dda396375c37194d 100644 (file)
@@ -99,6 +99,7 @@ const struct fence_ops fence_array_ops = {
        .wait = fence_default_wait,
        .release = fence_array_release,
 };
+EXPORT_SYMBOL(fence_array_ops);
 
 /**
  * fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@ const struct fence_ops fence_array_ops = {
  * @fences:            [in]    array containing the fences
  * @context:           [in]    fence context to use
  * @seqno:             [in]    sequence number to use
- * @signal_on_any      [in]    signal on any fence in the array
+ * @signal_on_any:     [in]    signal on any fence in the array
  *
  * Allocate a fence_array object and initialize the base fence with fence_init().
  * In case of error it returns NULL.
  *
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
  * and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
  *
  * If @signal_on_any is true the fence array signals if any fence in the array
  * signals, otherwise it signals when all fences in the array signal.
index 9566a62ad8e3186e719751df84aa35f156dae2b2..723d8af988e5146ba247ce38658fe03f8b057923 100644 (file)
@@ -205,7 +205,7 @@ done:
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct fence *fence)
index 9aaa608dfe0111515e01b59227a7ca87d72ab61b..b29a9e817320a844ce0a0f8a89711266a17211a0 100644 (file)
 
 static const struct file_operations sync_file_fops;
 
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
 {
        struct sync_file *sync_file;
 
-       sync_file = kzalloc(size, GFP_KERNEL);
+       sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
        if (!sync_file)
                return NULL;
 
@@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
 
        init_waitqueue_head(&sync_file->wq);
 
+       INIT_LIST_HEAD(&sync_file->cb.node);
+
        return sync_file;
 
 err:
@@ -54,14 +56,11 @@ err:
 
 static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 {
-       struct sync_file_cb *check;
        struct sync_file *sync_file;
 
-       check = container_of(cb, struct sync_file_cb, cb);
-       sync_file = check->sync_file;
+       sync_file = container_of(cb, struct sync_file, cb);
 
-       if (atomic_dec_and_test(&sync_file->status))
-               wake_up_all(&sync_file->wq);
+       wake_up_all(&sync_file->wq);
 }
 
 /**
@@ -76,23 +75,17 @@ struct sync_file *sync_file_create(struct fence *fence)
 {
        struct sync_file *sync_file;
 
-       sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+       sync_file = sync_file_alloc();
        if (!sync_file)
                return NULL;
 
-       sync_file->num_fences = 1;
-       atomic_set(&sync_file->status, 1);
+       sync_file->fence = fence;
+
        snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
                 fence->ops->get_driver_name(fence),
                 fence->ops->get_timeline_name(fence), fence->context,
                 fence->seqno);
 
-       sync_file->cbs[0].fence = fence;
-       sync_file->cbs[0].sync_file = sync_file;
-       if (fence_add_callback(fence, &sync_file->cbs[0].cb,
-                              fence_check_cb_func))
-               atomic_dec(&sync_file->status);
-
        return sync_file;
 }
 EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@ err:
        return NULL;
 }
 
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
-                            struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd:                sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
+{
+       struct sync_file *sync_file;
+       struct fence *fence;
+
+       sync_file = sync_file_fdget(fd);
+       if (!sync_file)
+               return NULL;
+
+       fence = fence_get(sync_file->fence);
+       fput(sync_file->file);
+
+       return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+                              struct fence **fences, int num_fences)
+{
+       struct fence_array *array;
+
+       /*
+        * The references for the fences in the new sync_file are held
+        * in add_fence() during the merge procedure, so for num_fences == 1
+        * we already own a new reference to the fence. For num_fences > 1
+        * we own the reference from the fence_array creation.
+        */
+       if (num_fences == 1) {
+               sync_file->fence = fences[0];
+               kfree(fences);
+       } else {
+               array = fence_array_create(num_fences, fences,
+                                          fence_context_alloc(1), 1, false);
+               if (!array)
+                       return -ENOMEM;
+
+               sync_file->fence = &array->base;
+       }
+
+       return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+       if (fence_is_array(sync_file->fence)) {
+               struct fence_array *array = to_fence_array(sync_file->fence);
+
+               *num_fences = array->num_fences;
+               return array->fences;
+       }
+
+       *num_fences = 1;
+       return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
 {
-       sync_file->cbs[*i].fence = fence;
-       sync_file->cbs[*i].sync_file = sync_file;
+       fences[*i] = fence;
 
-       if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
-                               fence_check_cb_func)) {
+       if (!fence_is_signaled(fence)) {
                fence_get(fence);
                (*i)++;
        }
@@ -147,16 +199,24 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                                         struct sync_file *b)
 {
-       int num_fences = a->num_fences + b->num_fences;
        struct sync_file *sync_file;
-       int i, i_a, i_b;
-       unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+       struct fence **fences, **nfences, **a_fences, **b_fences;
+       int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
-       sync_file = sync_file_alloc(size);
+       sync_file = sync_file_alloc();
        if (!sync_file)
                return NULL;
 
-       atomic_set(&sync_file->status, num_fences);
+       a_fences = get_fences(a, &a_num_fences);
+       b_fences = get_fences(b, &b_num_fences);
+       if (a_num_fences > INT_MAX - b_num_fences)
+               return NULL;
+
+       num_fences = a_num_fences + b_num_fences;
+
+       fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+       if (!fences)
+               goto err;
 
        /*
         * Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
         * If a sync_file can only be created with sync_file_merge
         * and sync_file_create, this is a reasonable assumption.
         */
-       for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
-               struct fence *pt_a = a->cbs[i_a].fence;
-               struct fence *pt_b = b->cbs[i_b].fence;
+       for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+               struct fence *pt_a = a_fences[i_a];
+               struct fence *pt_b = b_fences[i_b];
 
                if (pt_a->context < pt_b->context) {
-                       sync_file_add_pt(sync_file, &i, pt_a);
+                       add_fence(fences, &i, pt_a);
 
                        i_a++;
                } else if (pt_a->context > pt_b->context) {
-                       sync_file_add_pt(sync_file, &i, pt_b);
+                       add_fence(fences, &i, pt_b);
 
                        i_b++;
                } else {
                        if (pt_a->seqno - pt_b->seqno <= INT_MAX)
-                               sync_file_add_pt(sync_file, &i, pt_a);
+                               add_fence(fences, &i, pt_a);
                        else
-                               sync_file_add_pt(sync_file, &i, pt_b);
+                               add_fence(fences, &i, pt_b);
 
                        i_a++;
                        i_b++;
                }
        }
 
-       for (; i_a < a->num_fences; i_a++)
-               sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+       for (; i_a < a_num_fences; i_a++)
+               add_fence(fences, &i, a_fences[i_a]);
+
+       for (; i_b < b_num_fences; i_b++)
+               add_fence(fences, &i, b_fences[i_b]);
 
-       for (; i_b < b->num_fences; i_b++)
-               sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+       if (i == 0)
+               fences[i++] = fence_get(a_fences[0]);
 
-       if (num_fences > i)
-               atomic_sub(num_fences - i, &sync_file->status);
-       sync_file->num_fences = i;
+       if (num_fences > i) {
+               nfences = krealloc(fences, i * sizeof(*fences),
+                                 GFP_KERNEL);
+               if (!nfences)
+                       goto err;
+
+               fences = nfences;
+       }
+
+       if (sync_file_set_fence(sync_file, fences, i) < 0) {
+               kfree(fences);
+               goto err;
+       }
 
        strlcpy(sync_file->name, name, sizeof(sync_file->name));
        return sync_file;
+
+err:
+       fput(sync_file->file);
+       return NULL;
+
 }
 
 static void sync_file_free(struct kref *kref)
 {
        struct sync_file *sync_file = container_of(kref, struct sync_file,
                                                     kref);
-       int i;
-
-       for (i = 0; i < sync_file->num_fences; ++i) {
-               fence_remove_callback(sync_file->cbs[i].fence,
-                                     &sync_file->cbs[i].cb);
-               fence_put(sync_file->cbs[i].fence);
-       }
 
+       if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+               fence_remove_callback(sync_file->fence, &sync_file->cb);
+       fence_put(sync_file->fence);
        kfree(sync_file);
 }
 
@@ -228,17 +302,17 @@ static int sync_file_release(struct inode *inode, struct file *file)
 static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 {
        struct sync_file *sync_file = file->private_data;
-       int status;
 
        poll_wait(file, &sync_file->wq, wait);
 
-       status = atomic_read(&sync_file->status);
+       if (!poll_does_not_wait(wait) &&
+           !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+               if (fence_add_callback(sync_file->fence, &sync_file->cb,
+                                      fence_check_cb_func) < 0)
+                       wake_up_all(&sync_file->wq);
+       }
 
-       if (!status)
-               return POLLIN;
-       if (status < 0)
-               return POLLERR;
-       return 0;
+       return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 {
        struct sync_file_info info;
        struct sync_fence_info *fence_info = NULL;
+       struct fence **fences;
        __u32 size;
-       int ret, i;
+       int num_fences, ret, i;
 
        if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                return -EFAULT;
@@ -324,6 +399,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (info.flags || info.pad)
                return -EINVAL;
 
+       fences = get_fences(sync_file, &num_fences);
+
        /*
         * Passing num_fences = 0 means that userspace doesn't want to
         * retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (!info.num_fences)
                goto no_fences;
 
-       if (info.num_fences < sync_file->num_fences)
+       if (info.num_fences < num_fences)
                return -EINVAL;
 
-       size = sync_file->num_fences * sizeof(*fence_info);
+       size = num_fences * sizeof(*fence_info);
        fence_info = kzalloc(size, GFP_KERNEL);
        if (!fence_info)
                return -ENOMEM;
 
-       for (i = 0; i < sync_file->num_fences; ++i)
-               sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+       for (i = 0; i < num_fences; i++)
+               sync_fill_fence_info(fences[i], &fence_info[i]);
 
        if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
                         size)) {
@@ -352,11 +429,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
        strlcpy(info.name, sync_file->name, sizeof(info.name));
-       info.status = atomic_read(&sync_file->status);
-       if (info.status >= 0)
-               info.status = !info.status;
-
-       info.num_fences = sync_file->num_fences;
+       info.status = fence_is_signaled(sync_file->fence);
+       info.num_fences = num_fences;
 
        if (copy_to_user((void __user *)arg, &info, sizeof(info)))
                ret = -EFAULT;
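
After this rework a sync_file carries exactly one struct fence (possibly a fence_array wrapping several), which is what makes the new sync_file_get_fence() helper possible. A sketch of a driver importing a fence from a userspace fd (field names hypothetical):

    struct fence *in_fence = sync_file_get_fence(args->in_fence_fd);
    long ret;

    if (!in_fence)
            return -EINVAL;             /* fd was not a valid sync_file */

    ret = fence_wait(in_fence, true);   /* interruptible wait */
    fence_put(in_fence);                /* drop the reference we were given */
    if (ret < 0)
            return ret;
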
index e434ffe7bc5c57cfd22f7b9f163f0819a1294981..832cbd6471450a0f8f571145c603696f6e19687c 100644 (file)
@@ -2067,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
        clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
        return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
        dma_async_device_unregister(&atxdmac->dma);
        clk_disable_unprepare(atxdmac->clk);
 
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
 
        for (i = 0; i < atxdmac->dma.chancnt; i++) {
                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
index aad167eaaee80b29e9386574c183533d0e8f0d83..de2a2a2b1d7526d99f00766e307f1a0d222b11ec 100644 (file)
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
                rc = of_property_read_u32(np, "reg", &off);
                if (rc) {
                        dev_err(dev, "Reg property not found in JQ node\n");
+                       of_node_put(np);
                        return -ENODEV;
                }
                /* Find out the Job Rings present under each JQ */
index a4c53be482cf3ce6ba639c510911e2060f07c616..624f1e1e9c55b0eb872c2b39f8debb8e381adb9f 100644 (file)
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
        struct mdc_dma *mdma;
        struct resource *res;
-       const struct of_device_id *match;
        unsigned int i;
        u32 val;
        int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);
 
-       match = of_match_device(mdc_dma_of_match, &pdev->dev);
-       mdma->soc = match->data;
+       mdma->soc = of_device_get_match_data(&pdev->dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mdma->regs = devm_ioremap_resource(&pdev->dev, res);
index dc7850a422b8d4212495d067c4ea31c26e0af8a2..3f56f9ca44824d90d67c47ce78841d223a963ba3 100644 (file)
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
                vd_last_issued = list_entry(vc->desc_issued.prev,
                                            struct virt_dma_desc, node);
                pxad_desc_chain(vd_last_issued, vd);
-               if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+               if (is_chan_running(chan) || is_desc_completed(vd))
                        return true;
        }
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
        struct virt_dma_desc *vd, *tmp;
        unsigned int dcsr;
        unsigned long flags;
+       bool vd_completed;
        dma_cookie_t last_started = 0;
 
        BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
        spin_lock_irqsave(&chan->vc.lock, flags);
        list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+               vd_completed = is_desc_completed(vd);
                dev_dbg(&chan->vc.chan.dev->device,
-                       "%s(): checking txd %p[%x]: completed=%d\n",
-                       __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+                       "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+                       __func__, vd, vd->tx.cookie, vd_completed,
+                       dcsr);
                last_started = vd->tx.cookie;
                if (to_pxad_sw_desc(vd)->cyclic) {
                        vchan_cyclic_callback(vd);
                        break;
                }
-               if (is_desc_completed(vd)) {
+               if (vd_completed) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                } else {
index 749f1bd5d65d77c2c9f200b437ecd6d98e445361..06ecdc38cee0a32dfa312a6ada4d807a560aa47d 100644 (file)
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
        struct usb_dmac_chan *chan = dev;
        irqreturn_t ret = IRQ_NONE;
-       u32 mask = USB_DMACHCR_TE;
-       u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+       u32 mask = 0;
        u32 chcr;
+       bool xfer_end = false;
 
        spin_lock(&chan->vc.lock);
 
        chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-       if (chcr & check_bits)
-               mask |= USB_DMACHCR_DE | check_bits;
+       if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+               mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+               if (chcr & USB_DMACHCR_DE)
+                       xfer_end = true;
+               ret |= IRQ_HANDLED;
+       }
        if (chcr & USB_DMACHCR_NULL) {
                /* An interruption of TE will happen after we set FTE */
                mask |= USB_DMACHCR_NULL;
                chcr |= USB_DMACHCR_FTE;
                ret |= IRQ_HANDLED;
        }
-       usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+       if (mask)
+               usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-       if (chcr & check_bits) {
+       if (xfer_end)
                usb_dmac_isr_transfer_end(chan);
-               ret |= IRQ_HANDLED;
-       }
 
        spin_unlock(&chan->vc.lock);
 
index d0c1dab9b435480bef9d6c61f42cac9dbea0107f..dff1a4a6dc1b5cd63df8071564e3606c4305e44e 100644 (file)
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
          Support for error detection and correction on the Intel
          Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
 
+config EDAC_SKX
+       tristate "Intel Skylake server Integrated MC"
+       depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+       depends on PCI_MMCONFIG
+       help
+         Support for error detection and correction on the Intel
+         Skylake server Integrated Memory Controllers.
+
 config EDAC_MPC85XX
        tristate "Freescale MPC83xx / MPC85xx"
        depends on EDAC_MM_EDAC && FSL_SOC
index f9e4a3e0e6e915d1b5b6217e76e4f7f52323656c..986049925b08569d45692d3e0d0b16e6394363c4 100644 (file)
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400)              += i5400_edac.o
 obj-$(CONFIG_EDAC_I7300)               += i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE)              += i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE)             += sb_edac.o
+obj-$(CONFIG_EDAC_SKX)                 += skx_edac.o
 obj-$(CONFIG_EDAC_E7XXX)               += e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)               += e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)          += i82443bxgx_edac.o
index 4fb2eb7c800d8839c6329cd34589eeea4dbaa5c0..ce0067b7a2f675e7933f91e7fa453b8992789763 100644 (file)
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Knight's Landing Support */
 /*
  * KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
  */
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
 
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
        mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
        chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
 
-       return knl_channel_remap(mc*3 + chan);
+       return knl_channel_remap(mc, chan);
 }
 
 /*
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
                } else {
                        char A = *("A");
 
-                       channel = knl_channel_remap(channel);
+                       /*
+                        * The reported channel is in range 0-2, so we can't
+                        * map it back to the mc directly. To figure out the mc
+                        * we check the machine check bank that reported this
+                        * error: bank 15 means mc0 and bank 16 means mc1.
+                        */
+                       channel = knl_channel_remap(m->bank == 16, channel);
                        channel_mask = 1 << channel;
+
                        snprintf(msg, sizeof(msg),
                                "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
                                overflow ? " OVERFLOW" : "",
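Note that the forward decode in knl_get_mc_route() is unchanged by the new two-argument macro: (mc*3 + chan + 3) % 6 and the new form agree for every (mc, chan). The behavioural fix is in sbridge_mce_output_error(), where only a 0-2 channel is known and the owning MC must be recovered from the MCE bank number. A standalone sketch of the remap:

#include <stdio.h>

#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)

int main(void)
{
        int mc, chan;

        /* MC0 -> CH3,4,5 and MC1 -> CH0,1,2, matching the fixed comment */
        for (mc = 0; mc <= 1; mc++)
                for (chan = 0; chan < 3; chan++)
                        printf("mc%d chan%d -> CH%d\n",
                               mc, chan, knl_channel_remap(mc, chan));
        return 0;
}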
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644 (file)
index 0000000..0ff4878
--- /dev/null
@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION    " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...)                 \
+       edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...)         \
+       edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+       (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
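GET_BITFIELD() extracts the inclusive bit range lo..hi and right-aligns it; most of the address decode below is built on it. A self-contained sketch, with a user-space stand-in for the kernel's GENMASK_ULL():

#include <stdint.h>
#include <stdio.h>

/* user-space stand-in for the kernel's GENMASK_ULL() */
#define GENMASK_ULL(hi, lo) \
        (((~0ULL) << (lo)) & (~0ULL >> (63 - (hi))))
#define GET_BITFIELD(v, lo, hi) \
        (((v) & GENMASK_ULL((hi), (lo))) >> (lo))

int main(void)
{
        uint64_t reg = 0xDEAD0000;

        /* bits 16..23 of 0xDEAD0000 are 0xAD */
        printf("%#llx\n",
               (unsigned long long)GET_BITFIELD(reg, 16, 23));
        return 0;
}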
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC                        2       /* memory controllers per socket */
+#define NUM_CHANNELS           3       /* channels per memory controller */
+#define NUM_DIMMS              2       /* Max DIMMS per channel */
+
+#define        MASK26  0x3FFFFFF               /* Mask for 2^26 */
+#define MASK29 0x1FFFFFFF              /* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+       struct list_head        list;
+       u8                      bus[4];
+       struct pci_dev  *sad_all;
+       struct pci_dev  *util_all;
+       u32     mcroute;
+       struct skx_imc {
+               struct mem_ctl_info *mci;
+               u8      mc;     /* system wide mc# */
+               u8      lmc;    /* socket relative mc# */
+               u8      src_id, node_id;
+               struct skx_channel {
+                       struct pci_dev *cdev;
+                       struct skx_dimm {
+                               u8      close_pg;
+                               u8      bank_xor_enable;
+                               u8      fine_grain_bank;
+                               u8      rowbits;
+                               u8      colbits;
+                       } dimms[NUM_DIMMS];
+               } chan[NUM_CHANNELS];
+       } imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+       struct skx_imc  *imc;
+};
+
+struct decoded_addr {
+       struct skx_dev *dev;
+       u64     addr;
+       int     socket;
+       int     imc;
+       int     channel;
+       u64     chan_addr;
+       int     sktways;
+       int     chanways;
+       int     dimm;
+       int     rank;
+       int     channel_rank;
+       u64     rank_address;
+       int     row;
+       int     column;
+       int     bank_address;
+       int     bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+       struct skx_dev *d;
+
+       list_for_each_entry(d, &skx_edac_list, list) {
+               if (d->bus[idx] == bus)
+                       return d;
+       }
+
+       return NULL;
+}
+
+enum munittype {
+       CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+       u16     did;
+       u16     devfn[NUM_IMC];
+       u8      busidx;
+       u8      per_socket;
+       enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+       { 0x2054, { }, 1, 1, SAD_ALL },
+       { 0x2055, { }, 1, 1, UTIL_ALL },
+       { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+       { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+       { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+       { 0x208e, { }, 1, 0, SAD },
+       { }
+};
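Each table entry carries enough to route a discovered PCI device: the device ID, the expected device/function pair per IMC, which of the four decoded buses it lives on, how many copies a socket has, and its role. A toy version of the devfn match done in get_all_munits() below, using the values from the CHAN0 row (standalone sketch, not kernel code):

#include <stdio.h>

#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define NUM_IMC 2

int main(void)
{
        /* devfns for the CHAN0 device (did 0x2040) on IMC0 and IMC1 */
        unsigned int devfn[NUM_IMC] = { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) };
        unsigned int found = PCI_DEVFN(12, 0); /* pretend pdev->devfn */
        int i;

        for (i = 0; i < NUM_IMC; i++)
                if (devfn[i] == found)
                        break;
        printf("device belongs to IMC%d\n", i); /* prints IMC1 */
        return 0;
}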
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to determine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+       struct pci_dev *pdev, *prev;
+       struct skx_dev *d;
+       u32 reg;
+       int ndev = 0;
+
+       prev = NULL;
+       for (;;) {
+               pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+               if (!pdev)
+                       break;
+               ndev++;
+               d = kzalloc(sizeof(*d), GFP_KERNEL);
+               if (!d) {
+                       pci_dev_put(pdev);
+                       return -ENOMEM;
+               }
+               pci_read_config_dword(pdev, 0xCC, &reg);
+               d->bus[0] =  GET_BITFIELD(reg, 0, 7);
+               d->bus[1] =  GET_BITFIELD(reg, 8, 15);
+               d->bus[2] =  GET_BITFIELD(reg, 16, 23);
+               d->bus[3] =  GET_BITFIELD(reg, 24, 31);
+               edac_dbg(2, "busses: %x, %x, %x, %x\n",
+                        d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+               list_add_tail(&d->list, &skx_edac_list);
+               skx_num_sockets++;
+               prev = pdev;
+       }
+
+       return ndev;
+}
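The config dword at offset 0xCC packs the four bus numbers one per byte, which is all the GET_BITFIELD() calls above do. A standalone sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = 0x64403217; /* invented value for the 0xCC register */
        uint8_t bus[4];
        int i;

        for (i = 0; i < 4; i++)
                bus[i] = (reg >> (8 * i)) & 0xff;
        /* prints: busses: 17, 32, 40, 64 */
        printf("busses: %x, %x, %x, %x\n", bus[0], bus[1], bus[2], bus[3]);
        return 0;
}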
+
+static int get_all_munits(const struct munit *m)
+{
+       struct pci_dev *pdev, *prev;
+       struct skx_dev *d;
+       u32 reg;
+       int i = 0, ndev = 0;
+
+       prev = NULL;
+       for (;;) {
+               pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+               if (!pdev)
+                       break;
+               ndev++;
+               if (m->per_socket == NUM_IMC) {
+                       for (i = 0; i < NUM_IMC; i++)
+                               if (m->devfn[i] == pdev->devfn)
+                                       break;
+                       if (i == NUM_IMC)
+                               goto fail;
+               }
+               d = get_skx_dev(pdev->bus->number, m->busidx);
+               if (!d)
+                       goto fail;
+
+               /* Be sure that the device is enabled */
+               if (unlikely(pci_enable_device(pdev) < 0)) {
+                       skx_printk(KERN_ERR,
+                               "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+                       goto fail;
+               }
+
+               switch (m->mtype) {
+               case CHAN0: case CHAN1: case CHAN2:
+                       pci_dev_get(pdev);
+                       d->imc[i].chan[m->mtype].cdev = pdev;
+                       break;
+               case SAD_ALL:
+                       pci_dev_get(pdev);
+                       d->sad_all = pdev;
+                       break;
+               case UTIL_ALL:
+                       pci_dev_get(pdev);
+                       d->util_all = pdev;
+                       break;
+               case SAD:
+                       /*
+                        * There is one of these devices per core, including
+                        * cores that don't exist on this SKU. Ignore any that
+                        * read a route table of zero, and make sure all the
+                        * non-zero values match.
+                        */
+                       pci_read_config_dword(pdev, 0xB4, &reg);
+                       if (reg != 0) {
+                               if (d->mcroute == 0)
+                                       d->mcroute = reg;
+                               else if (d->mcroute != reg) {
+                                       skx_printk(KERN_ERR,
+                                               "mcroute mismatch\n");
+                                       goto fail;
+                               }
+                       }
+                       ndev--;
+                       break;
+               }
+
+               prev = pdev;
+       }
+
+       return ndev;
+fail:
+       pci_dev_put(pdev);
+       return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+       { X86_VENDOR_INTEL, 6, 0x55, 0, 0 },    /* Skylake */
+       { }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+       u32 reg;
+
+       pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+       return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+       u32 reg;
+
+       pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+       return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+                        int maxval, char *name)
+{
+       u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+       if (val < minval || val > maxval) {
+               edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+               return -EINVAL;
+       }
+       return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr)           GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+       switch (GET_BITFIELD(mtr, 8, 9)) {
+       case 0:
+               return DEV_X4;
+       case 1:
+               return DEV_X8;
+       case 2:
+               return DEV_X16;
+       }
+       return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+       struct pci_dev *pdev;
+       u32 reg;
+
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+       if (!pdev) {
+               edac_dbg(0, "Can't get tolm/tohm\n");
+               return -ENODEV;
+       }
+
+       pci_read_config_dword(pdev, 0xD0, &reg);
+       skx_tolm = reg;
+       pci_read_config_dword(pdev, 0xD4, &reg);
+       skx_tohm = reg;
+       pci_read_config_dword(pdev, 0xD8, &reg);
+       skx_tohm |= (u64)reg << 32;
+
+       pci_dev_put(pdev);
+       edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+       return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+                        struct skx_imc *imc, int chan, int dimmno)
+{
+       int  banks = 16, ranks, rows, cols, npages;
+       u64 size;
+
+       if (!IS_DIMM_PRESENT(mtr))
+               return 0;
+       ranks = numrank(mtr);
+       rows = numrow(mtr);
+       cols = numcol(mtr);
+
+       /*
+        * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+        */
+       size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+       npages = MiB_TO_PAGES(size);
+
+       edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+                imc->mc, chan, dimmno, size, npages,
+                banks, ranks, rows, cols);
+
+       imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+       imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+       imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+       imc->chan[chan].dimms[dimmno].rowbits = rows;
+       imc->chan[chan].dimms[dimmno].colbits = cols;
+
+       dimm->nr_pages = npages;
+       dimm->grain = 32;
+       dimm->dtype = get_width(mtr);
+       dimm->mtype = MEM_DDR4;
+       dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+       snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+                imc->src_id, imc->lmc, chan, dimmno);
+
+       return 1;
+}
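The size computation above counts 2^(rows + cols + ranks) addresses times banks, in 8-byte words, then shifts by (20 - 3) to convert words to MiB. A worked standalone example; the geometry is illustrative, not taken from real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int banks = 16, ranks = 1, rows = 16, cols = 10;
        uint64_t size;

        /* 8-byte (2^3) words -> MiB (2^20): shift by 20 - 3 = 17 */
        size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
        printf("%llu MiB\n", (unsigned long long)size); /* 16384 MiB */
        return 0;
}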
+
+#define SKX_GET_MTMTR(dev, reg) \
+       pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+       u32 mtmtr;
+
+       SKX_GET_MTMTR(pdev, mtmtr);
+
+       return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+       struct skx_pvt *pvt = mci->pvt_info;
+       struct skx_imc *imc = pvt->imc;
+       struct dimm_info *dimm;
+       int i, j;
+       u32 mtr, amap;
+       int ndimms;
+
+       for (i = 0; i < NUM_CHANNELS; i++) {
+               ndimms = 0;
+               pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+               for (j = 0; j < NUM_DIMMS; j++) {
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                            mci->n_layers, i, j, 0);
+                       pci_read_config_dword(imc->chan[i].cdev,
+                                       0x80 + 4*j, &mtr);
+                       ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+               }
+               if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+                       skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+       struct mem_ctl_info *mci = imc->mci;
+
+       if (!mci)
+               return;
+
+       edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+       /* Remove MC sysfs nodes */
+       edac_mc_del_mc(mci->pdev);
+
+       edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+       kfree(mci->ctl_name);
+       edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+       struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
+       struct pci_dev *pdev = imc->chan[0].cdev;
+       struct skx_pvt *pvt;
+       int rc;
+
+       /* allocate a new MC control structure */
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = NUM_CHANNELS;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = NUM_DIMMS;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+                           sizeof(struct skx_pvt));
+
+       if (unlikely(!mci))
+               return -ENOMEM;
+
+       edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+       /* Associate skx_dev and mci for future usage */
+       imc->mci = mci;
+       pvt = mci->pvt_info;
+       pvt->imc = imc;
+
+       mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+                                 imc->node_id, imc->lmc);
+       mci->mtype_cap = MEM_FLAG_DDR4;
+       mci->edac_ctl_cap = EDAC_FLAG_NONE;
+       mci->edac_cap = EDAC_FLAG_NONE;
+       mci->mod_name = "skx_edac.c";
+       mci->dev_name = pci_name(imc->chan[0].cdev);
+       mci->mod_ver = SKX_REVISION;
+       mci->ctl_page_to_phys = NULL;
+
+       rc = skx_get_dimm_config(mci);
+       if (rc < 0)
+               goto fail;
+
+       /* record ptr to the generic device */
+       mci->pdev = &pdev->dev;
+
+       /* add this new MC control structure to EDAC's list of MCs */
+       if (unlikely(edac_mc_add_mc(mci))) {
+               edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+               rc = -EINVAL;
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       kfree(mci->ctl_name);
+       edac_mc_free(mci);
+       imc->mci = NULL;
+       return rc;
+}
+
+#define        SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg) \
+       pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg) \
+       pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define        SKX_SAD_MOD3MODE(sad)   GET_BITFIELD((sad), 30, 31)
+#define        SKX_SAD_MOD3(sad)       GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad)     (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define        SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
+#define        SKX_SAD_ATTR(sad)       GET_BITFIELD((sad), 3, 4)
+#define        SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad)    GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt)    (((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt)    ((tgt) & 7)
+
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+       struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+       u64 addr = res->addr;
+       int i, idx, tgt, lchan, shift;
+       u32 sad, ilv;
+       u64 limit, prev_limit;
+       int remote = 0;
+
+       /* Simple sanity check for I/O space or out of range */
+       if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+               edac_dbg(0, "Address %llx out of range\n", addr);
+               return false;
+       }
+
+restart:
+       prev_limit = 0;
+       for (i = 0; i < SKX_MAX_SAD; i++) {
+               SKX_GET_SAD(d, i, sad);
+               limit = SKX_SAD_LIMIT(sad);
+               if (SKX_SAD_ENABLE(sad)) {
+                       if (addr >= prev_limit && addr <= limit)
+                               goto sad_found;
+               }
+               prev_limit = limit + 1;
+       }
+       edac_dbg(0, "No SAD entry for %llx\n", addr);
+       return false;
+
+sad_found:
+       SKX_GET_ILV(d, i, ilv);
+
+       switch (SKX_SAD_INTERLEAVE(sad)) {
+       case 0:
+               idx = GET_BITFIELD(addr, 6, 8);
+               break;
+       case 1:
+               idx = GET_BITFIELD(addr, 8, 10);
+               break;
+       case 2:
+               idx = GET_BITFIELD(addr, 12, 14);
+               break;
+       case 3:
+               idx = GET_BITFIELD(addr, 30, 32);
+               break;
+       }
+
+       tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+       /* If point to another node, find it and start over */
+       if (SKX_ILV_REMOTE(tgt)) {
+               if (remote) {
+                       edac_dbg(0, "Double remote!\n");
+                       return false;
+               }
+               remote = 1;
+               list_for_each_entry(d, &skx_edac_list, list) {
+                       if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+                               goto restart;
+               }
+               edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+               return false;
+       }
+
+       if (SKX_SAD_MOD3(sad) == 0)
+               lchan = SKX_ILV_TARGET(tgt);
+       else {
+               switch (SKX_SAD_MOD3MODE(sad)) {
+               case 0:
+                       shift = 6;
+                       break;
+               case 1:
+                       shift = 8;
+                       break;
+               case 2:
+                       shift = 12;
+                       break;
+               default:
+                       edac_dbg(0, "illegal mod3mode\n");
+                       return false;
+               }
+               switch (SKX_SAD_MOD3ASMOD2(sad)) {
+               case 0:
+                       lchan = (addr >> shift) % 3;
+                       break;
+               case 1:
+                       lchan = (addr >> shift) % 2;
+                       break;
+               case 2:
+                       lchan = (addr >> shift) % 2;
+                       lchan = (lchan << 1) | !lchan;
+                       break;
+               case 3:
+                       lchan = ((addr >> shift) % 2) << 1;
+                       break;
+               }
+               lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+       }
+
+       res->dev = d;
+       res->socket = d->imc[0].src_id;
+       res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+       res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+       edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+                res->addr, res->socket, res->imc, res->channel);
+       return true;
+}
+
+#define        SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg)                 \
+       pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg)              \
+       pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg)     \
+       pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define        SKX_TAD_BASE(b)         ((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b)    GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b)    GET_BITFIELD((b), 6, 7)
+#define        SKX_TAD_LIMIT(b)        (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define        SKX_TAD_OFFSET(b)       ((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define        SKX_TAD_SKTWAYS(b)      (1 << GET_BITFIELD((b), 10, 11))
+#define        SKX_TAD_CHNWAYS(b)      (GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+       addr >>= shift;
+       addr /= ways;
+       addr <<= shift;
+
+       return addr | (lowbits & ((1ull << shift) - 1));
+}
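skx_do_interleave() inverts an N-way interleave: the address above the interleave bit is divided by the number of ways, then the low bits of the chosen source are spliced back in. A standalone sketch; undoing a 3-way interleave at bit 6 maps 0x300 to 0x100:

#include <stdint.h>
#include <stdio.h>

static uint64_t skx_do_interleave(uint64_t addr, int shift, int ways,
                                  uint64_t lowbits)
{
        addr >>= shift;
        addr /= ways;
        addr <<= shift;

        return addr | (lowbits & ((1ull << shift) - 1));
}

int main(void)
{
        /* (0x300 >> 6) = 12, 12 / 3 = 4, (4 << 6) = 0x100, low bits 0 */
        printf("%#llx\n",
               (unsigned long long)skx_do_interleave(0x300, 6, 3, 0x300));
        return 0;
}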
+
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+       int i;
+       u32 base, wayness, chnilvoffset;
+       int skt_interleave_bit, chn_interleave_bit;
+       u64 channel_addr;
+
+       for (i = 0; i < SKX_MAX_TAD; i++) {
+               SKX_GET_TADBASE(res->dev, res->imc, i, base);
+               SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+               if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+                       goto tad_found;
+       }
+       edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+       return false;
+
+tad_found:
+       res->sktways = SKX_TAD_SKTWAYS(wayness);
+       res->chanways = SKX_TAD_CHNWAYS(wayness);
+       skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+       chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+       SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+       channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+       if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+               /* Must handle channel first, then socket */
+               channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+                                                res->chanways, channel_addr);
+               channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+                                                res->sktways, channel_addr);
+       } else {
+               /* Handle socket then channel. Preserve low bits from original address */
+               channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+                                                res->sktways, res->addr);
+               channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+                                                res->chanways, res->addr);
+       }
+
+       res->chan_addr = channel_addr;
+
+       edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+                res->addr, res->chan_addr, res->sktways, res->chanways);
+       return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg)          \
+       pci_read_config_dword((d)->imc[mc].chan[ch].cdev,       \
+                             0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg)         \
+       pci_read_config_dword((d)->imc[mc].chan[ch].cdev,       \
+                             0x120 + 16 * idx + 4 * (i), &reg)
+
+#define        SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
+#define        SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define        SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
+#define        SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
+#define        SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+       int i, idx, chan_rank;
+       int shift;
+       u32 rirway, rirlv;
+       u64 rank_addr, prev_limit = 0, limit;
+
+       if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+               shift = 6;
+       else
+               shift = 13;
+
+       for (i = 0; i < SKX_MAX_RIR; i++) {
+               SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+               limit = SKX_RIR_LIMIT(rirway);
+               if (SKX_RIR_VALID(rirway)) {
+                       if (prev_limit <= res->chan_addr &&
+                           res->chan_addr <= limit)
+                               goto rir_found;
+               }
+               prev_limit = limit;
+       }
+       edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+       return false;
+
+rir_found:
+       rank_addr = res->chan_addr >> shift;
+       rank_addr /= SKX_RIR_WAYS(rirway);
+       rank_addr <<= shift;
+       rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+       res->rank_address = rank_addr;
+       idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+       SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+       res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+       chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+       res->channel_rank = chan_rank;
+       res->dimm = chan_rank / 4;
+       res->rank = chan_rank % 4;
+
+       edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+                res->addr, res->dimm, res->rank,
+                res->channel_rank, res->rank_address);
+       return true;
+}
+
+static u8 skx_close_row[] = {
+       15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+       3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+       14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+       3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+       3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+       int i, res = 0;
+
+       for (i = 0; i < nbits; i++)
+               res |= ((addr >> bits[i]) & 1) << i;
+       return res;
+}
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+       int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+       if (do_xor)
+               ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+       return ret;
+}
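skx_bits() gathers scattered physical-address bits into one dense field, using the row/column tables above as the scatter maps; skx_bank_bits() does the same for two bits plus an optional XOR fold. A toy standalone example of the gather with an invented three-entry map:

#include <stdint.h>
#include <stdio.h>

static int skx_bits(uint64_t addr, int nbits, const uint8_t *bits)
{
        int i, res = 0;

        for (i = 0; i < nbits; i++)
                res |= ((addr >> bits[i]) & 1) << i;
        return res;
}

int main(void)
{
        /* field bit0 <- addr bit3, bit1 <- addr bit7, bit2 <- addr bit4 */
        const uint8_t map[] = { 3, 7, 4 };
        uint64_t addr = (1ull << 3) | (1ull << 4);

        printf("%d\n", skx_bits(addr, 3, map)); /* prints 5 (0b101) */
        return 0;
}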
+
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+       struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+       int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+       if (dimm->close_pg) {
+               r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+               r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+               r->column |= 0x400; /* C10 is autoprecharge, always set */
+               r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+               r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+       } else {
+               r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+               if (dimm->fine_grain_bank)
+                       r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+               else
+                       r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+               r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+               r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+       }
+       r->row &= (1u << dimm->rowbits) - 1;
+
+       edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+                r->addr, r->row, r->column, r->bank_address,
+                r->bank_group);
+       return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+
+       return skx_sad_decode(res) && skx_tad_decode(res) &&
+               skx_rir_decode(res) && skx_mad_decode(res);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+       struct decoded_addr res;
+
+       res.addr = val;
+       skx_decode(&res);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+                                      struct dentry *parent, u64 *value)
+{
+       return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+       skx_test = debugfs_create_dir("skx_edac_test", NULL);
+       mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+       debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
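With CONFIG_EDAC_DEBUG enabled, an address can be pushed through the whole SAD/TAD/RIR/MAD pipeline from user space, e.g. `echo 0x12345678 > /sys/kernel/debug/skx_edac_test/addr`; the intermediate decode steps then appear in the edac_dbg() output.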
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+                                const struct mce *m,
+                                struct decoded_addr *res)
+{
+       enum hw_event_mc_err_type tp_event;
+       char *type, *optype, msg[256];
+       bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+       bool overflow = GET_BITFIELD(m->status, 62, 62);
+       bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+       bool recoverable;
+       u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+       u32 mscod = GET_BITFIELD(m->status, 16, 31);
+       u32 errcode = GET_BITFIELD(m->status, 0, 15);
+       u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+       recoverable = GET_BITFIELD(m->status, 56, 56);
+
+       if (uncorrected_error) {
+               if (ripv) {
+                       type = "FATAL";
+                       tp_event = HW_EVENT_ERR_FATAL;
+               } else {
+                       type = "NON_FATAL";
+                       tp_event = HW_EVENT_ERR_UNCORRECTED;
+               }
+       } else {
+               type = "CORRECTED";
+               tp_event = HW_EVENT_ERR_CORRECTED;
+       }
+
+       /*
+        * According with Table 15-9 of the Intel Architecture spec vol 3A,
+        * memory errors should fit in this mask:
+        *      000f 0000 1mmm cccc (binary)
+        * where:
+        *      f = Correction Report Filtering Bit. If 1, subsequent errors
+        *          won't be shown
+        *      mmm = error type
+        *      cccc = channel
+        * If the mask doesn't match, report an error to the parsing logic
+        */
+       if (!((errcode & 0xef80) == 0x80)) {
+               optype = "Can't parse: it is not a mem";
+       } else {
+               switch (optypenum) {
+               case 0:
+                       optype = "generic undef request error";
+                       break;
+               case 1:
+                       optype = "memory read error";
+                       break;
+               case 2:
+                       optype = "memory write error";
+                       break;
+               case 3:
+                       optype = "addr/cmd error";
+                       break;
+               case 4:
+                       optype = "memory scrubbing error";
+                       break;
+               default:
+                       optype = "reserved";
+                       break;
+               }
+       }
+
+       snprintf(msg, sizeof(msg),
+                "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+                overflow ? " OVERFLOW" : "",
+                (uncorrected_error && recoverable) ? " recoverable" : "",
+                mscod, errcode,
+                res->socket, res->imc, res->rank,
+                res->bank_group, res->bank_address, res->row, res->column);
+
+       edac_dbg(0, "%s\n", msg);
+
+       /* Call the helper to output message */
+       edac_mc_handle_error(tp_event, mci, core_err_cnt,
+                            m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+                            res->channel, res->dimm, -1,
+                            optype, msg);
+}
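The (errcode & 0xef80) == 0x80 test implements the bit pattern from the comment above: everything outside the f, mmm and cccc fields must be zero and bit 7 must be set. A standalone check with sample codes (the values are invented for illustration):

#include <stdio.h>

static int is_mem_err(unsigned int errcode)
{
        /* bits 15..13 and 11..8 must be 0, bit 7 must be 1; f/mmm/cccc free */
        return (errcode & 0xef80) == 0x80;
}

int main(void)
{
        printf("%d %d %d\n",
               is_mem_err(0x0091),  /* memory read error, channel 1 -> 1 */
               is_mem_err(0x1080),  /* filtering bit (bit 12) set -> still 1 */
               is_mem_err(0x0005)); /* not a memory error -> 0 */
        return 0;
}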
+
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+                              void *data)
+{
+       struct mce *mce = (struct mce *)data;
+       struct decoded_addr res;
+       struct mem_ctl_info *mci;
+       char *type;
+
+       if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+               return NOTIFY_DONE;
+
+       /* Ignore unless this is a memory-related error with a valid address */
+       if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+               return NOTIFY_DONE;
+
+       res.addr = mce->addr;
+       if (!skx_decode(&res))
+               return NOTIFY_DONE;
+       mci = res.dev->imc[res.imc].mci;
+
+       if (mce->mcgstatus & MCG_STATUS_MCIP)
+               type = "Exception";
+       else
+               type = "Event";
+
+       skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+       skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+                         "Bank %d: %016Lx\n", mce->extcpu, type,
+                         mce->mcgstatus, mce->bank, mce->status);
+       skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+       skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+       skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+       skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+                         "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+                         mce->time, mce->socketid, mce->apicid);
+
+       skx_mce_output_error(mci, mce, &res);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+       .notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+       int i, j;
+       struct skx_dev *d, *tmp;
+
+       edac_dbg(0, "\n");
+
+       list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+               list_del(&d->list);
+               for (i = 0; i < NUM_IMC; i++) {
+                       skx_unregister_mci(&d->imc[i]);
+                       for (j = 0; j < NUM_CHANNELS; j++)
+                               pci_dev_put(d->imc[i].chan[j].cdev);
+               }
+               pci_dev_put(d->util_all);
+               pci_dev_put(d->sad_all);
+
+               kfree(d);
+       }
+}
+
+/*
+ * skx_init:
+ *     make sure we are running on the correct cpu model
+ *     search for all the devices we need
+ *     check which DIMMs are present.
+ */
+int __init skx_init(void)
+{
+       const struct x86_cpu_id *id;
+       const struct munit *m;
+       int rc = 0, i;
+       u8 mc = 0, src_id, node_id;
+       struct skx_dev *d;
+
+       edac_dbg(2, "\n");
+
+       id = x86_match_cpu(skx_cpuids);
+       if (!id)
+               return -ENODEV;
+
+       rc = skx_get_hi_lo();
+       if (rc)
+               return rc;
+
+       rc = get_all_bus_mappings();
+       if (rc < 0)
+               goto fail;
+       if (rc == 0) {
+               edac_dbg(2, "No memory controllers found\n");
+               return -ENODEV;
+       }
+
+       for (m = skx_all_munits; m->did; m++) {
+               rc = get_all_munits(m);
+               if (rc < 0)
+                       goto fail;
+               if (rc != m->per_socket * skx_num_sockets) {
+                       edac_dbg(2, "Expected %d, got %d of %x\n",
+                                m->per_socket * skx_num_sockets, rc, m->did);
+                       rc = -ENODEV;
+                       goto fail;
+               }
+       }
+
+       list_for_each_entry(d, &skx_edac_list, list) {
+               src_id = get_src_id(d);
+               node_id = skx_get_node_id(d);
+               edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+               for (i = 0; i < NUM_IMC; i++) {
+                       d->imc[i].mc = mc++;
+                       d->imc[i].lmc = i;
+                       d->imc[i].src_id = src_id;
+                       d->imc[i].node_id = node_id;
+                       rc = skx_register_mci(&d->imc[i]);
+                       if (rc < 0)
+                               goto fail;
+               }
+       }
+
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
+       setup_skx_debug();
+
+       mce_register_decode_chain(&skx_mce_dec);
+
+       return 0;
+fail:
+       skx_remove();
+       return rc;
+}
+
+static void __exit skx_exit(void)
+{
+       edac_dbg(2, "\n");
+       mce_unregister_decode_chain(&skx_mce_dec);
+       skx_remove();
+       teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
index 438893762076ce9f025243f810de8a11e329adfd..ce2bc2a38101afdaed96bebedd186a528508f8cd 100644 (file)
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
                struct mbox_client *cl = &pchan->cl;
                struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
-               if (of_address_to_resource(shmem, 0, &res)) {
+               ret = of_address_to_resource(shmem, 0, &res);
+               of_node_put(shmem);
+               if (ret) {
                        dev_err(dev, "failed to get SCPI payload mem resource\n");
-                       ret = -EINVAL;
                        goto err;
                }
 
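Two things change in this hunk: the node reference taken by of_parse_phandle() is now dropped with of_node_put() on both the success and failure paths, and the real error code from of_address_to_resource() is propagated instead of a blanket -EINVAL.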
index 94a58a082b9930a2acc05192273c6941ef003ef6..44c01390d0353fd3170fc797eb4ce6393229bd14 100644 (file)
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
 
        ret = device_register(dmi_dev);
        if (ret)
-               goto fail_free_dmi_dev;
+               goto fail_put_dmi_dev;
 
        return 0;
 
-fail_free_dmi_dev:
-       kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+       put_device(dmi_dev);
 
+fail_class_unregister:
        class_unregister(&dmi_class);
 
        return ret;
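This follows the driver-core rule that once device_register() has been called the device's kobject owns the allocation, so a failed registration must be unwound with put_device(), which ends in the release callback, rather than kfree(). A hedged sketch of the general pattern, not the dmi code itself; register_simple_dev and my_dev_release are illustrative names:

#include <linux/device.h>
#include <linux/slab.h>

static void my_dev_release(struct device *dev)
{
        kfree(dev);
}

static int register_simple_dev(void)
{
        struct device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        device_initialize(dev);
        dev->release = my_dev_release;
        dev_set_name(dev, "example0");

        ret = device_add(dev);
        if (ret) {
                /*
                 * The kobject core may already hold references, so
                 * kfree(dev) here could become a use-after-free; drop
                 * our reference and let my_dev_release() free it.
                 */
                put_device(dev);
                return ret;
        }
        return 0;
}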
index c99c24bc79b02262298ea64b5ed3e6e625429f4d..9ae6c116c4746286770052fb6f241baddc08d5b0 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/efi.h>
+#include <linux/vmalloc.h>
 
 #define NO_FURTHER_WRITE_ACTION -1
 
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
        int ret;
        void *cap_hdr_temp;
 
-       cap_hdr_temp = kmap(cap_info->pages[0]);
+       cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+                       VM_MAP, PAGE_KERNEL);
        if (!cap_hdr_temp) {
-               pr_debug("%s: kmap() failed\n", __func__);
+               pr_debug("%s: vmap() failed\n", __func__);
                return -EFAULT;
        }
 
        ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
-       kunmap(cap_info->pages[0]);
+       vunmap(cap_hdr_temp);
        if (ret) {
                pr_err("%s: efi_capsule_update() failed\n", __func__);
                return ret;
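kmap() can only make a single page visible, but a capsule generally spans several non-contiguous pages and the firmware may consume the whole image immediately, so the submit path now builds one virtually contiguous mapping of all cap_info->index pages with vmap(), matching the relaxed contract documented in the next hunk.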
index 53b9fd2293ee8f5af6f7f6a38de61730c8c99ec9..6eedff45e6d77811a5c4922e6be6fa5cabb3b307 100644 (file)
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
  * map the capsule described by @capsule with its data in @pages and
  * send it to the firmware via the UpdateCapsule() runtime service.
  *
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * capsule_header_t that describes the entire contents of the capsule
  * must be at the start of the first data page.
  *
  * Even though this function will validate that the firmware supports
index 5a2631af7410782dc8f7ba993ab0b1139bf71abb..7dd2e2d372317f8a4ddb1fe464b29f662a001a28 100644 (file)
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
                }
 
                if (subnode) {
-                       node = of_get_flat_dt_subnode_by_name(node, subnode);
-                       if (node < 0)
+                       int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+                       if (err < 0)
                                return 0;
+
+                       node = err;
                }
 
                return __find_uefi_params(node, info, dt_params[i].params);
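The fix here is one of signedness: node is an unsigned long, so assigning the return value of of_get_flat_dt_subnode_by_name() to it directly meant the `< 0` error check could never fire; routing the result through a signed int preserves the error before it is written back.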
index 3bd127f953151dff89d0563db13deb12046e4315..aded10662020493390e29cb74a1910a4c51e4172 100644 (file)
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
 #define EFI_ALLOC_ALIGN                EFI_PAGE_SIZE
 #endif
 
+#define EFI_MMAP_NR_SLACK_SLOTS        8
+
 struct file_info {
        efi_file_handle_t *handle;
        u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
        }
 }
 
+static inline bool mmap_has_headroom(unsigned long buff_size,
+                                    unsigned long map_size,
+                                    unsigned long desc_size)
+{
+       unsigned long slack = buff_size - map_size;
+
+       return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
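mmap_has_headroom() only verifies that the buffer exceeds the current map by at least EFI_MMAP_NR_SLACK_SLOTS descriptors, so the same allocation can absorb map growth once allocations are no longer permitted. A standalone sketch; the 48-byte descriptor size is a typical value, not a constant of the spec:

#include <stdbool.h>
#include <stdio.h>

#define EFI_MMAP_NR_SLACK_SLOTS 8

static bool mmap_has_headroom(unsigned long buff_size, unsigned long map_size,
                              unsigned long desc_size)
{
        unsigned long slack = buff_size - map_size;

        return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}

int main(void)
{
        printf("%d %d\n",
               mmap_has_headroom(2304, 1920, 48),  /* 8 spare slots -> 1 */
               mmap_has_headroom(2304, 2000, 48)); /* 6 spare slots -> 0 */
        return 0;
}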
+
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               efi_memory_desc_t **map,
-                               unsigned long *map_size,
-                               unsigned long *desc_size,
-                               u32 *desc_ver,
-                               unsigned long *key_ptr)
+                               struct efi_boot_memmap *map)
 {
        efi_memory_desc_t *m = NULL;
        efi_status_t status;
        unsigned long key;
        u32 desc_version;
 
-       *map_size = sizeof(*m) * 32;
+       *map->desc_size =       sizeof(*m);
+       *map->map_size =        *map->desc_size * 32;
+       *map->buff_size =       *map->map_size;
 again:
-       /*
-        * Add an additional efi_memory_desc_t because we're doing an
-        * allocation which may be in a new descriptor region.
-        */
-       *map_size += sizeof(*m);
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               *map_size, (void **)&m);
+                               *map->map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
-       *desc_size = 0;
+       *map->desc_size = 0;
        key = 0;
-       status = efi_call_early(get_memory_map, map_size, m,
-                               &key, desc_size, &desc_version);
-       if (status == EFI_BUFFER_TOO_SMALL) {
+       status = efi_call_early(get_memory_map, map->map_size, m,
+                               &key, map->desc_size, &desc_version);
+       if (status == EFI_BUFFER_TOO_SMALL ||
+           !mmap_has_headroom(*map->buff_size, *map->map_size,
+                              *map->desc_size)) {
                efi_call_early(free_pool, m);
+               /*
+                * Make sure there are some entries of headroom so that the
+                * buffer can be reused for a new map after allocations are
+                * no longer permitted.  It's unlikely that the map will grow
+                * to exceed this headroom once we are ready to trigger
+                * ExitBootServices().
+                */
+               *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+               *map->buff_size = *map->map_size;
                goto again;
        }
 
        if (status != EFI_SUCCESS)
                efi_call_early(free_pool, m);
 
-       if (key_ptr && status == EFI_SUCCESS)
-               *key_ptr = key;
-       if (desc_ver && status == EFI_SUCCESS)
-               *desc_ver = desc_version;
+       if (map->key_ptr && status == EFI_SUCCESS)
+               *map->key_ptr = key;
+       if (map->desc_ver && status == EFI_SUCCESS)
+               *map->desc_ver = desc_version;
 
 fail:
-       *map = m;
+       *map->map = m;
        return status;
 }
 
@@ -113,13 +128,20 @@ fail:
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
 {
        efi_status_t status;
-       unsigned long map_size;
+       unsigned long map_size, buff_size;
        unsigned long membase  = EFI_ERROR;
        struct efi_memory_map map;
        efi_memory_desc_t *md;
+       struct efi_boot_memmap boot_map;
 
-       status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
-                                   &map_size, &map.desc_size, NULL, NULL);
+       boot_map.map =          (efi_memory_desc_t **)&map.map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &map.desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                return membase;
 
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
                            unsigned long size, unsigned long align,
                            unsigned long *addr, unsigned long max)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
        efi_status_t status;
        unsigned long nr_pages;
        u64 max_addr = 0;
        int i;
+       struct efi_boot_memmap boot_map;
+
+       boot_map.map =          &map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-                                   NULL, NULL);
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
                           unsigned long *addr)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
        efi_status_t status;
        unsigned long nr_pages;
        int i;
+       struct efi_boot_memmap boot_map;
 
-       status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-                                   NULL, NULL);
+       boot_map.map =          &map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
        *cmd_line_len = options_bytes;
        return (char *)cmdline_addr;
 }
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec.  Obtains the current memory map, and returns that info after calling
+ * ExitBootServices.  The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices.  A
+ * client-specific structure may be passed to the function via priv.  The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+                                   void *handle,
+                                   struct efi_boot_memmap *map,
+                                   void *priv,
+                                   efi_exit_boot_map_processing priv_func)
+{
+       efi_status_t status;
+
+       status = efi_get_memory_map(sys_table_arg, map);
+
+       if (status != EFI_SUCCESS)
+               goto fail;
+
+       status = priv_func(sys_table_arg, map, priv);
+       if (status != EFI_SUCCESS)
+               goto free_map;
+
+       status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+       if (status == EFI_INVALID_PARAMETER) {
+               /*
+                * The memory map changed between efi_get_memory_map() and
+                * exit_boot_services().  Per the UEFI Spec v2.6, Section 6.4:
+                * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+                * updated map, and try again.  The spec implies one retry
+                * should be sufficient, which is confirmed against the EDK2
+                * implementation.  Per the spec, we can only invoke
+                * get_memory_map() and exit_boot_services() - we cannot alloc
+                * so efi_get_memory_map() cannot be used, and we must reuse
+                * the buffer.  For all practical purposes, the headroom in the
+                * buffer should account for any changes in the map so the call
+                * to get_memory_map() is expected to succeed here.
+                */
+               *map->map_size = *map->buff_size;
+               status = efi_call_early(get_memory_map,
+                                       map->map_size,
+                                       *map->map,
+                                       map->key_ptr,
+                                       map->desc_size,
+                                       map->desc_ver);
+
+               /* exit_boot_services() was called, thus cannot free */
+               if (status != EFI_SUCCESS)
+                       goto fail;
+
+               status = priv_func(sys_table_arg, map, priv);
+               /* exit_boot_services() was called, thus cannot free */
+               if (status != EFI_SUCCESS)
+                       goto fail;
+
+               status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+       }
+
+       /* exit_boot_services() was called, thus cannot free */
+       if (status != EFI_SUCCESS)
+               goto fail;
+
+       return EFI_SUCCESS;
+
+free_map:
+       efi_call_early(free_pool, *map->map);
+fail:
+       return status;
+}
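A hedged sketch of the intended calling pattern, modeled on allocate_new_fdt_and_exit_boot() in the next file; my_priv and my_process_memmap are illustrative names (the types come from the stub's own headers), and the callback has to tolerate running again when ExitBootServices() is retried:

struct my_priv {
        unsigned long entries_seen;
};

static efi_status_t my_process_memmap(efi_system_table_t *sys_table_arg,
                                      struct efi_boot_memmap *map,
                                      void *priv)
{
        struct my_priv *p = priv;

        /* may run twice: once per exit_boot_services() attempt */
        p->entries_seen = *map->map_size / *map->desc_size;
        return EFI_SUCCESS;
}

/*
 * ...then, with a struct efi_boot_memmap 'map' wired up to locals as in
 * the callers patched below:
 *
 *      status = efi_exit_boot_services(sys_table, handle, &map, &p,
 *                                      my_process_memmap);
 */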
index e58abfa953cc5a30f49e4bdbd5f2857c6bcf876d..a6a93116a8f053f6c14911376ffa6da7f1dff44e 100644 (file)
@@ -152,6 +152,27 @@ fdt_set_fail:
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
 
+struct exit_boot_struct {
+       efi_memory_desc_t *runtime_map;
+       int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+                                  struct efi_boot_memmap *map,
+                                  void *priv)
+{
+       struct exit_boot_struct *p = priv;
+       /*
+        * Update the memory map with virtual addresses. The function will also
+        * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+        * entries so that we can pass it straight to SetVirtualAddressMap()
+        */
+       efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+                       p->runtime_map, p->runtime_entry_count);
+
+       return EFI_SUCCESS;
+}
+
 /*
  * Allocate memory for a new FDT, then add EFI, commandline, and
  * initrd related fields to the FDT.  This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                                            unsigned long fdt_addr,
                                            unsigned long fdt_size)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        u32 desc_ver;
        unsigned long mmap_key;
        efi_memory_desc_t *memory_map, *runtime_map;
        unsigned long new_fdt_size;
        efi_status_t status;
        int runtime_entry_count = 0;
+       struct efi_boot_memmap map;
+       struct exit_boot_struct priv;
+
+       map.map =       &runtime_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  &desc_ver;
+       map.key_ptr =   &mmap_key;
+       map.buff_size = &buff_size;
 
        /*
         * Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
         * subsequent allocations adding entries, since they could not affect
         * the number of EFI_MEMORY_RUNTIME regions.
         */
-       status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
-                                   &desc_size, &desc_ver, &mmap_key);
+       status = efi_get_memory_map(sys_table, &map);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
                return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
        pr_efi(sys_table,
               "Exiting boot services and installing virtual address map...\n");
 
+       map.map = &memory_map;
        /*
         * Estimate size of new FDT, and allocate memory for it. We
         * will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                 * we can get the memory map key  needed for
                 * exit_boot_services().
                 */
-               status = efi_get_memory_map(sys_table, &memory_map, &map_size,
-                                           &desc_size, &desc_ver, &mmap_key);
+               status = efi_get_memory_map(sys_table, &map);
                if (status != EFI_SUCCESS)
                        goto fail_free_new_fdt;
 
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                }
        }
 
-       /*
-        * Update the memory map with virtual addresses. The function will also
-        * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
-        * entries so that we can pass it straight into SetVirtualAddressMap()
-        */
-       efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
-                       &runtime_entry_count);
-
-       /* Now we are ready to exit_boot_services.*/
-       status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+       sys_table->boottime->free_pool(memory_map);
+       priv.runtime_map = runtime_map;
+       priv.runtime_entry_count = &runtime_entry_count;
+       status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+                                       exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
index 53f6d3fe6d8621519634d3aa2299aeaf5f2ca90d..0c9f58c5ba501157f0c8826ac0d80b485d86b716 100644 (file)
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
                              unsigned long random_seed)
 {
        unsigned long map_size, desc_size, total_slots = 0, target_slot;
+       unsigned long buff_size;
        efi_status_t status;
        efi_memory_desc_t *memory_map;
        int map_offset;
+       struct efi_boot_memmap map;
 
-       status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
-                                   &desc_size, NULL, NULL);
+       map.map =       &memory_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  NULL;
+       map.key_ptr =   NULL;
+       map.buff_size = &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &map);
        if (status != EFI_SUCCESS)
                return status;
 
index 98dd47a30fc754af1aede05d31dc2c6179d6d2b1..24caedb00a7a34f141bced4062f07a8d72439a63 100644 (file)
@@ -50,6 +50,7 @@ config GPIO_DEVRES
 config OF_GPIO
        def_bool y
        depends on OF
+       depends on HAS_IOMEM
 
 config GPIO_ACPI
        def_bool y
@@ -188,7 +189,7 @@ config GPIO_EP93XX
 config GPIO_ETRAXFS
        bool "Axis ETRAX FS General I/O"
        depends on CRIS || COMPILE_TEST
-       depends on OF
+       depends on OF_GPIO
        select GPIO_GENERIC
        select GPIOLIB_IRQCHIP
        help
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
 
 config GPIO_GRGPIO
        tristate "Aeroflex Gaisler GRGPIO support"
-       depends on OF
+       depends on OF_GPIO
        select GPIO_GENERIC
        select IRQ_DOMAIN
        help
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX
 config GPIO_MVEBU
        def_bool y
        depends on PLAT_ORION
-       depends on OF
+       depends on OF_GPIO
        select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -405,7 +406,7 @@ config GPIO_TEGRA
        bool "NVIDIA Tegra GPIO support"
        default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
-       depends on OF
+       depends on OF_GPIO
        help
          Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
 
 config GPIO_74X164
        tristate "74x164 serial-in/parallel-out 8-bits shift register"
-       depends on OF
+       depends on OF_GPIO
        help
          Driver for 74x164 compatible serial-in/parallel-out 8-outputs
          shift registers. This driver can be used to provide access
@@ -1130,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
 
 config GPIO_MCP23S08
        tristate "Microchip MCP23xxx I/O expander"
+       depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        help
          SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
index 08807368f0078337bff7ba6c63bebadf72f5e018..946d09195598f4dacde27162193e1ee99490481f 100644 (file)
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
        ts->chip.parent = dev;
        ts->chip.owner = THIS_MODULE;
 
+       ret = gpiochip_add_data(&ts->chip, ts);
+       if (ret)
+               goto exit_destroy;
+
        /*
         * initialize pullups according to platform data and cache the
         * register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
                }
        }
 
-       ret = gpiochip_add_data(&ts->chip, ts);
-       if (ret)
-               goto exit_destroy;
-
        return ret;
 
 exit_destroy:
index ac22efc1840ee020f750359b13239ff59d1b20dc..99d37b56c258a24b67505022431ceb1e92403d68 100644 (file)
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        mcp->chip.direction_output = mcp23s08_direction_output;
        mcp->chip.set = mcp23s08_set;
        mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
        mcp->chip.of_gpio_n_cells = 2;
        mcp->chip.of_node = dev->of_node;
 #endif
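
The Kconfig hunks above tighten several GPIO drivers from "depends on OF" to
"depends on OF_GPIO", and the mcp23s08 change switches the matching #ifdef:
the of_node and of_gpio_n_cells members of struct gpio_chip are only
compiled in when CONFIG_OF_GPIO is set, so guarding them with plain
CONFIG_OF breaks the build on OF=y, OF_GPIO=n configurations (e.g. OF
without HAS_IOMEM, per the first hunk). A minimal sketch of the guard as a
probe path would use it (function and variable names are placeholders):

    #include <linux/device.h>
    #include <linux/gpio/driver.h>
    #include <linux/of.h>

    static void example_setup_of(struct gpio_chip *chip, struct device *dev)
    {
    #ifdef CONFIG_OF_GPIO
            /* These members exist only when CONFIG_OF_GPIO=y, hence the
             * Kconfig change from OF to OF_GPIO above. */
            chip->of_gpio_n_cells = 2;
            chip->of_node = dev->of_node;
    #endif
    }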
index 0c99e8fb9af3822350ffd660ab64b678a11bac65..8d8ee0ebf14c03dc96b95b8d98c2f2d2c73352c9 100644 (file)
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
 {
        irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
                                 handle_edge_irq);
-       irq_set_noprobe(irq);
+       irq_set_probe(irq);
 
        return 0;
 }
index 75e7b3919ea7756f0d417eee62855145ca678957..a28feb3edf33350f5e7de8986cdca6c3ea2ee764 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
-#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index fc357319de357cb0243330e75baeb8a35b23c889..483059a22b1b714461303fb3f67d783bbce19a07 100644 (file)
@@ -108,33 +108,13 @@ config DRM_KMS_CMA_HELPER
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-config DRM_TDFX
-       tristate "3dfx Banshee/Voodoo3+"
-       depends on DRM && PCI
-       help
-         Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
-         graphics card.  If M is selected, the module will be called tdfx.
-
 source "drivers/gpu/drm/arm/Kconfig"
 
-config DRM_R128
-       tristate "ATI Rage 128"
-       depends on DRM && PCI
-       select FW_LOADER
-       help
-         Choose this option if you have an ATI Rage 128 graphics card.  If M
-         is selected, the module will be called r128.  AGP support for
-         this card is strongly suggested (unless you have a PCI version).
-
 config DRM_RADEON
        tristate "ATI Radeon"
        depends on DRM && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select FW_LOADER
         select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
         select DRM_TTM
        select POWER_SUPPLY
        select HWMON
@@ -153,12 +133,8 @@ source "drivers/gpu/drm/radeon/Kconfig"
 config DRM_AMDGPU
        tristate "AMD GPU"
        depends on DRM && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select FW_LOADER
         select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
         select DRM_TTM
        select POWER_SUPPLY
        select HWMON
@@ -171,55 +147,11 @@ config DRM_AMDGPU
          If M is selected, the module will be called amdgpu.
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
-
-source "drivers/gpu/drm/amd/acp/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
 
-config DRM_I810
-       tristate "Intel I810"
-       # !PREEMPT because of missing ioctl locking
-       depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
-       help
-         Choose this option if you have an Intel I810 graphics card.  If M is
-         selected, the module will be called i810.  AGP support is required
-         for this driver to work.
-
 source "drivers/gpu/drm/i915/Kconfig"
 
-config DRM_MGA
-       tristate "Matrox g200/g400"
-       depends on DRM && PCI
-       select FW_LOADER
-       help
-         Choose this option if you have a Matrox G200, G400 or G450 graphics
-         card.  If M is selected, the module will be called mga.  AGP
-         support is required for this driver to work.
-
-config DRM_SIS
-       tristate "SiS video cards"
-       depends on DRM && AGP
-       depends on FB_SIS || FB_SIS=n
-       help
-         Choose this option if you have a SiS 630 or compatible video
-          chipset. If M is selected the module will be called sis. AGP
-          support is required for this driver to work.
-
-config DRM_VIA
-       tristate "Via unichrome video cards"
-       depends on DRM && PCI
-       help
-         Choose this option if you have a Via unichrome or compatible video
-         chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
-       tristate "Savage video cards"
-       depends on DRM && PCI
-       help
-         Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
-         chipset. If M is selected the module will be called savage.
-
 config DRM_VGEM
        tristate "Virtual GEM provider"
        depends on DRM
@@ -290,3 +222,81 @@ source "drivers/gpu/drm/arc/Kconfig"
 source "drivers/gpu/drm/hisilicon/Kconfig"
 
 source "drivers/gpu/drm/mediatek/Kconfig"
+
+# Keep legacy drivers last
+
+menuconfig DRM_LEGACY
+       bool "Enable legacy drivers (DANGEROUS)"
+       depends on DRM
+       help
+         Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
+         APIs to user-space, which can be used to circumvent access
+         restrictions and other security measures. For backwards compatibility
+         those drivers are still available, but their use is highly
+         inadvisable and might harm your system.
+
+         We recommend that you use the safe modeset-only drivers instead, and
+         perform 3D emulation in user-space.
+
+         Unless you have strong reasons to go rogue, say "N".
+
+if DRM_LEGACY
+
+config DRM_TDFX
+       tristate "3dfx Banshee/Voodoo3+"
+       depends on DRM && PCI
+       help
+         Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+         graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+       tristate "ATI Rage 128"
+       depends on DRM && PCI
+       select FW_LOADER
+       help
+         Choose this option if you have an ATI Rage 128 graphics card.  If M
+         is selected, the module will be called r128.  AGP support for
+         this card is strongly suggested (unless you have a PCI version).
+
+config DRM_I810
+       tristate "Intel I810"
+       # !PREEMPT because of missing ioctl locking
+       depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+       help
+         Choose this option if you have an Intel I810 graphics card.  If M is
+         selected, the module will be called i810.  AGP support is required
+         for this driver to work.
+
+config DRM_MGA
+       tristate "Matrox g200/g400"
+       depends on DRM && PCI
+       select FW_LOADER
+       help
+         Choose this option if you have a Matrox G200, G400 or G450 graphics
+         card.  If M is selected, the module will be called mga.  AGP
+         support is required for this driver to work.
+
+config DRM_SIS
+       tristate "SiS video cards"
+       depends on DRM && AGP
+       depends on FB_SIS || FB_SIS=n
+       help
+         Choose this option if you have a SiS 630 or compatible video
+         chipset. If M is selected the module will be called sis. AGP
+         support is required for this driver to work.
+
+config DRM_VIA
+       tristate "Via unichrome video cards"
+       depends on DRM && PCI
+       help
+         Choose this option if you have a Via unichrome or compatible video
+         chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+       tristate "Savage video cards"
+       depends on DRM && PCI
+       help
+         Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+         chipset. If M is selected the module will be called savage.
+
+endif # DRM_LEGACY
index 0238bf8bc8c37458d06cb34903dff22a114ed6e2..25c720454017e69a004fccfd7efe8147cdf60b46 100644 (file)
@@ -12,7 +12,10 @@ drm-y       :=       drm_auth.o drm_bufs.o drm_cache.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_prime.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
-               drm_modeset_lock.o drm_atomic.o drm_bridge.o
+               drm_modeset_lock.o drm_atomic.o drm_bridge.o \
+               drm_framebuffer.o drm_connector.o drm_blend.o \
+               drm_encoder.o drm_mode_object.o drm_property.o \
+               drm_plane.o drm_color_mgmt.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -24,7 +27,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
                drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
                drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
-               drm_simple_kms_helper.o drm_blend.o
+               drm_simple_kms_helper.o drm_modeset_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -46,7 +49,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_VC4)  += vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
index 7335c0420c7034d1d66aa4a52946d79ddd25aba7..53cf3971dfc36a507e42e39dee2bbf492fe95caa 100644 (file)
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+       bool "Enable amdgpu support for SI parts"
+       depends on DRM_AMDGPU
+       help
+         Choose this option if you want to enable experimental support
+         for SI asics.
+
 config DRM_AMDGPU_CIK
        bool "Enable amdgpu support for CIK parts"
        depends on DRM_AMDGPU
@@ -25,3 +32,5 @@ config DRM_AMDGPU_GART_DEBUGFS
          Selecting this option creates a debugfs file to inspect the mapped
          pages. Uses more memory for housekeeping, enable only for debugging.
 
+source "drivers/gpu/drm/amd/powerplay/Kconfig"
+source "drivers/gpu/drm/amd/acp/Kconfig"
index c7fcdcedaadbf5b98a333ad6e87796bccb653812..d15e9b080ce1b362227d5ceea4206a7554c277da 100644 (file)
@@ -30,6 +30,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
        amdgpu_amdkfd_gfx_v7.o
 
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
 amdgpu-y += \
        vi.o
 
@@ -50,15 +52,13 @@ amdgpu-y += \
 amdgpu-y += \
        amdgpu_dpm.o \
        amdgpu_powerplay.o \
-       cz_smc.o cz_dpm.o \
-       tonga_smc.o tonga_dpm.o \
-       fiji_smc.o fiji_dpm.o \
-       iceland_smc.o iceland_dpm.o
+       cz_smc.o cz_dpm.o
 
 # add DCE block
 amdgpu-y += \
        dce_v10_0.o \
-       dce_v11_0.o
+       dce_v11_0.o \
+       dce_virtual.o
 
 # add GFX block
 amdgpu-y += \
index 06192698bd96e786a9b95b24e4ee0892ce961627..b8d66670bb17eb5aa8261764f15918ebb9bde050 100644 (file)
@@ -90,6 +90,7 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
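
The new ENCODER_VIRTUAL_ENUM_VIRTUAL define follows the same packing scheme
as the surrounding object IDs: an object type, an enum ID and an object ID
OR-ed together at fixed shifts within one 32-bit word. A small sketch of the
composition (the *_SHIFT constants are defined elsewhere in this header):

    static inline u32 graph_object_id(u32 type, u32 enum_id, u32 obj_id)
    {
            return (type    << OBJECT_TYPE_SHIFT) |
                   (enum_id << ENUM_ID_SHIFT)     |
                   (obj_id  << OBJECT_ID_SHIFT);
    }

    /* graph_object_id(GRAPH_OBJECT_TYPE_ENCODER,
     *                 GRAPH_OBJECT_ENUM_VIRTUAL,
     *                 ENCODER_OBJECT_ID_VIRTUAL)
     * yields the same value as ENCODER_VIRTUAL_ENUM_VIRTUAL above. */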
index 8ebc5f1eb4c0fed15da2f40b0024f74099488c54..72c68dbb982136b73f84ad881ca30a90ca2f8866 100644 (file)
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
 
 /*
  * Module parameters.
@@ -63,6 +65,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
 extern int amdgpu_audio;
@@ -91,6 +94,9 @@ extern unsigned amdgpu_pcie_lane_cap;
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
@@ -105,7 +111,7 @@ extern char *amdgpu_disable_cu;
 #define AMDGPU_MAX_RINGS                       16
 #define AMDGPU_MAX_GFX_RINGS                   1
 #define AMDGPU_MAX_COMPUTE_RINGS               8
-#define AMDGPU_MAX_VCE_RINGS                   2
+#define AMDGPU_MAX_VCE_RINGS                   3
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES              2
@@ -248,10 +254,9 @@ struct amdgpu_vm_pte_funcs {
                         uint64_t pe, uint64_t src,
                         unsigned count);
        /* write pte one entry at a time with addr mapping */
-       void (*write_pte)(struct amdgpu_ib *ib,
-                         const dma_addr_t *pages_addr, uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags);
+       void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+                         uint64_t value, unsigned count,
+                         uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
@@ -316,6 +321,10 @@ struct amdgpu_ring_funcs {
        /* note usage for clock and power gating */
        void (*begin_use)(struct amdgpu_ring *ring);
        void (*end_use)(struct amdgpu_ring *ring);
+       void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+       void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+       unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+       unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 /*
@@ -396,46 +405,8 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
- * TTM.
+ * BO.
  */
-
-#define AMDGPU_TTM_LRU_SIZE    20
-
-struct amdgpu_mman_lru {
-       struct list_head                *lru[TTM_NUM_MEM_TYPES];
-       struct list_head                *swap_lru;
-};
-
-struct amdgpu_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
-       struct ttm_bo_device            bdev;
-       bool                            mem_global_referenced;
-       bool                            initialized;
-
-#if defined(CONFIG_DEBUG_FS)
-       struct dentry                   *vram;
-       struct dentry                   *gtt;
-#endif
-
-       /* buffer handling */
-       const struct amdgpu_buffer_funcs        *buffer_funcs;
-       struct amdgpu_ring                      *buffer_funcs_ring;
-       /* Scheduler entity for buffer moves */
-       struct amd_sched_entity                 entity;
-
-       /* custom LRU management */
-       struct amdgpu_mman_lru                  log2_size[AMDGPU_TTM_LRU_SIZE];
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-                      uint64_t src_offset,
-                      uint64_t dst_offset,
-                      uint32_t byte_count,
-                      struct reservation_object *resv,
-                      struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 struct amdgpu_bo_list_entry {
        struct amdgpu_bo                *robj;
        struct ttm_validate_buffer      tv;
@@ -498,10 +469,12 @@ struct amdgpu_bo {
        struct amdgpu_device            *adev;
        struct drm_gem_object           gem_base;
        struct amdgpu_bo                *parent;
+       struct amdgpu_bo                *shadow;
 
        struct ttm_bo_kmap_obj          dma_buf_vmap;
        struct amdgpu_mn                *mn;
        struct list_head                mn_list;
+       struct list_head                shadow_list;
 };
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
 
@@ -646,11 +619,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
@@ -677,6 +651,8 @@ struct amdgpu_mc {
        uint32_t                fw_version;
        struct amdgpu_irq_src   vm_fault;
        uint32_t                vram_type;
+       uint32_t                srbm_soft_reset;
+       struct amdgpu_mode_mc_save save;
 };
 
 /*
@@ -721,10 +697,11 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
  */
 
 struct amdgpu_flip_work {
-       struct work_struct              flip_work;
+       struct delayed_work             flip_work;
        struct work_struct              unpin_work;
        struct amdgpu_device            *adev;
        int                             crtc_id;
+       u32                             target_vblank;
        uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct amdgpu_bo                *old_rbo;
@@ -815,13 +792,17 @@ struct amdgpu_ring {
 /* maximum number of VMIDs */
 #define AMDGPU_NUM_VM  16
 
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE      0x3FFFF
+
 /* number of entries in page table */
 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of contiguous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
 #define AMDGPU_PTE_VALID       (1 << 0)
 #define AMDGPU_PTE_SYSTEM      (1 << 1)
@@ -833,10 +814,7 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_READABLE    (1 << 5)
 #define AMDGPU_PTE_WRITEABLE   (1 << 6)
 
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB    (0 << 7)
-#define AMDGPU_PTE_FRAG_64KB   (4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x)     (((x) & 0x1f) << 7)
 
 /* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER     0
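
The fixed AMDGPU_PTE_FRAG_4KB/AMDGPU_PTE_FRAG_64KB encodings are replaced by
the parameterized AMDGPU_PTE_FRAG(x), where x is the log2 number of
contiguous 4 KiB pages in a fragment, still stored in bits 7..11 of the PTE.
A quick check that the new macro reproduces the old constants:

    /* AMDGPU_PTE_FRAG(0) == (0 << 7) -- the old AMDGPU_PTE_FRAG_4KB   */
    /* AMDGPU_PTE_FRAG(4) == (4 << 7) -- the old AMDGPU_PTE_FRAG_64KB,
     *                                   i.e. 2^4 * 4 KiB = 64 KiB     */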
@@ -846,6 +824,7 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
        struct amdgpu_bo_list_entry     entry;
        uint64_t                        addr;
+       uint64_t                        shadow_addr;
 };
 
 struct amdgpu_vm {
@@ -948,7 +927,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -957,7 +935,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
-                       struct ttm_mem_reg *mem);
+                       bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -992,6 +970,7 @@ struct amdgpu_ctx {
        spinlock_t              ring_lock;
        struct fence            **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
+       bool preamble_presented;
 };
 
 struct amdgpu_ctx_mgr {
@@ -1195,6 +1174,10 @@ struct amdgpu_gfx {
        unsigned                        ce_ram_size;
        struct amdgpu_cu_info           cu_info;
        const struct amdgpu_gfx_funcs   *funcs;
+
+       /* reset mask */
+       uint32_t                        grbm_soft_reset;
+       uint32_t                        srbm_soft_reset;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1247,11 +1230,16 @@ struct amdgpu_cs_parser {
        struct fence                    *fence;
        uint64_t                        bytes_moved_threshold;
        uint64_t                        bytes_moved;
+       struct amdgpu_bo_list_entry     *evictable;
 
        /* user fence */
        struct amdgpu_bo_list_entry     uf_entry;
 };
 
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means this is the first preamble IB presented in its context */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means a context switch occurred */
+
 struct amdgpu_job {
        struct amd_sched_job    base;
        struct amdgpu_device    *adev;
@@ -1260,9 +1248,10 @@ struct amdgpu_job {
        struct amdgpu_sync      sync;
        struct amdgpu_ib        *ibs;
        struct fence            *fence; /* the hw fence */
+       uint32_t                preamble_status;
        uint32_t                num_ibs;
        void                    *owner;
-       uint64_t                ctx;
+       uint64_t                fence_ctx; /* the fence_context this job uses */
        bool                    vm_needs_flush;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
@@ -1683,6 +1672,7 @@ struct amdgpu_uvd {
        bool                    address_64_bit;
        bool                    use_ctx_buf;
        struct amd_sched_entity entity;
+       uint32_t                srbm_soft_reset;
 };
 
 /*
@@ -1709,6 +1699,8 @@ struct amdgpu_vce {
        struct amdgpu_irq_src   irq;
        unsigned                harvest_config;
        struct amd_sched_entity entity;
+       uint32_t                srbm_soft_reset;
+       unsigned                num_rings;
 };
 
 /*
@@ -1726,9 +1718,14 @@ struct amdgpu_sdma_instance {
 
 struct amdgpu_sdma {
        struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+       //SI DMA has a difference trap irq number for the second engine
+       struct amdgpu_irq_src   trap_irq_1;
+#endif
        struct amdgpu_irq_src   trap_irq;
        struct amdgpu_irq_src   illegal_inst_irq;
        int                     num_instances;
+       uint32_t                srbm_soft_reset;
 };
 
 /*
@@ -1830,6 +1827,7 @@ struct amdgpu_asic_funcs {
        bool (*read_disabled_bios)(struct amdgpu_device *adev);
        bool (*read_bios_from_rom)(struct amdgpu_device *adev,
                                   u8 *bios, u32 length_bytes);
+       void (*detect_hw_virtualization) (struct amdgpu_device *adev);
        int (*read_register)(struct amdgpu_device *adev, u32 se_num,
                             u32 sh_num, u32 reg_offset, u32 *value);
        void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1839,8 +1837,9 @@ struct amdgpu_asic_funcs {
        /* MM block clocks */
        int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
-       /* query virtual capabilities */
-       u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+       /* static power management */
+       int (*get_pcie_lanes)(struct amdgpu_device *adev);
+       void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 };
 
 /*
@@ -1933,16 +1932,6 @@ struct amdgpu_atcs {
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
-struct amdgpu_virtualization {
-       bool supports_sr_iov;
-       bool is_virtual;
-       u32 caps;
-};
-
 /*
  * Core structure, functions and helpers.
  */
@@ -1956,6 +1945,7 @@ struct amdgpu_ip_block_status {
        bool valid;
        bool sw;
        bool hw;
+       bool hang;
 };
 
 struct amdgpu_device {
@@ -2014,6 +2004,8 @@ struct amdgpu_device {
        spinlock_t pcie_idx_lock;
        amdgpu_rreg_t                   pcie_rreg;
        amdgpu_wreg_t                   pcie_wreg;
+       amdgpu_rreg_t                   pciep_rreg;
+       amdgpu_wreg_t                   pciep_wreg;
        /* protects concurrent UVD register access */
        spinlock_t uvd_ctx_idx_lock;
        amdgpu_rreg_t                   uvd_ctx_rreg;
@@ -2054,7 +2046,16 @@ struct amdgpu_device {
        atomic64_t                      num_evictions;
        atomic_t                        gpu_reset_counter;
 
+       /* data for buffer migration throttling */
+       struct {
+               spinlock_t              lock;
+               s64                     last_update_us;
+               s64                     accum_us; /* accumulated microseconds */
+               u32                     log2_max_MBps;
+       } mm_stats;
+
        /* display */
+       bool                            enable_virtual_display;
        struct amdgpu_mode_info         mode_info;
        struct work_struct              hotplug_work;
        struct amdgpu_irq_src           crtc_irq;
@@ -2117,6 +2118,14 @@ struct amdgpu_device {
        struct kfd_dev          *kfd;
 
        struct amdgpu_virtualization virtualization;
+
+       /* link all shadow bo */
+       struct list_head                shadow_list;
+       struct mutex                    shadow_list_lock;
+       /* link all gtt */
+       spinlock_t                      gtt_list_lock;
+       struct list_head                gtt_list;
+
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2149,6 +2158,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2192,6 +2203,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET_FIELD(value, reg, field)                               \
        (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD(reg, field, val)  \
+       WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
  */
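
WREG32_FIELD() is a read-modify-write helper built on the same
REG_FIELD_MASK()/REG_FIELD_SHIFT() generators as REG_GET_FIELD() above: it
reads the register, clears the named field, and ORs in the new value at the
field's shift. Expanded by hand for a hypothetical register/field pair (the
names below are illustrative, not taken from this patch):

    /* WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_GFX, 1) is roughly: */
    u32 tmp = RREG32(mmGRBM_SOFT_RESET);
    tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;       /* clear the field  */
    tmp |= 1 << GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT; /* insert new value */
    WREG32(mmGRBM_SOFT_RESET, tmp);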
@@ -2235,14 +2249,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2257,9 +2274,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2291,6 +2312,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+       ((adev)->pp_enabled ? \
+               (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+               -EINVAL)
+
 #define amdgpu_dpm_get_temperature(adev) \
        ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2342,11 +2368,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
              (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
              (adev)->pm.funcs->powergate_vce((adev), (g)))
 
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-       ((adev)->pp_enabled ?                                           \
-             (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-             (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
        (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
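
The amdgpu_dpm_* wrappers in this header dispatch to the powerplay backend
when pp_enabled is set and otherwise to the legacy pm callbacks; the new
amdgpu_dpm_read_sensor() above has no legacy fallback and returns -EINVAL
instead. A usage sketch (the sensor index name and value type are
illustrative, not from this patch):

    int32_t value;
    int err;

    err = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value);
    if (err)        /* -EINVAL when powerplay is not enabled */
            dev_warn(adev->dev, "sensor read failed: %d\n", err);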
 
@@ -2387,6 +2408,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 
 /* Common functions */
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2412,6 +2434,10 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size);
@@ -2423,11 +2449,13 @@ void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 #endif
 
 /*
@@ -2444,8 +2472,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2491,6 +2519,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
 #endif
index 5cd7b736a9de7196bf40fbba354402f799e3beba..5796539a0bcb9c50a20871a0b822817fd4dc5265 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
 #endif
                }
        }
+       if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+               if ((adev->flags & AMD_IS_PX) &&
+                   amdgpu_atpx_dgpu_req_power_for_displays()) {
+                       pm_runtime_get_sync(adev->ddev->dev);
+                       /* Just fire off a uevent and let userspace tell us what to do */
+                       drm_helper_hpd_irq_event(adev->ddev);
+                       pm_runtime_mark_last_busy(adev->ddev->dev);
+                       pm_runtime_put_autosuspend(adev->ddev->dev);
+               }
+       }
        /* TODO: check other events */
 
        /* We've handled the event, stop the notifier chain. The ACPI interface
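
The new ATIF_DGPU_DISPLAY_EVENT branch wakes a runtime-suspended dGPU, fires
an HPD uevent so userspace can decide what to do with the displays, and then
drops its reference through the autosuspend pair so the device can go back
to sleep. The same runtime-PM bracket in isolation (a sketch, assuming a
struct drm_device *ddev is at hand):

    #include <linux/pm_runtime.h>
    #include <drm/drm_crtc_helper.h>

    static void notify_displays(struct drm_device *ddev)
    {
            pm_runtime_get_sync(ddev->dev);        /* wake the device       */
            drm_helper_hpd_irq_event(ddev);        /* let userspace reprobe */
            pm_runtime_mark_last_busy(ddev->dev);  /* rearm autosuspend     */
            pm_runtime_put_autosuspend(ddev->dev); /* drop ref; may suspend */
    }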
index d080d0807a5bb81fbb19f6e45926c2d8ff06050d..dba8a5b25e661ec80ae8b69f91541b157bb3535d 100644 (file)
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
        return r;
 }
 
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
-       switch (p) {
-       case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
-       default: return AMDGPU_GEM_DOMAIN_GTT;
-       }
-}
-
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
                        void **cpu_ptr)
index 362bedc9e50791ee6a9b1e60f4885a87ba158677..1a0a5f7cccbc2661348bc6d1263ac8fa9ac47c99 100644 (file)
@@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout);
+                               unsigned int utimeout);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
@@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
+       int timeout = utimeout;
 
        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
                        break;
-               if (timeout == 0) {
-                       pr_err("kfd: cp queue preemption time out (%dms)\n",
-                               temp);
+               if (timeout <= 0) {
+                       pr_err("kfd: cp queue preemption timed out\n");
                        release_queue(kgd);
                        return -ETIME;
                }
@@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout)
+                               unsigned int utimeout)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
+       int timeout = utimeout;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
                        break;
-               if (timeout == 0)
+               if (timeout <= 0)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
index 04b744d64b57a6355f38ff071f48836148880257..6697612239c2753414e67ec1af8c1e8a14bb387e 100644 (file)
@@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                uint32_t pipe_id, uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout);
+                               unsigned int utimeout);
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
+       int timeout = utimeout;
 
        acquire_queue(kgd, pipe_id, queue_id);
 
@@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
                        break;
-               if (timeout == 0) {
-                       pr_err("kfd: cp queue preemption time out (%dms)\n",
-                               temp);
+               if (timeout <= 0) {
+                       pr_err("kfd: cp queue preemption timed out\n");
                        release_queue(kgd);
                        return -ETIME;
                }
@@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout)
+                               unsigned int utimeout)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
+       int timeout = utimeout;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
                        break;
-               if (timeout == 0)
+               if (timeout <= 0)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
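
Both kfd backends above also make the countdown signed: with an unsigned
timeout, "timeout -= 20" wraps past zero whenever the caller passes a value
that is not a multiple of 20, so the old "timeout == 0" exit is never taken
and the loop spins forever. A signed counter checked with "<= 0" always
terminates. The corrected polling shape in isolation (hardware_is_idle() is
a placeholder for the RREG32 status check):

    #include <linux/delay.h>
    #include <linux/errno.h>

    static int wait_for_idle(unsigned int utimeout)
    {
            int timeout = utimeout;         /* signed: may go negative */

            for (;;) {
                    if (hardware_is_idle())
                            return 0;
                    if (timeout <= 0)       /* catches non-multiples of 20 */
                            return -ETIME;
                    msleep(20);
                    timeout -= 20;
            }
    }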
index 983175363b0688df6ca181e612000173da9141bf..8e6bf548d68907f871952f0109186a03373cdd5f 100644 (file)
@@ -259,6 +259,33 @@ static const int object_connector_convert[] = {
        DRM_MODE_CONNECTOR_Unknown
 };
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       struct atom_context *ctx = mode_info->atom_context;
+       int index = GetIndexIntoMasterTable(DATA, Object_Header);
+       u16 size, data_offset;
+       u8 frev, crev;
+       ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+       ATOM_OBJECT_HEADER *obj_header;
+
+       if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+               return false;
+
+       if (crev < 2)
+               return false;
+
+       obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+       path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+           (ctx->bios + data_offset +
+            le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+       if (path_obj->ucNumOfDispPath)
+               return true;
+       else
+               return false;
+}
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
 {
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -321,6 +348,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
                            (le16_to_cpu(path->usConnObjectId) &
                             OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+                       /* Skip TV/CV support */
+                       if ((le16_to_cpu(path->usDeviceTag) ==
+                            ATOM_DEVICE_TV1_SUPPORT) ||
+                           (le16_to_cpu(path->usDeviceTag) ==
+                            ATOM_DEVICE_CV_SUPPORT))
+                               continue;
+
+                       if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+                               DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+                                         con_obj_id, le16_to_cpu(path->usDeviceTag));
+                               continue;
+                       }
+
                        connector_type =
                                object_connector_convert[con_obj_id];
                        connector_object_id = con_obj_id;
@@ -951,6 +991,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                return -EINVAL;
 
        switch (crev) {
+       case 2:
+       case 3:
+       case 5:
+               /* r6xx, r7xx, evergreen, ni, si.
+                * TODO: add support for asic_type <= CHIP_RV770 */
+               if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+                       args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                       dividers->post_div = args.v3.ucPostDiv;
+                       dividers->enable_post_div = (args.v3.ucCntlFlag &
+                                                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                       dividers->enable_dithen = (args.v3.ucCntlFlag &
+                                                  ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                       dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+                       dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+                       dividers->ref_div = args.v3.ucRefDiv;
+                       dividers->vco_mode = (args.v3.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+               } else {
+                       /* for SI we use ComputeMemoryClockParam for memory plls */
+                       if (adev->asic_type >= CHIP_TAHITI)
+                               return -EINVAL;
+                       args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+                       if (strobe_mode)
+                               args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                       dividers->post_div = args.v5.ucPostDiv;
+                       dividers->enable_post_div = (args.v5.ucCntlFlag &
+                                                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                       dividers->enable_dithen = (args.v5.ucCntlFlag &
+                                                  ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                       dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+                       dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+                       dividers->ref_div = args.v5.ucRefDiv;
+                       dividers->vco_mode = (args.v5.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+               }
+               break;
        case 4:
                /* fusion */
                args.v4.ulClock = cpu_to_le32(clock);   /* 10 khz */
@@ -1095,6 +1177,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
        amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+                                         u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+       u8 frev, crev;
+       u16 data_offset;
+       union firmware_info *firmware_info;
+
+       *vddc = 0;
+       *vddci = 0;
+       *mvdd = 0;
+
+       if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset)) {
+               firmware_info =
+                       (union firmware_info *)(mode_info->atom_context->bios +
+                                               data_offset);
+               *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               if ((frev == 2) && (crev >= 2)) {
+                       *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+                       *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+               }
+       }
+}
+
 union set_voltage {
        struct _SET_VOLTAGE_PS_ALLOCATION alloc;
        struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1102,6 +1210,52 @@ union set_voltage {
        struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
 };
 
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+                            u16 voltage_id, u16 *voltage)
+{
+       union set_voltage args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+       u8 frev, crev;
+
+       if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+               return -EINVAL;
+
+       switch (crev) {
+       case 1:
+               return -EINVAL;
+       case 2:
+               args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+               args.v2.ucVoltageMode = 0;
+               args.v2.usVoltageLevel = 0;
+
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+               break;
+       case 3:
+               args.v3.ucVoltageType = voltage_type;
+               args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+               args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               *voltage = le16_to_cpu(args.v3.usVoltageLevel);
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+                                                     u16 *voltage,
+                                                     u16 leakage_idx)
+{
+       return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
 void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
                                 u16 voltage_level,
                                 u8 voltage_type)
@@ -1322,6 +1476,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
        return NULL;
 }
 
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+                             u8 voltage_type,
+                             u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+       int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+       u8 frev, crev;
+       u16 data_offset, size;
+       union voltage_object_info *voltage_info;
+       union voltage_object *voltage_object = NULL;
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+                                  &frev, &crev, &data_offset)) {
+               voltage_info = (union voltage_object_info *)
+                       (adev->mode_info.atom_context->bios + data_offset);
+
+               switch (frev) {
+               case 3:
+                       switch (crev) {
+                       case 1:
+                               voltage_object = (union voltage_object *)
+                                       amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+                                                                     voltage_type,
+                                                                     VOLTAGE_OBJ_SVID2);
+                               if (voltage_object) {
+                                       *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+                                       *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+                               } else {
+                                       return -EINVAL;
+                               }
+                               break;
+                       default:
+                               DRM_ERROR("unknown voltage object table\n");
+                               return -EINVAL;
+                       }
+                       break;
+               default:
+                       DRM_ERROR("unknown voltage object table\n");
+                       return -EINVAL;
+               }
+
+       }
+       return 0;
+}
+
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
                                u8 voltage_type, u8 voltage_mode)
index 8c2e69661799d0db7b390b00a48edade2b784cff..17356151db38a9ca91ed25cf1a3450c91352ba7f 100644 (file)
@@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
                                                          uint8_t id);
 void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
 
 int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
@@ -206,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+                            u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+                                                     u16 *voltage,
+                                                     u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+                                         u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+                                      u8 clock_type,
+                                      u32 clock,
+                                      bool strobe_mode,
+                                      struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+                             u8 voltage_type,
+                             u8 *svd_gpio_id, u8 *svc_gpio_id);
 #endif
index 49de92600074dd3f16f117553ae44368b6b1d0d8..dae35a96a694d0b6ffc3de5aae94e7344a69e565 100644 (file)
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
        acpi_handle handle;
        struct amdgpu_atpx_functions functions;
        bool is_hybrid;
+       bool dgpu_req_power_for_displays;
 };
 
 static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
        return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+       return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -200,19 +205,14 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
                printk("ATPX Hybrid Graphics\n");
-#if 1
-               /* This is a temporary hack until the D3 cold support
-                * makes it upstream.  The ATPX power_control method seems
-                * to still work on even if the system should be using
-                * the new standardized hybrid D3 cold ACPI interface.
-                */
-               atpx->functions.power_cntl = true;
-#else
                atpx->functions.power_cntl = false;
-#endif
                atpx->is_hybrid = true;
        }
 
+       atpx->dgpu_req_power_for_displays = false;
+       if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+               atpx->dgpu_req_power_for_displays = true;
+
        return 0;
 }
 
index 33e47a43ae321bded79822cfb03318c79a155211..345305235349c0c40a1812e6555900cfbea6b187 100644 (file)
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
        start_jiffies = jiffies;
        for (i = 0; i < n; i++) {
                struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-               r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+               r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+                                      false);
                if (r)
                        goto exit_do_move;
                r = fence_wait(fence, false);
index bc0440f7a31d3be5018c6eacb01c5a719dcc8db6..7a8bfa34682fdd8a92959d43328a7de058b42b7f 100644 (file)
@@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un
        return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 }
 
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_clockgating_state state)
 {
@@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
        return r;
 }
 
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state)
 {
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
        return -EINVAL;
 }
 
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+                                       enum cgs_ucode_id type)
+{
+       CGS_FUNC_ADEV;
+       uint16_t fw_version;
+
+       switch (type) {
+       case CGS_UCODE_ID_SDMA0:
+               fw_version = adev->sdma.instance[0].fw_version;
+               break;
+       case CGS_UCODE_ID_SDMA1:
+               fw_version = adev->sdma.instance[1].fw_version;
+               break;
+       case CGS_UCODE_ID_CP_CE:
+               fw_version = adev->gfx.ce_fw_version;
+               break;
+       case CGS_UCODE_ID_CP_PFP:
+               fw_version = adev->gfx.pfp_fw_version;
+               break;
+       case CGS_UCODE_ID_CP_ME:
+               fw_version = adev->gfx.me_fw_version;
+               break;
+       case CGS_UCODE_ID_CP_MEC:
+       case CGS_UCODE_ID_CP_MEC_JT1:
+       case CGS_UCODE_ID_CP_MEC_JT2:
+               fw_version = adev->gfx.mec_fw_version;
+               break;
+       case CGS_UCODE_ID_RLC_G:
+               fw_version = adev->gfx.rlc_fw_version;
+               break;
+       default:
+               DRM_ERROR("firmware type %d does not have a version\n", type);
+               fw_version = 0;
+       }
+       return fw_version;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                info->mc_addr = gpu_addr;
                info->image_size = data_size;
                info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+               info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
                info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
        } else {
                char fw_name[30] = {0};
@@ -848,6 +890,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
        case CGS_SYSTEM_INFO_GFX_SE_INFO:
                sys_info->value = adev->gfx.config.max_shader_engines;
                break;
+       case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+               sys_info->value = adev->pdev->subsystem_device;
+               break;
+       case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+               sys_info->value = adev->pdev->subsystem_vendor;
+               break;
        default:
                return -ENODEV;
        }
index ff0b55a65ca3fcb29d51a2b11bf46b14c4a848a0..decbba5ad438cde053c42fa1f5ca1d4125fdecd3 100644 (file)
@@ -1504,6 +1504,88 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
        .force = amdgpu_connector_dvi_force,
 };
 
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+       int enc_id = connector->encoder_ids[0];
+       struct drm_encoder *encoder;
+       int i;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0)
+                       break;
+
+               encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+               if (!encoder)
+                       continue;
+
+               if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+                       return encoder;
+       }
+
+       /* pick the first one */
+       if (enc_id)
+               return drm_encoder_find(connector->dev, enc_id);
+       return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+       struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+       if (encoder)
+               amdgpu_connector_add_common_modes(encoder, connector);
+
+       return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+                                          struct drm_display_mode *mode)
+{
+       return MODE_OK;
+}
+
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+       return 0;
+}
+
+static enum drm_connector_status
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+                                 struct drm_property *property,
+                                 uint64_t val)
+{
+       return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+       .get_modes = amdgpu_connector_virtual_get_modes,
+       .mode_valid = amdgpu_connector_virtual_mode_valid,
+       .best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+       .dpms = amdgpu_connector_virtual_dpms,
+       .detect = amdgpu_connector_virtual_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = amdgpu_connector_virtual_set_property,
+       .destroy = amdgpu_connector_destroy,
+       .force = amdgpu_connector_virtual_force,
+};
+
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
                      uint32_t connector_id,
@@ -1888,6 +1970,17 @@ amdgpu_connector_add(struct amdgpu_device *adev,
                        connector->interlace_allowed = false;
                        connector->doublescan_allowed = false;
                        break;
+               case DRM_MODE_CONNECTOR_VIRTUAL:
+                       amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+                       if (!amdgpu_dig_connector)
+                               goto failed;
+                       amdgpu_connector->con_priv = amdgpu_dig_connector;
+                       drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+                       drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+                       subpixel_order = SubPixelHorizontalRGB;
+                       connector->interlace_allowed = false;
+                       connector->doublescan_allowed = false;
+                       break;
                }
        }
 
index 0307ff5887c593a079d8fd63fa3245fb683162f3..b8412bcbad2a2355ed13d78ebb0a0078ea75d48d 100644 (file)
@@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      uint32_t *offset)
 {
        struct drm_gem_object *gobj;
+       unsigned long size;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
+
+       size = amdgpu_bo_size(p->uf_entry.robj);
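+       /* The user fence BO must be exactly one page, and the 8-byte
+        * fence value must fit entirely inside it.
+        */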
+       if (size != PAGE_SIZE || (data->offset + 8) > size)
+               return -EINVAL;
+
        *offset = data->offset;
 
        drm_gem_object_unreference_unlocked(gobj);
@@ -235,70 +241,212 @@ free_chunk:
        return ret;
 }
 
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+       if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       /* Since accum_us is incremented by a million per second, just
+        * multiply it by the number of MB/s to get the number of bytes.
+        */
+       return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+       if (!adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       return bytes >> adev->mm_stats.log2_max_MBps;
+}
+
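For intuition about the conversion above: with the default limit of 8 MB/s the
driver stores log2_max_MBps = ilog2(8) = 3, so a fully replenished budget of
200,000 us converts to 200000 << 3 = 1,600,000 bytes (about 1.5 MiB) of
optional buffer moves. Moving a 4 MiB buffer anyway charges
4194304 >> 3 = 524,288 us, leaving accum_us at about -324,000 us, i.e. roughly
2.5 MiB of debt that the clock must repay before further optional moves are
allowed.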
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
  */
 static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 {
-       u64 real_vram_size = adev->mc.real_vram_size;
-       u64 vram_usage = atomic64_read(&adev->vram_usage);
+       s64 time_us, increment_us;
+       u64 max_bytes;
+       u64 free_vram, total_vram, used_vram;
 
-       /* This function is based on the current VRAM usage.
+       /* Allow a maximum of 200 accumulated ms. This is basically per-IB
+        * throttling.
         *
-        * - If all of VRAM is free, allow relocating the number of bytes that
-        *   is equal to 1/4 of the size of VRAM for this IB.
+        * It means that in order to get full max MBps, at least 5 IBs per
+        * second must be submitted and not more than 200ms apart from each
+        * other.
+        */
+       const s64 us_upper_bound = 200000;
 
-        * - If more than one half of VRAM is occupied, only allow relocating
-        *   1 MB of data for this IB.
-        *
-        * - From 0 to one half of used VRAM, the threshold decreases
-        *   linearly.
-        *         __________________
-        * 1/4 of -|\               |
-        * VRAM    | \              |
-        *         |  \             |
-        *         |   \            |
-        *         |    \           |
-        *         |     \          |
-        *         |      \         |
-        *         |       \________|1 MB
-        *         |----------------|
-        *    VRAM 0 %             100 %
-        *         used            used
-        *
-        * Note: It's a threshold, not a limit. The threshold must be crossed
-        * for buffer relocations to stop, so any buffer of an arbitrary size
-        * can be moved as long as the threshold isn't crossed before
-        * the relocation takes place. We don't want to disable buffer
-        * relocations completely.
+       if (!adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+       used_vram = atomic64_read(&adev->vram_usage);
+       free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+       spin_lock(&adev->mm_stats.lock);
+
+       /* Increase the amount of accumulated us. */
+       time_us = ktime_to_us(ktime_get());
+       increment_us = time_us - adev->mm_stats.last_update_us;
+       adev->mm_stats.last_update_us = time_us;
+       adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+                                      us_upper_bound);
+
+       /* This prevents the short period of low performance when the VRAM
+        * usage is low and the driver is in debt or doesn't have enough
+        * accumulated us to fill VRAM quickly.
         *
-        * The idea is that buffers should be placed in VRAM at creation time
-        * and TTM should only do a minimum number of relocations during
-        * command submission. In practice, you need to submit at least
-        * a dozen IBs to move all buffers to VRAM if they are in GTT.
+        * The situation can occur in these cases:
+        * - a lot of VRAM is freed by userspace
+        * - the presence of a big buffer causes a lot of evictions
+        *   (solution: split buffers into smaller ones)
         *
-        * Also, things can get pretty crazy under memory pressure and actual
-        * VRAM usage can change a lot, so playing safe even at 50% does
-        * consistently increase performance.
+        * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+        * accum_us to a positive number.
+        */
+       if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+               s64 min_us;
+
+               /* Be more aggressive on dGPUs. Try to fill a portion of free
+                * VRAM now.
+                */
+               if (!(adev->flags & AMD_IS_APU))
+                       min_us = bytes_to_us(adev, free_vram / 4);
+               else
+                       min_us = 0; /* Reset accum_us on APUs. */
+
+               adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+       }
+
+       /* This returns 0 if the driver is in debt to disallow (optional)
+        * buffer moves.
+        */
+       max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+       spin_unlock(&adev->mm_stats.lock);
+       return max_bytes;
+}
+
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+                                        u64 num_bytes)
+{
+       spin_lock(&adev->mm_stats.lock);
+       adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+       spin_unlock(&adev->mm_stats.lock);
+}
+
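Taken together, amdgpu_cs_get_threshold_for_moves() and
amdgpu_cs_report_moved_bytes() implement a token bucket whose tokens are
microseconds. A minimal stand-alone model of the same scheme (plain C with
illustrative names, omitting the free-VRAM kick-start; a sketch, not the
driver's code):

    #include <stdint.h>

    /* Stand-alone model of the accum_us throttle above. */
    struct move_budget {
            int64_t accum_us;        /* accumulated time budget, in us */
            int64_t last_update_us;  /* timestamp of the last refill */
            uint32_t log2_max_MBps;  /* ilog2 of the permitted move rate */
    };

    /* Refill the budget from the clock, cap it at 200 ms, and return how
     * many bytes may be moved right now (0 while the budget is in debt).
     */
    static uint64_t budget_refill(struct move_budget *b, int64_t now_us)
    {
            const int64_t cap_us = 200000;

            b->accum_us += now_us - b->last_update_us;
            b->last_update_us = now_us;
            if (b->accum_us > cap_us)
                    b->accum_us = cap_us;
            return b->accum_us > 0 ?
                   (uint64_t)b->accum_us << b->log2_max_MBps : 0;
    }

    /* Charge the bytes actually moved; this may drive the budget negative,
     * which is the "debt" the comments above refer to.
     */
    static void budget_charge(struct move_budget *b, uint64_t bytes)
    {
            b->accum_us -= (int64_t)(bytes >> b->log2_max_MBps);
    }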
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+                                struct amdgpu_bo *bo)
+{
+       u64 initial_bytes_moved;
+       uint32_t domain;
+       int r;
+
+       if (bo->pin_count)
+               return 0;
+
+       /* Don't move this buffer if we have depleted our allowance
+        * to move it. Don't move anything if the threshold is zero.
         */
+       if (p->bytes_moved < p->bytes_moved_threshold)
+               domain = bo->prefered_domains;
+       else
+               domain = bo->allowed_domains;
+
+retry:
+       amdgpu_ttm_placement_from_domain(bo, domain);
+       initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+               initial_bytes_moved;
+
+       if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+               domain = bo->allowed_domains;
+               goto retry;
+       }
 
-       u64 half_vram = real_vram_size >> 1;
-       u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
-       u64 bytes_moved_threshold = half_free_vram >> 1;
-       return max(bytes_moved_threshold, 1024*1024ull);
+       return r;
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+                               struct amdgpu_bo_list_entry *lobj)
+{
+       uint32_t domain = lobj->robj->allowed_domains;
+       int r;
+
+       if (!p->evictable)
+               return false;
+
+       for (; &p->evictable->tv.head != &p->validated;
+            p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+               struct amdgpu_bo_list_entry *candidate = p->evictable;
+               struct amdgpu_bo *bo = candidate->robj;
+               u64 initial_bytes_moved;
+               uint32_t other;
+
+               /* If we reached our current BO we can forget it */
+               if (candidate == lobj)
+                       break;
+
+               other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+               /* Check if this BO is in one of the domains we need space for */
+               if (!(other & domain))
+                       continue;
+
+               /* Check if we can move this BO somewhere else */
+               other = bo->allowed_domains & ~domain;
+               if (!other)
+                       continue;
+
+               /* Good, we can try to move this BO somewhere else */
+               amdgpu_ttm_placement_from_domain(bo, other);
+               initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+                       initial_bytes_moved;
+
+               if (unlikely(r))
+                       break;
+
+               p->evictable = list_prev_entry(p->evictable, tv.head);
+               list_move(&candidate->tv.head, &p->validated);
+
+               return true;
+       }
+
+       return false;
+}
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
 {
        struct amdgpu_bo_list_entry *lobj;
-       u64 initial_bytes_moved;
        int r;
 
        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = lobj->robj;
                bool binding_userptr = false;
                struct mm_struct *usermm;
-               uint32_t domain;
 
                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
@@ -313,35 +461,19 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                        binding_userptr = true;
                }
 
-               if (bo->pin_count)
-                       continue;
-
-               /* Avoid moving this one if we have moved too many buffers
-                * for this IB already.
-                *
-                * Note that this allows moving at least one buffer of
-                * any size, because it doesn't take the current "bo"
-                * into account. We don't want to disallow buffer moves
-                * completely.
-                */
-               if (p->bytes_moved <= p->bytes_moved_threshold)
-                       domain = bo->prefered_domains;
-               else
-                       domain = bo->allowed_domains;
-
-       retry:
-               amdgpu_ttm_placement_from_domain(bo, domain);
-               initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-               p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-                              initial_bytes_moved;
+               if (p->evictable == lobj)
+                       p->evictable = NULL;
 
-               if (unlikely(r)) {
-                       if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-                               domain = bo->allowed_domains;
-                               goto retry;
-                       }
+               do {
+                       r = amdgpu_cs_bo_validate(p, bo);
+               } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+               if (r)
                        return r;
+
+               if (bo->shadow) {
+                       r = amdgpu_cs_bo_validate(p, bo->shadow);
+                       if (r)
+                               return r;
                }
 
                if (binding_userptr) {
@@ -386,8 +518,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
                r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                           &duplicates);
-               if (unlikely(r != 0))
+               if (unlikely(r != 0)) {
+                       DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                        goto error_free_pages;
+               }
 
                /* Without a BO list we don't have userptr BOs */
                if (!p->bo_list)
@@ -427,9 +561,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                /* Unreserve everything again. */
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 
-               /* We tried to often, just abort */
+               /* We tried too many times, just abort */
                if (!--tries) {
                        r = -EDEADLK;
+                       DRM_ERROR("deadlock in %s\n", __func__);
                        goto error_free_pages;
                }
 
@@ -441,11 +576,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                                         sizeof(struct page*));
                        if (!e->user_pages) {
                                r = -ENOMEM;
+                               DRM_ERROR("calloc failure in %s\n", __func__);
                                goto error_free_pages;
                        }
 
                        r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
                        if (r) {
+                               DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
                                drm_free_large(e->user_pages);
                                e->user_pages = NULL;
                                goto error_free_pages;
@@ -460,14 +597,23 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
        p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
        p->bytes_moved = 0;
+       p->evictable = list_last_entry(&p->validated,
+                                      struct amdgpu_bo_list_entry,
+                                      tv.head);
 
        r = amdgpu_cs_list_validate(p, &duplicates);
-       if (r)
+       if (r) {
+               DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
                goto error_validate;
+       }
 
        r = amdgpu_cs_list_validate(p, &p->validated);
-       if (r)
+       if (r) {
+               DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
                goto error_validate;
+       }
+
+       amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 
        fpriv->vm.last_eviction_counter =
                atomic64_read(&p->adev->num_evictions);
@@ -499,8 +645,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
-       if (p->uf_entry.robj)
-               p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+       if (!r && p->uf_entry.robj) {
+               struct amdgpu_bo *uf = p->uf_entry.robj;
+
+               r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+               p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+       }
 
 error_validate:
        if (r) {
@@ -617,7 +767,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (bo_va == NULL)
                                continue;
 
-                       r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+                       r = amdgpu_vm_bo_update(adev, bo_va, false);
                        if (r)
                                return r;
 
@@ -710,6 +860,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                if (r)
                        return r;
 
+               if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+                       parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+                       if (!parser->ctx->preamble_presented) {
+                               parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+                               parser->ctx->preamble_presented = true;
+                       }
+               }
+
                if (parser->job->ring && parser->job->ring != ring)
                        return -EINVAL;
 
@@ -849,7 +1007,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        job->owner = p->filp;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        p->fence = fence_get(&job->base.s_fence->finished);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
@@ -1015,3 +1173,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 
        return NULL;
 }
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+       unsigned i;
+       int r;
+
+       if (!parser->bo_list)
+               return 0;
+
+       for (i = 0; i < parser->bo_list->num_entries; i++) {
+               struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+               r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+               if (unlikely(r))
+                       return r;
+       }
+
+       return 0;
+}
index df7ab2458e5016d0da9da40b9658deb80f72d4dd..99a15cad67897b48b87685851fec31eebac8ce69 100644 (file)
 #include "atom.h"
 #include "amdgpu_atombios.h"
 #include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
+#include <linux/firmware.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
+       "TAHITI",
+       "PITCAIRN",
+       "VERDE",
+       "OLAND",
+       "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
@@ -101,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
 {
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-       
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
@@ -642,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
 
 }
 
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+       if (amdgpu_sriov_vf(adev))
+               return false;
+
+       if (amdgpu_passthrough(adev)) {
+               /* For FIJI: in the whole-GPU pass-through virtualization case,
+                * old SMC firmware won't clear some registers (e.g. MEM_SIZE,
+                * BIOS_SCRATCH), so amdgpu_card_posted() returns false and the
+                * driver would incorrectly skip vPost. But forcing vPost in the
+                * pass-through case makes driver reload hang. So for FIJI we
+                * only trust amdgpu_card_posted() when the SMC firmware version
+                * is 00160e00 or newer.
+                */
+               if (adev->asic_type == CHIP_FIJI) {
+                       int err;
+                       uint32_t fw_ver;
+                       err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+                       /* force vPost if error occured */
+                       if (err)
+                               return true;
+
+                       fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+                       if (fw_ver >= 0x00160e00)
+                               return !amdgpu_card_posted(adev);
+               }
+       } else {
+               /* In the bare-metal case, amdgpu_card_posted() returns false
+                * after a system (re)boot and true after a driver reload.
+                * We shouldn't do vPost after a driver reload, otherwise the
+                * GPU could hang.
+                */
+               if (amdgpu_card_posted(adev))
+                       return false;
+       }
+
+       /* We assume vPost is needed for all other cases. */
+       return true;
+}
+
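To summarize the decision implemented by amdgpu_vpost_needed():

 - SR-IOV VF: never post from the driver.
 - Pass-through FIJI with SMC firmware 00160e00 or newer: post only if the
   card is not already posted.
 - Pass-through otherwise (non-FIJI, older FIJI SMC firmware, or a failed
   firmware load): always force a post.
 - Bare metal: post only if the card is not already posted, i.e. skip vPost
   on a driver reload.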
 /**
  * amdgpu_dummy_page_init - init dummy page used by the driver
  *
@@ -1026,7 +1076,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-               amdgpu_resume_kms(dev, true, true);
+               amdgpu_device_resume(dev, true, true);
 
                dev->pdev->d3_delay = d3_delay;
 
@@ -1036,7 +1086,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                printk(KERN_INFO "amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               amdgpu_suspend_kms(dev, true, true);
+               amdgpu_device_suspend(dev, true, true);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
@@ -1181,10 +1231,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
        return 1;
 }
 
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+       adev->enable_virtual_display = false;
+
+       if (amdgpu_virtual_display) {
+               struct drm_device *ddev = adev->ddev;
+               const char *pci_address_name = pci_name(ddev->pdev);
+               char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+               pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+               pciaddstr_tmp = pciaddstr;
+               while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+                       if (!strcmp(pci_address_name, pciaddname)) {
+                               adev->enable_virtual_display = true;
+                               break;
+                       }
+               }
+
+               DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+                                amdgpu_virtual_display, pci_address_name,
+                                adev->enable_virtual_display);
+
+               kfree(pciaddstr);
+       }
+}
+
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
        int i, r;
 
+       amdgpu_whether_enable_virtual_display(adev);
+
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
        case CHIP_TONGA:
@@ -1202,6 +1280,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                if (r)
                        return r;
                break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_VERDE:
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_OLAND:
+       case CHIP_HAINAN:
+               adev->family = AMDGPU_FAMILY_SI;
+               r = si_set_ip_blocks(adev);
+               if (r)
+                       return r;
+               break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
@@ -1318,6 +1408,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
+               if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
+                       adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
+                       continue;
                /* enable clockgating to save power */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                    AMD_CG_STATE_GATE);
@@ -1433,13 +1526,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        return 0;
 }
 
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
-#ifdef CONFIG_X86
-       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
-       return false;
-#endif
+       if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+               adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 }
 
 /**
@@ -1461,6 +1551,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 {
        int r, i;
        bool runtime = false;
+       u32 max_MBps;
 
        adev->shutdown = false;
        adev->dev = &pdev->dev;
@@ -1484,6 +1575,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->smc_wreg = &amdgpu_invalid_wreg;
        adev->pcie_rreg = &amdgpu_invalid_rreg;
        adev->pcie_wreg = &amdgpu_invalid_wreg;
+       adev->pciep_rreg = &amdgpu_invalid_rreg;
+       adev->pciep_wreg = &amdgpu_invalid_wreg;
        adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1520,9 +1613,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        spin_lock_init(&adev->didt_idx_lock);
        spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
+       spin_lock_init(&adev->mm_stats.lock);
+
+       INIT_LIST_HEAD(&adev->shadow_list);
+       mutex_init(&adev->shadow_list_lock);
+
+       INIT_LIST_HEAD(&adev->gtt_list);
+       spin_lock_init(&adev->gtt_list_lock);
+
+       if (adev->asic_type >= CHIP_BONAIRE) {
+               adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+               adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+       } else {
+               adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+               adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+       }
 
-       adev->rmmio_base = pci_resource_start(adev->pdev, 5);
-       adev->rmmio_size = pci_resource_len(adev->pdev, 5);
        adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
        if (adev->rmmio == NULL) {
                return -ENOMEM;
@@ -1530,8 +1636,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-       /* doorbell bar mapping */
-       amdgpu_doorbell_init(adev);
+       if (adev->asic_type >= CHIP_BONAIRE)
+               /* doorbell bar mapping */
+               amdgpu_doorbell_init(adev);
 
        /* io port mapping */
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1579,25 +1686,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                goto failed;
        }
 
-       /* See if the asic supports SR-IOV */
-       adev->virtualization.supports_sr_iov =
-               amdgpu_atombios_has_gpu_virtualization_table(adev);
-
-       /* Check if we are executing in a virtualized environment */
-       adev->virtualization.is_virtual = amdgpu_device_is_virtual();
-       adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+       /* detect if we are with an SRIOV vbios */
+       amdgpu_device_detect_sriov_bios(adev);
 
        /* Post card if necessary */
-       if (!amdgpu_card_posted(adev) ||
-           (adev->virtualization.is_virtual &&
-            !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+       if (amdgpu_vpost_needed(adev)) {
                if (!adev->bios) {
-                       dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+                       dev_err(adev->dev, "no vBIOS found\n");
                        r = -EINVAL;
                        goto failed;
                }
-               DRM_INFO("GPU not posted. posting now...\n");
-               amdgpu_atom_asic_init(adev->mode_info.atom_context);
+               DRM_INFO("GPU posting now...\n");
+               r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+               if (r) {
+                       dev_err(adev->dev, "gpu post error!\n");
+                       goto failed;
+               }
+       } else {
+               DRM_INFO("GPU post is not needed\n");
        }
 
        /* Initialize clocks */
@@ -1628,6 +1734,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
        adev->accel_working = true;
 
+       /* Initialize the buffer migration limit. */
+       if (amdgpu_moverate >= 0)
+               max_MBps = amdgpu_moverate;
+       else
+               max_MBps = 8; /* Allow 8 MB/s. */
+       /* Get a log2 for easy divisions. */
+       adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
        amdgpu_fbdev_init(adev);
 
        r = amdgpu_ib_pool_init(adev);
@@ -1732,7 +1846,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        adev->rio_mem = NULL;
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
-       amdgpu_doorbell_fini(adev);
+       if (adev->asic_type >= CHIP_BONAIRE)
+               amdgpu_doorbell_fini(adev);
        amdgpu_debugfs_regs_cleanup(adev);
        amdgpu_debugfs_remove_files(adev);
 }
@@ -1742,7 +1857,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * Suspend & resume.
  */
 /**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
  *
  * @pdev: drm dev pointer
  * @state: suspend state
@@ -1751,7 +1866,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 {
        struct amdgpu_device *adev;
        struct drm_crtc *crtc;
@@ -1819,6 +1934,10 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
+       } else {
+               r = amdgpu_asic_reset(adev);
+               if (r)
+                       DRM_ERROR("amdgpu asic reset failed\n");
        }
 
        if (fbcon) {
@@ -1830,7 +1949,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 }
 
 /**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
  *
  * @pdev: drm dev pointer
  *
@@ -1838,7 +1957,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct amdgpu_device *adev = dev->dev_private;
@@ -1848,22 +1967,26 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (fbcon) {
+       if (fbcon)
                console_lock();
-       }
+
        if (resume) {
                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
-               if (pci_enable_device(dev->pdev)) {
+               r = pci_enable_device(dev->pdev);
+               if (r) {
                        if (fbcon)
                                console_unlock();
-                       return -1;
+                       return r;
                }
        }
 
        /* post card */
-       if (!amdgpu_card_posted(adev))
-               amdgpu_atom_asic_init(adev->mode_info.atom_context);
+       if (!amdgpu_card_posted(adev) || !resume) {
+               r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+               if (r)
+                       DRM_ERROR("amdgpu asic init failed\n");
+       }
 
        r = amdgpu_resume(adev);
        if (r)
@@ -1937,6 +2060,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        return 0;
 }
 
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+       int i;
+       bool asic_hang = false;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].funcs->check_soft_reset)
+                       adev->ip_blocks[i].funcs->check_soft_reset(adev);
+               if (adev->ip_block_status[i].hang) {
+                       DRM_INFO("IP block:%d is hang!\n", i);
+                       asic_hang = true;
+               }
+       }
+       return asic_hang;
+}
+
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+       int i, r = 0;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_block_status[i].hang &&
+                   adev->ip_blocks[i].funcs->pre_soft_reset) {
+                       r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+                       if (r)
+                               return r;
+               }
+       }
+
+       return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+       if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
+           adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
+           adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
+           adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
+               DRM_INFO("Some block need full reset!\n");
+               return true;
+       }
+       return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+       int i, r = 0;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_block_status[i].hang &&
+                   adev->ip_blocks[i].funcs->soft_reset) {
+                       r = adev->ip_blocks[i].funcs->soft_reset(adev);
+                       if (r)
+                               return r;
+               }
+       }
+
+       return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+       int i, r = 0;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_block_status[i].hang &&
+                   adev->ip_blocks[i].funcs->post_soft_reset)
+                       r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+       if (adev->flags & AMD_IS_APU)
+               return false;
+
+       return amdgpu_lockup_timeout > 0;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+                                          struct amdgpu_ring *ring,
+                                          struct amdgpu_bo *bo,
+                                          struct fence **fence)
+{
+       uint32_t domain;
+       int r;
+
+       if (!bo->shadow)
+               return 0;
+
+       r = amdgpu_bo_reserve(bo, false);
+       if (r)
+               return r;
+       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       /* if bo has been evicted, then no need to recover */
+       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+               r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+                                                NULL, fence, true);
+               if (r) {
+                       DRM_ERROR("recover page table failed!\n");
+                       goto err;
+               }
+       }
+err:
+       amdgpu_bo_unreserve(bo);
+       return r;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -1949,6 +2192,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
        int i, r;
        int resched;
+       bool need_full_reset;
+
+       if (!amdgpu_check_soft_reset(adev)) {
+               DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+               return 0;
+       }
 
        atomic_inc(&adev->gpu_reset_counter);
 
@@ -1967,40 +2216,93 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
        amdgpu_fence_driver_force_completion(adev);
 
-       /* save scratch */
-       amdgpu_atombios_scratch_regs_save(adev);
-       r = amdgpu_suspend(adev);
+       need_full_reset = amdgpu_need_full_reset(adev);
 
-retry:
-       /* Disable fb access */
-       if (adev->mode_info.num_crtc) {
-               struct amdgpu_mode_mc_save save;
-               amdgpu_display_stop_mc_access(adev, &save);
-               amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+       if (!need_full_reset) {
+               amdgpu_pre_soft_reset(adev);
+               r = amdgpu_soft_reset(adev);
+               amdgpu_post_soft_reset(adev);
+               if (r || amdgpu_check_soft_reset(adev)) {
+                       DRM_INFO("soft reset failed, will fallback to full reset!\n");
+                       need_full_reset = true;
+               }
        }
 
-       r = amdgpu_asic_reset(adev);
-       /* post card */
-       amdgpu_atom_asic_init(adev->mode_info.atom_context);
+       if (need_full_reset) {
+               /* save scratch */
+               amdgpu_atombios_scratch_regs_save(adev);
+               r = amdgpu_suspend(adev);
 
-       if (!r) {
-               dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-               r = amdgpu_resume(adev);
+retry:
+               /* Disable fb access */
+               if (adev->mode_info.num_crtc) {
+                       struct amdgpu_mode_mc_save save;
+                       amdgpu_display_stop_mc_access(adev, &save);
+                       amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+               }
+
+               r = amdgpu_asic_reset(adev);
+               /* post card */
+               amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+               if (!r) {
+                       dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+                       r = amdgpu_resume(adev);
+               }
+               /* restore scratch */
+               amdgpu_atombios_scratch_regs_restore(adev);
        }
-       /* restore scratch */
-       amdgpu_atombios_scratch_regs_restore(adev);
        if (!r) {
+               amdgpu_irq_gpu_reset_resume_helper(adev);
+               if (need_full_reset && amdgpu_need_backup(adev)) {
+                       r = amdgpu_ttm_recover_gart(adev);
+                       if (r)
+                               DRM_ERROR("gart recovery failed!!!\n");
+               }
                r = amdgpu_ib_ring_tests(adev);
                if (r) {
                        dev_err(adev->dev, "ib ring test failed (%d).\n", r);
                        r = amdgpu_suspend(adev);
+                       need_full_reset = true;
                        goto retry;
                }
+               /* Recover VM page tables, since we cannot depend on VRAM
+                * being consistent after a full GPU reset.
+                */
+               if (need_full_reset && amdgpu_need_backup(adev)) {
+                       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+                       struct amdgpu_bo *bo, *tmp;
+                       struct fence *fence = NULL, *next = NULL;
+
+                       DRM_INFO("recover vram bo from shadow\n");
+                       mutex_lock(&adev->shadow_list_lock);
+                       list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+                               amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+                               if (fence) {
+                                       r = fence_wait(fence, false);
+                                       if (r) {
+                                               WARN(r, "recovery from shadow isn't comleted\n");
+                                               break;
+                                       }
+                               }
 
+                               fence_put(fence);
+                               fence = next;
+                       }
+                       mutex_unlock(&adev->shadow_list_lock);
+                       if (fence) {
+                               r = fence_wait(fence, false);
+                               if (r)
+                                       WARN(r, "recovery from shadow isn't comleted\n");
+                       }
+                       fence_put(fence);
+               }
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
+
                        amd_sched_job_recovery(&ring->sched);
                        kthread_unpark(ring->sched.thread);
                }
@@ -2020,7 +2322,6 @@ retry:
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
        }
-       amdgpu_irq_gpu_reset_resume_helper(adev);
 
        return r;
 }
@@ -2178,22 +2479,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;
-       bool use_bank;
+       bool pm_pg_lock, use_bank;
        unsigned instance_bank, sh_bank, se_bank;
 
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
+       /* are we reading registers for which a PG lock is necessary? */
+       pm_pg_lock = (*pos >> 23) & 1;
+
        if (*pos & (1ULL << 62)) {
                se_bank = (*pos >> 24) & 0x3FF;
                sh_bank = (*pos >> 34) & 0x3FF;
                instance_bank = (*pos >> 44) & 0x3FF;
                use_bank = 1;
-               *pos &= 0xFFFFFF;
        } else {
                use_bank = 0;
        }
 
+       *pos &= 0x3FFFF;
+
        if (use_bank) {
                if (sh_bank >= adev->gfx.config.max_sh_per_se ||
                    se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2508,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                                        sh_bank, instance_bank);
        }
 
+       if (pm_pg_lock)
+               mutex_lock(&adev->pm.mutex);
+
        while (size) {
                uint32_t value;
 
@@ -2228,6 +2536,9 @@ end:
                mutex_unlock(&adev->grbm_idx_mutex);
        }
 
+       if (pm_pg_lock)
+               mutex_unlock(&adev->pm.mutex);
+
        return result;
 }
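For reference, the *pos bit layout consumed above can be composed from
userspace: bit 62 selects a banked read, bit 23 takes the PM/PG lock, the
SE/SH/instance banks sit in bits 24-33, 34-43 and 44-53, and the low 18 bits
carry the register byte offset. A hedged sketch (the debugfs path, DRI
instance and register offset are illustrative assumptions):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t se = 0, sh = 0, instance = 0;
            uint64_t reg = 0x2004;           /* illustrative byte offset */
            uint64_t pos = (1ULL << 62) |    /* banked read */
                           (instance << 44) | (sh << 34) | (se << 24) |
                           (reg & 0x3FFFF);
            uint32_t value;
            int fd;

            fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
            if (fd < 0)
                    return 1;
            /* reads must be 4-byte sized and 4-byte aligned */
            if (pread(fd, &value, sizeof(value), (off_t)pos) == sizeof(value))
                    printf("reg 0x%llx = 0x%08x\n",
                           (unsigned long long)reg, value);
            close(fd);
            return 0;
    }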
 
@@ -2385,7 +2696,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        while (size) {
                uint32_t value;
 
-               value = RREG32_SMC(*pos >> 2);
+               value = RREG32_SMC(*pos);
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;
@@ -2416,7 +2727,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
                if (r)
                        return r;
 
-               WREG32_SMC(*pos >> 2, value);
+               WREG32_SMC(*pos, value);
 
                result += 4;
                buf += 4;
@@ -2438,12 +2749,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+       config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
        if (!config)
                return -ENOMEM;
 
        /* version, increment each time something is added */
-       config[no_regs++] = 0;
+       config[no_regs++] = 2;
        config[no_regs++] = adev->gfx.config.max_shader_engines;
        config[no_regs++] = adev->gfx.config.max_tile_pipes;
        config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2779,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
        config[no_regs++] = adev->gfx.config.gb_addr_config;
        config[no_regs++] = adev->gfx.config.num_rbs;
 
+       /* rev==1 */
+       config[no_regs++] = adev->rev_id;
+       config[no_regs++] = adev->pg_flags;
+       config[no_regs++] = adev->cg_flags;
+
+       /* rev==2 */
+       config[no_regs++] = adev->family;
+       config[no_regs++] = adev->external_rev_id;
+
        while (size && (*pos < no_regs * 4)) {
                uint32_t value;
 
@@ -2488,6 +2808,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
        return result;
 }
 
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = f->f_inode->i_private;
+       int idx, r;
+       int32_t value;
+
+       if (size != 4 || *pos & 0x3)
+               return -EINVAL;
+
+       /* convert offset to sensor number */
+       idx = *pos >> 2;
+
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+               r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+       else
+               return -EINVAL;
+
+       if (!r)
+               r = put_user(value, (int32_t *)buf);
+
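+       /* report the four bytes read on success, the error code otherwise */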
+       return !r ? 4 : r;
+}
 
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
@@ -2520,12 +2863,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
        .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_sensor_read,
+       .llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
        &amdgpu_debugfs_gca_config_fops,
+       &amdgpu_debugfs_sensors_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2534,6 +2884,7 @@ static const char *debugfs_regs_names[] = {
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
        "amdgpu_gca_config",
+       "amdgpu_sensors",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
index 76f96028313dcacececda5678977822f43c0e2c4..9af8d3c7ae8b135f703b0dc999c5f9df4522f149 100644 (file)
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
                container_of(cb, struct amdgpu_flip_work, cb);
 
        fence_put(f);
-       schedule_work(&work->flip_work);
+       schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
+       struct delayed_work *delayed_work =
+               container_of(__work, struct delayed_work, work);
        struct amdgpu_flip_work *work =
-               container_of(__work, struct amdgpu_flip_work, flip_work);
+               container_of(delayed_work, struct amdgpu_flip_work, flip_work);
        struct amdgpu_device *adev = work->adev;
        struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
        struct drm_crtc *crtc = &amdgpuCrtc->base;
        unsigned long flags;
-       unsigned i, repcnt = 4;
-       int vpos, hpos, stat, min_udelay = 0;
-       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+       unsigned i;
+       int vpos, hpos;
 
        if (amdgpu_flip_handle_fence(work, &work->excl))
                return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
                if (amdgpu_flip_handle_fence(work, &work->shared[i]))
                        return;
 
-       /* We borrow the event spin lock for protecting flip_status */
-       spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-       /* If this happens to execute within the "virtually extended" vblank
-        * interval before the start of the real vblank interval then it needs
-        * to delay programming the mmio flip until the real vblank is entered.
-        * This prevents completing a flip too early due to the way we fudge
-        * our vblank counter and vblank timestamps in order to work around the
-        * problem that the hw fires vblank interrupts before actual start of
-        * vblank (when line buffer refilling is done for a frame). It
-        * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
-        * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
-        *
-        * In practice this won't execute very often unless on very fast
-        * machines because the time window for this to happen is very small.
+       /* Wait until we're out of the vertical blank period before the one
+        * targeted by the flip
         */
-       while (amdgpuCrtc->enabled && --repcnt) {
-               /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-                * start in hpos, and to the "fudged earlier" vblank start in
-                * vpos.
-                */
-               stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
-                                                 GET_DISTANCE_TO_VBLANKSTART,
-                                                 &vpos, &hpos, NULL, NULL,
-                                                 &crtc->hwmode);
-
-               if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-                   !(vpos >= 0 && hpos <= 0))
-                       break;
-
-               /* Sleep at least until estimated real start of hw vblank */
-               min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-               if (min_udelay > vblank->framedur_ns / 2000) {
-                       /* Don't wait ridiculously long - something is wrong */
-                       repcnt = 0;
-                       break;
-               }
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-               usleep_range(min_udelay, 2 * min_udelay);
-               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       if (amdgpuCrtc->enabled &&
+           (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+                                       &vpos, &hpos, NULL, NULL,
+                                       &crtc->hwmode)
+            & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+           (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+           (int)(work->target_vblank -
+                 amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
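+               /* not in the target vblank yet, check again in ~1 ms */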
+               schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+               return;
        }
 
-       if (!repcnt)
-               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-                                "framedur %d, linedur %d, stat %d, vpos %d, "
-                                "hpos %d\n", work->crtc_id, min_udelay,
-                                vblank->framedur_ns / 1000,
-                                vblank->linedur_ns / 1000, stat, vpos, hpos);
+       /* We borrow the event spin lock for protecting flip_status */
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
        /* Do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
        kfree(work);
 }
 
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb,
-                         struct drm_pending_vblank_event *event,
-                         uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags, uint32_t target)
 {
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
        if (work == NULL)
                return -ENOMEM;
 
-       INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+       INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
        INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
 
        work->event = event;
@@ -237,12 +206,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
        amdgpu_bo_unreserve(new_rbo);
 
        work->base = base;
-
-       r = drm_crtc_vblank_get(crtc);
-       if (r) {
-               DRM_ERROR("failed to get vblank before flip\n");
-               goto pflip_cleanup;
-       }
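+       /* translate the DRM vblank counter target into the hw counter space */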
+       work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+               amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
        /* we borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                r = -EBUSY;
-               goto vblank_cleanup;
+               goto pflip_cleanup;
        }
 
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
        /* update crtc fb */
        crtc->primary->fb = fb;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-       amdgpu_flip_work_func(&work->flip_work);
+       amdgpu_flip_work_func(&work->flip_work.work);
        return 0;
 
-vblank_cleanup:
-       drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
        if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
                DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
        return ret;
 }
 
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
        "NONE",
        "INTERNAL_LVDS",
        "INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ static const char *encoder_names[38] = {
        "TRAVIS",
        "INTERNAL_VCE",
        "INTERNAL_UNIPHY3",
+       "HDMI_ANX9805",
+       "INTERNAL_AMCLK",
+       "VIRTUAL",
 };
 
 static const char *hpd_names[6] = {
index 9aa533cf4ad10f79535172eaa5edafb097da0154..7dbc7727e32bb6620c9c94fa1f452e14eec7cea5 100644 (file)
  * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
  *           at the end of IBs.
  * - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - The KMD now emits CONTEXT_CONTROL packets in the ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       3
+#define KMS_DRIVER_MINOR       7
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
 int amdgpu_audio = -1;
@@ -84,11 +89,14 @@ int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
 int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
 char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -96,6 +104,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
 module_param_named(gartsize, amdgpu_gart_size, int, 0600);
 
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1 = auto, 0 or 1 = disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
 
@@ -168,8 +179,14 @@ module_param_named(powerplay, amdgpu_powerplay, int, 0444);
 
 MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
+
+MODULE_PARM_DESC(ppfeaturemask, "Mask of enabled PowerPlay features (default 0xffffffff = all enabled)");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
 #endif
 
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 
@@ -185,7 +202,84 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (takes a semicolon-separated list of PCI IDs of the form xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
+#ifdef  CONFIG_DRM_AMDGPU_SI
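+       /* SI (Southern Islands) parts, only present when SI support is built in */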
+       {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
        /* Kaveri */
        {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -341,7 +435,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
 #ifdef CONFIG_X86
        primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-       remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
        kfree(ap);
 
        return 0;
@@ -383,32 +477,72 @@ amdgpu_pci_remove(struct pci_dev *pdev)
        drm_put_dev(dev);
 }
 
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* if we are running in a VM, make sure the device
+        * is torn down properly on reboot/shutdown
+        */
+       if (amdgpu_passthrough(adev))
+               amdgpu_pci_remove(pdev);
+}
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_suspend_kms(drm_dev, true, true);
+
+       return amdgpu_device_suspend(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_resume_kms(drm_dev, true, true);
+
+       /* GPU comes up enabled by the bios on resume */
+       if (amdgpu_device_is_px(drm_dev)) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
+       return amdgpu_device_resume(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_suspend_kms(drm_dev, false, true);
+
+       return amdgpu_device_suspend(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_resume_kms(drm_dev, false, true);
+
+       return amdgpu_device_resume(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -426,7 +560,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        drm_kms_helper_poll_disable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 
-       ret = amdgpu_suspend_kms(drm_dev, false, false);
+       ret = amdgpu_device_suspend(drm_dev, false, false);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
@@ -459,7 +593,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
                return ret;
        pci_set_master(pdev);
 
-       ret = amdgpu_resume_kms(drm_dev, false, false);
+       ret = amdgpu_device_resume(drm_dev, false, false);
        drm_kms_helper_poll_enable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -513,8 +647,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
        .thaw = amdgpu_pmops_thaw,
-       .poweroff = amdgpu_pmops_freeze,
-       .restore = amdgpu_pmops_resume,
+       .poweroff = amdgpu_pmops_poweroff,
+       .restore = amdgpu_pmops_restore,
        .runtime_suspend = amdgpu_pmops_runtime_suspend,
        .runtime_resume = amdgpu_pmops_runtime_resume,
        .runtime_idle = amdgpu_pmops_runtime_idle,
@@ -596,6 +730,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
        .id_table = pciidlist,
        .probe = amdgpu_pci_probe,
        .remove = amdgpu_pci_remove,
+       .shutdown = amdgpu_pci_shutdown,
        .driver.pm = &amdgpu_pm_ops,
 };
 
index 919146780a15a91434f7383e666eedabdf402ca0..107fbb2d284723bcbd9fa38fd1b71b5d87aa48bc 100644 (file)
@@ -25,7 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -48,8 +48,35 @@ struct amdgpu_fbdev {
        struct amdgpu_device *adev;
 };
 
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+       struct amdgpu_fbdev *rfbdev = info->par;
+       struct amdgpu_device *adev = rfbdev->adev;
+       int ret = pm_runtime_get_sync(adev->ddev->dev);
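+       /* -EACCES simply means runtime PM is disabled, keep going */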
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_mark_last_busy(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev->ddev->dev);
+               return ret;
+       }
+       return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+       struct amdgpu_fbdev *rfbdev = info->par;
+       struct amdgpu_device *adev = rfbdev->adev;
+
+       pm_runtime_mark_last_busy(adev->ddev->dev);
+       pm_runtime_put_autosuspend(adev->ddev->dev);
+       return 0;
+}
+
 static struct fb_ops amdgpufb_ops = {
        .owner = THIS_MODULE,
+       .fb_open = amdgpufb_open,
+       .fb_release = amdgpufb_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
index 921bce2df0b07cced181e9cb68693f989b7839fa..0feea347f680b1317dae1d37cbde7151bd271f9c 100644 (file)
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages)
 {
        unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint32_t flags)
 {
index 503d540981283b38e0c9cb41027a7a6ec3502cda..e73728d90388663e6539ad22cbd41fce41072437 100644 (file)
 #define AMDGPU_GWS_SHIFT       PAGE_SHIFT
 #define AMDGPU_OA_SHIFT                PAGE_SHIFT
 
-#define AMDGPU_PL_GDS          TTM_PL_PRIV0
-#define AMDGPU_PL_GWS          TTM_PL_PRIV1
-#define AMDGPU_PL_OA           TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS             TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS             TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA              TTM_PL_FLAG_PRIV2
-
 struct amdgpu_ring;
 struct amdgpu_bo;
 
index 31a676376d732aba3d2d15dd8dfe2af24be7f56b..c93a92a840ead18b8e1dd71547f922fad4ebc3ae 100644 (file)
@@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
                         "AMDGPU i2c hw bus %s", name);
                i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
                ret = i2c_add_adapter(&i2c->adapter);
-               if (ret) {
-                       DRM_ERROR("Failed to register hw i2c %s\n", name);
+               if (ret)
                        goto out_free;
-               }
        } else {
                /* set the amdgpu bit adapter */
                snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
index a31d7ef3032c76965e61a3f3d222938d55074286..6a6c86c9c1694eb475b9671c47cfaa4738473ed8 100644 (file)
@@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       uint64_t ctx;
+       uint64_t fence_ctx;
+       uint32_t status = 0, alloc_size;
 
        unsigned i;
        int r = 0;
@@ -135,14 +136,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
-               ctx = job->ctx;
+               fence_ctx = job->fence_ctx;
        } else {
                vm = NULL;
-               ctx = 0;
+               fence_ctx = 0;
        }
 
        if (!ring->ready) {
-               dev_err(adev->dev, "couldn't schedule ib\n");
+               dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }
 
@@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return -EINVAL;
        }
 
-       r = amdgpu_ring_alloc(ring, 256 * num_ibs);
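+       /* size the allocation from the ring's per-frame overhead plus the
+        * per-IB emit cost instead of a fixed 256 dwords per IB
+        */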
+       alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+               num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+       r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
@@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
 
-       skip_preamble = ring->current_ctx == ctx;
-       need_ctx_switch = ring->current_ctx != ctx;
+       skip_preamble = ring->current_ctx == fence_ctx;
+       need_ctx_switch = ring->current_ctx != fence_ctx;
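+       /* let the CP know whether this submission performs a context switch */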
+       if (job && ring->funcs->emit_cntxcntl) {
+               if (need_ctx_switch)
+                       status |= AMDGPU_HAVE_CTX_SWITCH;
+               status |= job->preamble_status;
+               amdgpu_ring_emit_cntxcntl(ring, status);
+       }
+
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
                /* drop preamble IBs if we don't have a context switch */
-               if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+               if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+                       skip_preamble &&
+                       !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
                        continue;
 
                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-       ring->current_ctx = ctx;
+       ring->current_ctx = fence_ctx;
+       if (ring->funcs->emit_switch_buffer)
+               amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
        return 0;
 }
@@ -280,7 +295,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
        unsigned i;
-       int r;
+       int r, ret = 0;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +316,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+                               ret = r;
                        }
                }
        }
-       return 0;
+       return ret;
 }
 
 /*
index 534fc04e80fd5a2aeba6aae78862f73be586dbcc..3ab4c65ecc8b4efc7446e26a3c082e723038611c 100644 (file)
@@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
 
        /* Allocate ring buffer */
        if (adev->irq.ih.ring_obj == NULL) {
-               r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
-                                    PAGE_SIZE, true,
-                                    AMDGPU_GEM_DOMAIN_GTT, 0,
-                                    NULL, NULL, &adev->irq.ih.ring_obj);
+               r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+                                           PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+                                           &adev->irq.ih.ring_obj,
+                                           &adev->irq.ih.gpu_addr,
+                                           (void **)&adev->irq.ih.ring);
                if (r) {
                        DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
                        return r;
                }
-               r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-               r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
-                                 AMDGPU_GEM_DOMAIN_GTT,
-                                 &adev->irq.ih.gpu_addr);
-               if (r) {
-                       amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-                       DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
-                       return r;
-               }
-               r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
-                                  (void **)&adev->irq.ih.ring);
-               amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-               if (r) {
-                       DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
-                       return r;
-               }
        }
        return 0;
 }
@@ -136,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
  */
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
 {
-       int r;
-
        if (adev->irq.ih.use_bus_addr) {
                if (adev->irq.ih.ring) {
                        /* add 8 bytes for the rptr/wptr shadows and
@@ -149,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
                        adev->irq.ih.ring = NULL;
                }
        } else {
-               if (adev->irq.ih.ring_obj) {
-                       r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-                       if (likely(r == 0)) {
-                               amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
-                               amdgpu_bo_unpin(adev->irq.ih.ring_obj);
-                               amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-                       }
-                       amdgpu_bo_unref(&adev->irq.ih.ring_obj);
-                       adev->irq.ih.ring = NULL;
-                       adev->irq.ih.ring_obj = NULL;
-               }
+               amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+                                     &adev->irq.ih.gpu_addr,
+                                     (void **)&adev->irq.ih.ring);
                amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
                amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
        }
index 7ef09352e5347c9ce761b07328bb8a3c8c7d9514..f016464035b8705dd80825f1a95addf79dd9c223 100644 (file)
@@ -70,6 +70,7 @@ struct amdgpu_irq {
        /* gen irq stuff */
        struct irq_domain               *domain; /* GPU irq controller domain */
        unsigned                        virq[AMDGPU_MAX_IRQ_SRC_ID];
+       uint32_t                        srbm_soft_reset;
 };
 
 void amdgpu_irq_preinstall(struct drm_device *dev);
index 6674d40eb3abb251a1e9baae117f0c1a1850f940..8c5807994073cedf8c2128a6ded356938596786c 100644 (file)
@@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
 }
 
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                return r;
 
        job->owner = owner;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        *f = fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);
index d942654a1de0f08a4cf2970fce080cd9999b6807..c2c7fb140338061f77cc3d2560e2f99f46e37346 100644 (file)
@@ -292,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        type = AMD_IP_BLOCK_TYPE_UVD;
                        ring_mask = adev->uvd.ring.ready ? 1 : 0;
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                       ib_size_alignment = 16;
                        break;
                case AMDGPU_HW_IP_VCE:
                        type = AMD_IP_BLOCK_TYPE_VCE;
-                       for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+                       for (i = 0; i < adev->vce.num_rings; i++)
                                ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                       ib_size_alignment = 1;
                        break;
                default:
                        return -EINVAL;
@@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        case AMDGPU_INFO_NUM_BYTES_MOVED:
                ui64 = atomic64_read(&adev->num_bytes_moved);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+       case AMDGPU_INFO_NUM_EVICTIONS:
+               ui64 = atomic64_read(&adev->num_evictions);
+               return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_VRAM_USAGE:
                ui64 = atomic64_read(&adev->vram_usage);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -539,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                return r;
 
        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-       if (unlikely(!fpriv))
-               return -ENOMEM;
+       if (unlikely(!fpriv)) {
+               r = -ENOMEM;
+               goto out_suspend;
+       }
 
        r = amdgpu_vm_init(adev, &fpriv->vm);
-       if (r)
-               goto error_free;
+       if (r) {
+               kfree(fpriv);
+               goto out_suspend;
+       }
 
        mutex_init(&fpriv->bo_list_lock);
        idr_init(&fpriv->bo_list_handles);
@@ -553,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
        file_priv->driver_priv = fpriv;
 
+out_suspend:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
-       return 0;
-
-error_free:
-       kfree(fpriv);
 
        return r;
 }
@@ -597,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
        kfree(fpriv);
        file_priv->driver_priv = NULL;
+
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -611,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv)
 {
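+       /* balanced by the pm_runtime_put in amdgpu_driver_postclose_kms() */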
+       pm_runtime_get_sync(dev->dev);
 }
 
 /*
index 6b1d7d3065645719787255395487c04f0dc8b11e..7b0eff7d060b82faba0945499ccdaac06ea255da 100644 (file)
@@ -39,6 +39,8 @@
 #include <drm/drm_plane_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
 
 struct amdgpu_bo;
 struct amdgpu_device;
@@ -339,6 +341,8 @@ struct amdgpu_mode_info {
        int                     num_dig; /* number of dig blocks */
        int                     disp_priority;
        const struct amdgpu_display_funcs *funcs;
+       struct hrtimer vblank_timer;
+       enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 void amdgpu_print_display_setup(struct drm_device *dev);
 int amdgpu_modeset_create_props(struct amdgpu_device *adev);
 int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb,
-                         struct drm_pending_vblank_event *event,
-                         uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags, uint32_t target);
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
index 6f0873c75a25650feb9c65330f1a3b133ab64f50..428aa00025e4875637afaa51b1b1e6ebba2799b8 100644 (file)
 #include "amdgpu_trace.h"
 
 
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 
 static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                                                struct ttm_mem_reg *mem)
 {
-       u64 ret = 0;
-       if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
-               ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
-                          adev->mc.visible_vram_size ?
-                          adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
-                          mem->size;
-       }
-       return ret;
+       if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+               return 0;
+
+       return ((mem->start << PAGE_SHIFT) + mem->size) >
+               adev->mc.visible_vram_size ?
+               adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+               mem->size;
 }
 
 static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +96,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
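+       /* drop this BO from the device-wide shadow list, if it is on it */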
+       if (!list_empty(&bo->shadow_list)) {
+               mutex_lock(&bo->adev->shadow_list_lock);
+               list_del_init(&bo->shadow_list);
+               mutex_unlock(&bo->adev->shadow_list_lock);
+       }
        kfree(bo->metadata);
        kfree(bo);
 }
@@ -112,84 +114,93 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 
 static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                                      struct ttm_placement *placement,
-                                     struct ttm_place *placements,
+                                     struct ttm_place *places,
                                      u32 domain, u64 flags)
 {
-       u32 c = 0, i;
-
-       placement->placement = placements;
-       placement->busy_placement = placements;
+       u32 c = 0;
 
        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+               unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
                if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
-                       adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-                       placements[c].fpfn =
-                               adev->mc.visible_vram_size >> PAGE_SHIFT;
-                       placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                               TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+                   !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+                   adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+                       places[c].fpfn = visible_pfn;
+                       places[c].lpfn = 0;
+                       places[c].flags = TTM_PL_FLAG_WC |
+                               TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+                               TTM_PL_FLAG_TOPDOWN;
+                       c++;
                }
-               placements[c].fpfn = 0;
-               placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
-               if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
-                       placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+               if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+                       places[c].lpfn = visible_pfn;
+               else
+                       places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+               c++;
        }
 
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-                       placements[c].fpfn = 0;
-                       placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_TT;
+               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+                       places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
-               } else {
-                       placements[c].fpfn = 0;
-                       placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-               }
+               else
+                       places[c].flags |= TTM_PL_FLAG_CACHED;
+               c++;
        }
 
        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
-               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-                       placements[c].fpfn = 0;
-                       placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_SYSTEM;
+               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+                       places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
-               } else {
-                       placements[c].fpfn = 0;
-                       placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
-               }
+               else
+                       places[c].flags |= TTM_PL_FLAG_CACHED;
+               c++;
        }
 
        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
-               placements[c].fpfn = 0;
-               placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-                       AMDGPU_PL_FLAG_GDS;
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+               c++;
        }
+
        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
-               placements[c].fpfn = 0;
-               placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-                       AMDGPU_PL_FLAG_GWS;
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+               c++;
        }
+
        if (domain & AMDGPU_GEM_DOMAIN_OA) {
-               placements[c].fpfn = 0;
-               placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-                       AMDGPU_PL_FLAG_OA;
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+               c++;
        }
 
        if (!c) {
-               placements[c].fpfn = 0;
-               placements[c++].flags = TTM_PL_MASK_CACHING |
-                       TTM_PL_FLAG_SYSTEM;
+               places[c].fpfn = 0;
+               places[c].lpfn = 0;
+               places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+               c++;
        }
+
        placement->num_placement = c;
-       placement->num_busy_placement = c;
+       placement->placement = places;
 
-       for (i = 0; i < c; i++) {
-               if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-                       (placements[i].flags & TTM_PL_FLAG_VRAM) &&
-                       !placements[i].fpfn)
-                       placements[i].lpfn =
-                               adev->mc.visible_vram_size >> PAGE_SHIFT;
-               else
-                       placements[i].lpfn = 0;
-       }
+       placement->num_busy_placement = c;
+       placement->busy_placement = places;
 }
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
@@ -211,6 +222,98 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
        bo->placement.busy_placement = bo->placements;
 }
 
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+                           unsigned long size, int align,
+                           u32 domain, struct amdgpu_bo **bo_ptr,
+                           u64 *gpu_addr, void **cpu_addr)
+{
+       int r;
+
+       r = amdgpu_bo_create(adev, size, align, true, domain,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                            NULL, NULL, bo_ptr);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+               return r;
+       }
+
+       r = amdgpu_bo_reserve(*bo_ptr, false);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+               goto error_free;
+       }
+
+       r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+       if (r) {
+               dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+               goto error_unreserve;
+       }
+
+       if (cpu_addr) {
+               r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+               if (r) {
+                       dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+                       goto error_unreserve;
+               }
+       }
+
+       amdgpu_bo_unreserve(*bo_ptr);
+
+       return 0;
+
+error_unreserve:
+       amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+       amdgpu_bo_unref(bo_ptr);
+
+       return r;
+}
+
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ * @gpu_addr: pinned GPU address to clear, may be NULL
+ * @cpu_addr: CPU mapping to unmap and clear, may be NULL
+ *
+ * Unmaps and unpins a BO allocated for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+                          void **cpu_addr)
+{
+       if (*bo == NULL)
+               return;
+
+       if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+               if (cpu_addr)
+                       amdgpu_bo_kunmap(*bo);
+
+               amdgpu_bo_unpin(*bo);
+               amdgpu_bo_unreserve(*bo);
+       }
+       amdgpu_bo_unref(bo);
+
+       if (gpu_addr)
+               *gpu_addr = 0;
+
+       if (cpu_addr)
+               *cpu_addr = NULL;
+}
+
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
@@ -250,6 +353,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        }
        bo->adev = adev;
        INIT_LIST_HEAD(&bo->list);
+       INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +381,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        if (unlikely(r != 0)) {
                return r;
        }
+
+       if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+           bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+               struct fence *fence;
+
+               if (adev->mman.buffer_funcs_ring == NULL ||
+                  !adev->mman.buffer_funcs_ring->ready) {
+                       r = -EBUSY;
+                       goto fail_free;
+               }
+
+               r = amdgpu_bo_reserve(bo, false);
+               if (unlikely(r != 0))
+                       goto fail_free;
+
+               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+               if (unlikely(r != 0))
+                       goto fail_unreserve;
+
+               amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+               amdgpu_bo_fence(bo, fence, false);
+               amdgpu_bo_unreserve(bo);
+               fence_put(bo->tbo.moving);
+               bo->tbo.moving = fence_get(fence);
+               fence_put(fence);
+       }
        *bo_ptr = bo;
 
        trace_amdgpu_bo_create(bo);
 
        return 0;
+
+fail_unreserve:
+       amdgpu_bo_unreserve(bo);
+fail_free:
+       amdgpu_bo_unref(&bo);
+       return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+                                  unsigned long size, int byte_align,
+                                  struct amdgpu_bo *bo)
+{
+       struct ttm_placement placement = {0};
+       struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+       int r;
+
+       if (bo->shadow)
+               return 0;
+
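+       /* the shadow lives in GTT, so its contents survive a VRAM loss */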
+       bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+       memset(&placements, 0,
+              (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+       amdgpu_ttm_placement_init(adev, &placement,
+                                 placements, AMDGPU_GEM_DOMAIN_GTT,
+                                 AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+       r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+                                       AMDGPU_GEM_DOMAIN_GTT,
+                                       AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+                                       NULL, &placement,
+                                       bo->tbo.resv,
+                                       &bo->shadow);
+       if (!r) {
+               bo->shadow->parent = amdgpu_bo_ref(bo);
+               mutex_lock(&adev->shadow_list_lock);
+               list_add_tail(&bo->shadow_list, &adev->shadow_list);
+               mutex_unlock(&adev->shadow_list_lock);
+       }
+
+       return r;
 }
 
 int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +465,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 {
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+       int r;
 
        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +473,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, domain, flags);
 
-       return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-                                          domain, flags, sg, &placement,
-                                          resv, bo_ptr);
+       r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+                                       domain, flags, sg, &placement,
+                                       resv, bo_ptr);
+       if (r)
+               return r;
+
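+       /* optionally back the BO with a GTT shadow for recovery after reset */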
+       if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+               r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+               if (r)
+                       amdgpu_bo_unref(bo_ptr);
+       }
+
+       return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring,
+                              struct amdgpu_bo *bo,
+                              struct reservation_object *resv,
+                              struct fence **fence,
+                              bool direct)
+{
+       struct amdgpu_bo *shadow = bo->shadow;
+       uint64_t bo_addr, shadow_addr;
+       int r;
+
+       if (!shadow)
+               return -EINVAL;
+
+       bo_addr = amdgpu_bo_gpu_offset(bo);
+       shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+       r = reservation_object_reserve_shared(bo->tbo.resv);
+       if (r)
+               goto err;
+
+       r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+                              amdgpu_bo_size(bo), resv, fence,
+                              direct);
+       if (!r)
+               amdgpu_bo_fence(bo, *fence, true);
+
+err:
+       return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring,
+                                 struct amdgpu_bo *bo,
+                                 struct reservation_object *resv,
+                                 struct fence **fence,
+                                 bool direct)
+{
+       struct amdgpu_bo *shadow = bo->shadow;
+       uint64_t bo_addr, shadow_addr;
+       int r;
+
+       if (!shadow)
+               return -EINVAL;
+
+       bo_addr = amdgpu_bo_gpu_offset(bo);
+       shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+       r = reservation_object_reserve_shared(bo->tbo.resv);
+       if (r)
+               goto err;
+
+       r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+                              amdgpu_bo_size(bo), resv, fence,
+                              direct);
+       if (!r)
+               amdgpu_bo_fence(bo, *fence, true);
+
+err:
+       return r;
 }
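
The two helpers above are symmetric SDMA copies between a BO and its GTT
shadow, differing only in copy direction. A sketch of a pre-reset backup,
assuming the new API; the BO is reserved so the GPU-offset reads inside
the helper are legal:

    struct fence *fence = NULL;
    int r;

    r = amdgpu_bo_reserve(bo, false);
    if (r)
            return r;
    r = amdgpu_bo_backup_to_shadow(adev, ring, bo, bo->tbo.resv,
                                   &fence, true /* direct submit */);
    amdgpu_bo_unreserve(bo);
    if (!r) {
            fence_wait(fence, false);
            fence_put(fence);
    }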
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +627,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return -EINVAL;
 
        if (bo->pin_count) {
+               uint32_t mem_type = bo->tbo.mem.mem_type;
+
+               if (domain != amdgpu_mem_type_to_domain(mem_type))
+                       return -EINVAL;
+
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);
 
                if (max_offset != 0) {
-                       u64 domain_start;
-                       if (domain == AMDGPU_GEM_DOMAIN_VRAM)
-                               domain_start = bo->adev->mc.vram_start;
-                       else
-                               domain_start = bo->adev->mc.gtt_start;
+                       u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }
@@ -401,7 +649,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
-                   (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+                   (!max_offset || max_offset >
+                    bo->adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         bo->adev->mc.visible_vram_size))
                                return -EINVAL;
@@ -420,19 +669,28 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        }
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-       if (likely(r == 0)) {
-               bo->pin_count = 1;
-               if (gpu_addr != NULL)
-                       *gpu_addr = amdgpu_bo_gpu_offset(bo);
-               if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-                       bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                               bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
-               } else
-                       bo->adev->gart_pin_size += amdgpu_bo_size(bo);
-       } else {
+       if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
+               goto error;
        }
+       r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+       if (unlikely(r)) {
+               dev_err(bo->adev->dev, "%p bind failed\n", bo);
+               goto error;
+       }
+
+       bo->pin_count = 1;
+       if (gpu_addr != NULL)
+               *gpu_addr = amdgpu_bo_gpu_offset(bo);
+       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+               bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                       bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+       } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+               bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+       }
+
+error:
        return r;
 }
 
@@ -457,16 +715,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-       if (likely(r == 0)) {
-               if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-                       bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                               bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
-               } else
-                       bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
-       } else {
+       if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+               goto error;
+       }
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+               bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                       bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+               bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        }
+
+error:
        return r;
 }
 
@@ -637,7 +899,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        for (i = 0; i < abo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-                   (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+                   (!abo->placements[i].lpfn ||
+                    abo->placements[i].lpfn > lpfn))
                        abo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -674,3 +937,24 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
        else
                reservation_object_add_excl_fence(resv, fence);
 }
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo:        amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+                    !amdgpu_ttm_is_bound(bo->tbo.ttm));
+       WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+                    !bo->pin_count);
+       WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+
+       return bo->tbo.offset;
+}
index bdb01d932548992099f5b77ed42de3fd2442a25e..8255034d73eb10061c32a175b02b03f131587280 100644 (file)
@@ -31,6 +31,8 @@
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 
+#define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
+
 /**
  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
  * @mem_type:  ttm memory type
@@ -85,21 +87,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
        ttm_bo_unreserve(&bo->tbo);
 }
 
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo:        amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
-       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       return bo->tbo.offset;
-}
-
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
        return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +126,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+                           unsigned long size, int align,
+                           u32 domain, struct amdgpu_bo **bo_ptr,
+                           u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+                          void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +158,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring,
+                              struct amdgpu_bo *bo,
+                              struct reservation_object *resv,
+                              struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring,
+                                 struct amdgpu_bo *bo,
+                                 struct reservation_object *resv,
+                                 struct fence **fence,
+                                 bool direct);
 
 /*
  * sub allocation
index d15314957732e616d2697a0b830dea276d30d62a..8e67c1210d7c0bf082b986e9f05149dd79818d3d 100644 (file)
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "atombios_encoders.h"
+#include "amdgpu_pll.h"
 #include <asm/div64.h>
 #include <linux/gcd.h>
 
index ff63b88b0ffaf721a58d63f81bb47ab227ac93b9..accc908bdc880b2bacdc5ba3486b624420a7e2f6 100644 (file)
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        char *table = NULL;
-       int size, i;
+       int size;
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;
 
-       for (i = 0; i < size; i++) {
-               sprintf(buf + i, "%02x", table[i]);
-       }
-       sprintf(buf + i, "\n");
+       memcpy(buf, table, size);
 
        return size;
 }
@@ -1106,54 +1103,46 @@ force:
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->pp_enabled)
+       if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+               /* enable/disable UVD */
+               mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_powergate_uvd(adev, !enable);
-       else {
-               if (adev->pm.funcs->powergate_uvd) {
+               mutex_unlock(&adev->pm.mutex);
+       } else {
+               if (enable) {
                        mutex_lock(&adev->pm.mutex);
-                       /* enable/disable UVD */
-                       amdgpu_dpm_powergate_uvd(adev, !enable);
+                       adev->pm.dpm.uvd_active = true;
+                       adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
                        mutex_unlock(&adev->pm.mutex);
                } else {
-                       if (enable) {
-                               mutex_lock(&adev->pm.mutex);
-                               adev->pm.dpm.uvd_active = true;
-                               adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-                               mutex_unlock(&adev->pm.mutex);
-                       } else {
-                               mutex_lock(&adev->pm.mutex);
-                               adev->pm.dpm.uvd_active = false;
-                               mutex_unlock(&adev->pm.mutex);
-                       }
-                       amdgpu_pm_compute_clocks(adev);
+                       mutex_lock(&adev->pm.mutex);
+                       adev->pm.dpm.uvd_active = false;
+                       mutex_unlock(&adev->pm.mutex);
                }
-
+               amdgpu_pm_compute_clocks(adev);
        }
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->pp_enabled)
+       if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+               /* enable/disable VCE */
+               mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_powergate_vce(adev, !enable);
-       else {
-               if (adev->pm.funcs->powergate_vce) {
+               mutex_unlock(&adev->pm.mutex);
+       } else {
+               if (enable) {
                        mutex_lock(&adev->pm.mutex);
-                       amdgpu_dpm_powergate_vce(adev, !enable);
+                       adev->pm.dpm.vce_active = true;
+                       /* XXX select vce level based on ring/task */
+                       adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
                        mutex_unlock(&adev->pm.mutex);
                } else {
-                       if (enable) {
-                               mutex_lock(&adev->pm.mutex);
-                               adev->pm.dpm.vce_active = true;
-                               /* XXX select vce level based on ring/task */
-                               adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
-                               mutex_unlock(&adev->pm.mutex);
-                       } else {
-                               mutex_lock(&adev->pm.mutex);
-                               adev->pm.dpm.vce_active = false;
-                               mutex_unlock(&adev->pm.mutex);
-                       }
-                       amdgpu_pm_compute_clocks(adev);
+                       mutex_lock(&adev->pm.mutex);
+                       adev->pm.dpm.vce_active = false;
+                       mutex_unlock(&adev->pm.mutex);
                }
+               amdgpu_pm_compute_clocks(adev);
        }
 }
 
@@ -1333,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
  */
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+       int32_t value;
+
+       /* sanity check PP is enabled */
+       if (!(adev->powerplay.pp_funcs &&
+             adev->powerplay.pp_funcs->read_sensor))
+               return -EINVAL;
+
+       /* GPU Clocks */
+       seq_printf(m, "GFX Clocks and Power:\n");
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+               seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+               seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+               seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+               seq_printf(m, "\t%u mV (VDDNB)\n", value);
+       seq_printf(m, "\n");
+
+       /* GPU Temp */
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+               seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+       /* GPU Load */
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+               seq_printf(m, "GPU Load: %u %%\n", value);
+       seq_printf(m, "\n");
+
+       /* UVD clocks */
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+               if (!value) {
+                       seq_printf(m, "UVD: Disabled\n");
+               } else {
+                       seq_printf(m, "UVD: Enabled\n");
+                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+                               seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+                               seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+               }
+       }
+       seq_printf(m, "\n");
+
+       /* VCE clocks */
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+               if (!value) {
+                       seq_printf(m, "VCE: Disabled\n");
+               } else {
+                       seq_printf(m, "VCE: Enabled\n");
+                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+                               seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+               }
+       }
+
+       return 0;
+}
+
 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1348,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
                seq_printf(m, "PX asic powered off\n");
        } else if (adev->pp_enabled) {
-               amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+               return amdgpu_debugfs_pm_info_pp(m, adev);
        } else {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.funcs->debugfs_print_current_performance_level)
-                       amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+                       adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
                else
                        seq_printf(m, "Debugfs support not implemented for this asic\n");
                mutex_unlock(&adev->pm.mutex);
index c5738a22b69041356db2ef7e488defce76db6f30..68ad24101a3646bdc7caffa32660fa19b9476e83 100644 (file)
@@ -30,6 +30,7 @@
 #include "amdgpu_pm.h"
 #include <drm/amdgpu_drm.h>
 #include "amdgpu_powerplay.h"
+#include "si_dpm.h"
 #include "cik_dpm.h"
 #include "vi_dpm.h"
 
@@ -52,8 +53,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                pp_init->chip_family = adev->family;
                pp_init->chip_id = adev->asic_type;
                pp_init->device = amdgpu_cgs_create_device(adev);
-               pp_init->powercontainment_enabled = amdgpu_powercontainment;
-
                ret = amd_powerplay_init(pp_init, amd_pp);
                kfree(pp_init);
 #endif
@@ -61,6 +60,15 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                amd_pp->pp_handle = (void *)adev;
 
                switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       amd_pp->ip_funcs = &si_dpm_ip_funcs;
+                       break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
                case CHIP_BONAIRE:
                case CHIP_HAWAII:
@@ -72,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                        amd_pp->ip_funcs = &kv_dpm_ip_funcs;
                        break;
 #endif
-               case CHIP_TOPAZ:
-                       amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
-                       break;
-               case CHIP_TONGA:
-                       amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
-                       break;
-               case CHIP_FIJI:
-                       amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
-                       break;
                case CHIP_CARRIZO:
                case CHIP_STONEY:
                        amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -102,15 +101,14 @@ static int amdgpu_pp_early_init(void *handle)
        switch (adev->asic_type) {
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
-               adev->pp_enabled = true;
-               break;
        case CHIP_TONGA:
        case CHIP_FIJI:
-               adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+       case CHIP_TOPAZ:
+               adev->pp_enabled = true;
                break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
-               adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+               adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
                break;
        /* These chips don't have powerplay implementations */
        case CHIP_BONAIRE:
@@ -118,7 +116,6 @@ static int amdgpu_pp_early_init(void *handle)
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_KAVERI:
-       case CHIP_TOPAZ:
        default:
                adev->pp_enabled = false;
                break;
index 85aeb0a804bbcef1a5860f7bcca3ba2c822386b8..777f11b63b4c4b4574059c7942394550f3fc771b 100644 (file)
@@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
-               r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
-                                    AMDGPU_GEM_DOMAIN_GTT, 0,
-                                    NULL, NULL, &ring->ring_obj);
+               r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+                                           AMDGPU_GEM_DOMAIN_GTT,
+                                           &ring->ring_obj,
+                                           &ring->gpu_addr,
+                                           (void **)&ring->ring);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
-               r = amdgpu_bo_reserve(ring->ring_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-               r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
-                                       &ring->gpu_addr);
-               if (r) {
-                       amdgpu_bo_unreserve(ring->ring_obj);
-                       dev_err(adev->dev, "(%d) ring pin failed\n", r);
-                       return r;
-               }
-               r = amdgpu_bo_kmap(ring->ring_obj,
-                                      (void **)&ring->ring);
-
                memset((void *)ring->ring, 0, ring->ring_size);
-
-               amdgpu_bo_unreserve(ring->ring_obj);
-               if (r) {
-                       dev_err(adev->dev, "(%d) ring map failed\n", r);
-                       return r;
-               }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->max_dw = max_dw;
@@ -269,28 +252,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-       int r;
-       struct amdgpu_bo *ring_obj;
-
-       ring_obj = ring->ring_obj;
        ring->ready = false;
-       ring->ring = NULL;
-       ring->ring_obj = NULL;
 
        amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
 
-       if (ring_obj) {
-               r = amdgpu_bo_reserve(ring_obj, false);
-               if (likely(r == 0)) {
-                       amdgpu_bo_kunmap(ring_obj);
-                       amdgpu_bo_unpin(ring_obj);
-                       amdgpu_bo_unreserve(ring_obj);
-               }
-               amdgpu_bo_unref(&ring_obj);
-       }
+       amdgpu_bo_free_kernel(&ring->ring_obj,
+                             &ring->gpu_addr,
+                             (void **)&ring->ring);
+
        amdgpu_debugfs_ring_fini(ring);
 }
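
This hunk shows the point of the amdgpu_bo_create_kernel() /
amdgpu_bo_free_kernel() pair declared in amdgpu_object.h above: the
open-coded create/reserve/pin/kmap ladder collapses into one call. The
general pattern, as a sketch (size and domain illustrative):

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT,
                                &bo, &gpu_addr, &cpu_ptr);
    if (r)
            return r;
    /* ... cpu_ptr for CPU access, gpu_addr for the GPU ... */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);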
 
index 05a53f4fc3349e8b5075d56e37c45dcc5da49cf7..b827c75e95deb1295f6ec14a5a781fc6b922939c 100644 (file)
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(gtt_obj[i]);
 
                r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-                                      size, NULL, &fence);
+                                      size, NULL, &fence, false);
 
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(vram_obj);
 
                r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-                                      size, NULL, &fence);
+                                      size, NULL, &fence, false);
 
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
index b7742e62972a61aae553dca11477e89ab8be6e2e..160a094e1a934e9269a32ccdadf71fdcae796d32 100644 (file)
@@ -34,6 +34,7 @@
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/seq_file.h>
@@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
        ttm_mem_global_release(ref->object);
 }
 
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
        struct drm_global_reference *global_ref;
        struct amdgpu_ring *ring;
@@ -88,10 +89,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
        global_ref->init = &amdgpu_ttm_mem_global_init;
        global_ref->release = &amdgpu_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
-               return r;
+               goto error_mem;
        }
 
        adev->mman.bo_global_ref.mem_glob =
@@ -102,26 +103,30 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&adev->mman.mem_global_ref);
-               return r;
+               goto error_bo;
        }
 
        ring = adev->mman.buffer_funcs_ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
        r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
                                  rq, amdgpu_sched_jobs);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-               drm_global_item_unref(&adev->mman.mem_global_ref);
-               drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-               return r;
+               goto error_entity;
        }
 
        adev->mman.mem_global_referenced = true;
 
        return 0;
+
+error_entity:
+       drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+       drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+       return r;
 }
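
The init path now follows the standard kernel unwind ladder: each failure
jumps to a label that releases only what was already acquired, in reverse
order, so no cleanup is duplicated. A reduced sketch of the idiom with
hypothetical step names:

    int init_resources(void)
    {
            int err;

            err = acquire_a();      /* acquire_a() etc. are hypothetical */
            if (err)
                    goto err_a;
            err = acquire_b();
            if (err)
                    goto err_b;
            return 0;

    err_b:
            release_a();            /* undo in reverse order */
    err_a:
            return err;
    }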
 
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -196,6 +201,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };
+       unsigned i;
 
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
                placement->placement = &placements;
@@ -207,10 +213,25 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct amdgpu_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->adev->mman.buffer_funcs_ring->ready == false)
+               if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
                        amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
-               else
+               } else {
                        amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+                       for (i = 0; i < rbo->placement.num_placement; ++i) {
+                               if (!(rbo->placements[i].flags &
+                                     TTM_PL_FLAG_TT))
+                                       continue;
+
+                               if (rbo->placements[i].lpfn)
+                                       continue;
+
+                               /* set an upper limit to force directly
+                                * allocating address space for the BO.
+                                */
+                               rbo->placements[i].lpfn =
+                                       rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+                       }
+               }
                break;
        case TTM_PL_TT:
        default:
@@ -225,7 +246,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -251,26 +273,30 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
        adev = amdgpu_get_adev(bo->bdev);
        ring = adev->mman.buffer_funcs_ring;
-       old_start = old_mem->start << PAGE_SHIFT;
-       new_start = new_mem->start << PAGE_SHIFT;
+       old_start = (u64)old_mem->start << PAGE_SHIFT;
+       new_start = (u64)new_mem->start << PAGE_SHIFT;
 
        switch (old_mem->mem_type) {
-       case TTM_PL_VRAM:
-               old_start += adev->mc.vram_start;
-               break;
        case TTM_PL_TT:
-               old_start += adev->mc.gtt_start;
+               r = amdgpu_ttm_bind(bo->ttm, old_mem);
+               if (r)
+                       return r;
+
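+               /* fall through */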
+       case TTM_PL_VRAM:
+               old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
-       case TTM_PL_VRAM:
-               new_start += adev->mc.vram_start;
-               break;
        case TTM_PL_TT:
-               new_start += adev->mc.gtt_start;
+               r = amdgpu_ttm_bind(bo->ttm, new_mem);
+               if (r)
+                       return r;
+
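+               /* fall through */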
+       case TTM_PL_VRAM:
+               new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,7 +311,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
        r = amdgpu_copy_buffer(ring, old_start, new_start,
                               new_mem->num_pages * PAGE_SIZE, /* bytes */
-                              bo->resv, &fence);
+                              bo->resv, &fence, false);
        if (r)
                return r;
 
@@ -314,7 +340,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
-       placements.lpfn = 0;
+       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
@@ -335,7 +361,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -361,14 +387,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
-       placements.lpfn = 0;
+       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -435,8 +461,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, interruptible,
-                                      no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
@@ -524,6 +549,7 @@ struct amdgpu_ttm_tt {
        spinlock_t              guptasklock;
        struct list_head        guptasks;
        atomic_t                mmu_invalidations;
+       struct list_head        list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -641,7 +667,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
-       uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        int r;
 
        if (gtt->userptr) {
@@ -651,7 +676,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                        return r;
                }
        }
-       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
@@ -662,14 +687,62 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
            bo_mem->mem_type == AMDGPU_PL_OA)
                return -EINVAL;
 
+       return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       uint32_t flags;
+       int r;
+
+       if (!ttm || amdgpu_ttm_is_bound(ttm))
+               return 0;
+
+       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
                ttm->pages, gtt->ttm.dma_address, flags);
 
        if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-                         ttm->num_pages, (unsigned)gtt->offset);
+               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                         ttm->num_pages, gtt->offset);
                return r;
        }
+       spin_lock(&gtt->adev->gtt_list_lock);
+       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+       spin_unlock(&gtt->adev->gtt_list_lock);
+       return 0;
+}
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+       struct amdgpu_ttm_tt *gtt, *tmp;
+       struct ttm_mem_reg bo_mem;
+       uint32_t flags;
+       int r;
+
+       bo_mem.mem_type = TTM_PL_TT;
+       spin_lock(&adev->gtt_list_lock);
+       list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+               r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+                                    gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+                                    flags);
+               if (r) {
+                       spin_unlock(&adev->gtt_list_lock);
+                       DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                                 gtt->ttm.ttm.num_pages, gtt->offset);
+                       return r;
+               }
+       }
+       spin_unlock(&adev->gtt_list_lock);
        return 0;
 }
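
amdgpu_ttm_recover_gart() replays every binding recorded on
adev->gtt_list, which amdgpu_ttm_bind() and the unbind path below keep up
to date under gtt_list_lock. A hypothetical reset-path caller, assuming
the helper above:

    /* after the hardware GART table has been re-initialized */
    r = amdgpu_ttm_recover_gart(adev);
    if (r)
            dev_err(adev->dev, "GART recovery failed (%d)\n", r);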
 
@@ -677,6 +750,9 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+       if (!amdgpu_ttm_is_bound(ttm))
+               return 0;
+
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        if (gtt->adev->gart.ready)
                amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
@@ -684,6 +760,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
        if (gtt->userptr)
                amdgpu_ttm_tt_unpin_userptr(ttm);
 
+       spin_lock(&gtt->adev->gtt_list_lock);
+       list_del_init(&gtt->list);
+       spin_unlock(&gtt->adev->gtt_list_lock);
+
        return 0;
 }
 
@@ -720,6 +800,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
                kfree(gtt);
                return NULL;
        }
+       INIT_LIST_HEAD(&gtt->list);
        return &gtt->ttm.ttm;
 }
 
@@ -950,6 +1031,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
        struct list_head *res = lru->lru[tbo->mem.mem_type];
 
        lru->lru[tbo->mem.mem_type] = &tbo->lru;
+       while ((++lru)->lru[tbo->mem.mem_type] == res)
+               lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
        return res;
 }
@@ -960,6 +1043,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
        struct list_head *res = lru->swap_lru;
 
        lru->swap_lru = &tbo->swap;
+       while ((++lru)->swap_lru == res)
+               lru->swap_lru = &tbo->swap;
 
        return res;
 }
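
Both tail helpers depend on the guard entry declared at the end of struct
amdgpu_mman (see the new amdgpu_ttm.h below): the guard sits directly
after the log2_size array and its heads are initialized to NULL in
amdgpu_ttm_init(), so the ++lru walk always terminates inside the
structure. A reduced, hypothetical illustration of the technique:

    struct list_head;                   /* opaque here */

    struct lru_slot { struct list_head *head; };

    struct lru_pool {
            struct lru_slot slot[20];
            struct lru_slot guard;      /* must stay directly after slot[] */
    };

    static struct list_head *take_tail(struct lru_slot *lru,
                                       struct list_head *item)
    {
            struct list_head *res = lru->head;

            lru->head = item;
            /* advance later slots still pointing at the old tail; the
             * guard's head stays NULL, so the walk stops there */
            while ((++lru)->head == res)
                    lru->head = item;
            return res;
    }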
@@ -987,10 +1072,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        unsigned i, j;
        int r;
 
-       r = amdgpu_ttm_global_init(adev);
-       if (r) {
-               return r;
-       }
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&adev->mman.bdev,
                               adev->mman.bo_global_ref.ref.object,
@@ -1011,6 +1092,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
        }
 
+       for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+               adev->mman.guard.lru[j] = NULL;
+       adev->mman.guard.swap_lru = NULL;
+
        adev->mman.initialized = true;
        r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
                                adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1151,7 +1236,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                       uint64_t dst_offset,
                       uint32_t byte_count,
                       struct reservation_object *resv,
-                      struct fence **fence)
+                      struct fence **fence, bool direct_submit)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
@@ -1193,10 +1278,81 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                byte_count -= cur_size_in_bytes;
        }
 
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+       if (direct_submit) {
+               r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+                                      NULL, NULL, fence);
+               if (r) {
+                       DRM_ERROR("Error scheduling IBs (%d)\n", r);
+                       goto error_free;
+               }
+               job->fence = fence_get(*fence);
+               amdgpu_job_free(job);
+       } else {
+               r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+               if (r)
+                       goto error_free;
+       }
+
+       return r;
+
+error_free:
+       amdgpu_job_free(job);
+       return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+               uint32_t src_data,
+               struct reservation_object *resv,
+               struct fence **fence)
+{
+       struct amdgpu_device *adev = bo->adev;
+       struct amdgpu_job *job;
+       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+       uint32_t max_bytes, byte_count;
+       uint64_t dst_offset;
+       unsigned int num_loops, num_dw;
+       unsigned int i;
+       int r;
+
+       byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+       max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+       num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+       num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+       /* for IB padding */
+       while (num_dw & 0x7)
+               num_dw++;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       if (r)
+               return r;
+
+       if (resv) {
+               r = amdgpu_sync_resv(adev, &job->sync, resv,
+                               AMDGPU_FENCE_OWNER_UNDEFINED);
+               if (r) {
+                       DRM_ERROR("sync failed (%d).\n", r);
+                       goto error_free;
+               }
+       }
+
+       dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+       for (i = 0; i < num_loops; i++) {
+               uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+               amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+                               dst_offset, cur_size_in_bytes);
+
+               dst_offset += cur_size_in_bytes;
+               byte_count -= cur_size_in_bytes;
+       }
+
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
 
@@ -1387,3 +1543,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
 
 #endif
 }
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+       return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644 (file)
index 0000000..3ee825f
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA           (TTM_PL_PRIV + 2)
+
+#define AMDGPU_PL_FLAG_GDS             (TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS             (TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA              (TTM_PL_FLAG_PRIV << 2)
+
+#define AMDGPU_TTM_LRU_SIZE    20
+
+struct amdgpu_mman_lru {
+       struct list_head                *lru[TTM_NUM_MEM_TYPES];
+       struct list_head                *swap_lru;
+};
+
+struct amdgpu_mman {
+       struct ttm_bo_global_ref        bo_global_ref;
+       struct drm_global_reference     mem_global_ref;
+       struct ttm_bo_device            bdev;
+       bool                            mem_global_referenced;
+       bool                            initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+       struct dentry                   *vram;
+       struct dentry                   *gtt;
+#endif
+
+       /* buffer handling */
+       const struct amdgpu_buffer_funcs        *buffer_funcs;
+       struct amdgpu_ring                      *buffer_funcs_ring;
+       /* Scheduler entity for buffer moves */
+       struct amd_sched_entity                 entity;
+
+       /* custom LRU management */
+       struct amdgpu_mman_lru                  log2_size[AMDGPU_TTM_LRU_SIZE];
+       /* guard for log2_size array, don't add anything in between */
+       struct amdgpu_mman_lru                  guard;
+};
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+                      uint64_t src_offset,
+                      uint64_t dst_offset,
+                      uint32_t byte_count,
+                      struct reservation_object *resv,
+                      struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+                       uint32_t src_data,
+                       struct reservation_object *resv,
+                       struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+#endif
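
The new direct_submit flag on amdgpu_copy_buffer() selects between
queuing the copy on the mman scheduler entity and emitting the IB
immediately; the direct path exists for contexts such as GPU reset where
the scheduler may not be running. A sketch of both calls (src, dst, size
and resv are placeholders):

    uint64_t src, dst;                  /* placeholder GPU addresses */
    uint32_t size;                      /* placeholder byte count */
    struct reservation_object *resv;    /* placeholder */
    struct fence *f = NULL;
    int r;

    /* normal path: scheduled, synchronized against resv */
    r = amdgpu_copy_buffer(ring, src, dst, size, resv, &f, false);

    /* reset path: bypass the scheduler and emit the IB directly */
    r = amdgpu_copy_buffer(ring, src, dst, size, NULL, &f, true);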
index 5cc95f1a7dab955ea39f93f0dbb1110129c582d0..7a05f79818f1a6a3cfbb3179ef03d36bd92436ec 100644 (file)
@@ -247,35 +247,28 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
        const struct common_firmware_header *header = NULL;
 
        err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+                              AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
        if (err) {
                dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-               err = -ENOMEM;
                goto failed;
        }
 
        err = amdgpu_bo_reserve(*bo, false);
        if (err) {
-               amdgpu_bo_unref(bo);
                dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-               goto failed;
+               goto failed_reserve;
        }
 
        err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
        if (err) {
-               amdgpu_bo_unreserve(*bo);
-               amdgpu_bo_unref(bo);
                dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-               goto failed;
+               goto failed_pin;
        }
 
        err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
        if (err) {
                dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-               amdgpu_bo_unpin(*bo);
-               amdgpu_bo_unreserve(*bo);
-               amdgpu_bo_unref(bo);
-               goto failed;
+               goto failed_kmap;
        }
 
        amdgpu_bo_unreserve(*bo);
@@ -290,10 +283,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
                        fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }
+       return 0;
 
+failed_kmap:
+       amdgpu_bo_unpin(*bo);
+failed_pin:
+       amdgpu_bo_unreserve(*bo);
+failed_reserve:
+       amdgpu_bo_unref(bo);
 failed:
-       if (err)
-               adev->firmware.smu_load = false;
+       adev->firmware.smu_load = false;
 
        return err;
 }
index b11f4e8868d7652503713b508da2c8820a99c36b..cee7bc9a2314dce0c315f768c14dd498e67810dc 100644 (file)
@@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
                  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
-       r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM,
-                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                            NULL, NULL, &adev->uvd.vcpu_bo);
+       r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+                                   &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
        }
 
-       r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-       if (r) {
-               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-               dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
-               return r;
-       }
-
-       r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
-                         &adev->uvd.gpu_addr);
-       if (r) {
-               amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-               dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
-               return r;
-       }
-
-       r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
-       if (r) {
-               dev_err(adev->dev, "(%d) UVD map failed\n", r);
-               return r;
-       }
-
-       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
        ring = &adev->uvd.ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -274,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-       int r;
-
        kfree(adev->uvd.saved_bo);
 
        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-       if (adev->uvd.vcpu_bo) {
-               r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-               if (!r) {
-                       amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-                       amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-                       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-               }
-
-               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-       }
+       amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+                             &adev->uvd.gpu_addr,
+                             (void **)&adev->uvd.cpu_addr);
 
        amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -323,7 +289,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        if (!adev->uvd.saved_bo)
                return -ENOMEM;
 
-       memcpy(adev->uvd.saved_bo, ptr, size);
+       memcpy_fromio(adev->uvd.saved_bo, ptr, size);
 
        return 0;
 }
@@ -340,7 +306,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
        ptr = adev->uvd.cpu_addr;
 
        if (adev->uvd.saved_bo != NULL) {
-               memcpy(ptr, adev->uvd.saved_bo, size);
+               memcpy_toio(ptr, adev->uvd.saved_bo, size);
                kfree(adev->uvd.saved_bo);
                adev->uvd.saved_bo = NULL;
        } else {
@@ -349,11 +315,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
                hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-               memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-                       (adev->uvd.fw->size) - offset);
+               memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+                           le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
-               memset(ptr, 0, size);
+               memset_io(ptr, 0, size);
        }
 
        return 0;
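
The UVD VCPU buffer is kernel-mapped VRAM, i.e. I/O memory, so the plain
memcpy()/memset() calls are replaced with the __iomem-aware accessors.
The convention, as a sketch (names hypothetical):

    void __iomem *vram;     /* CPU mapping of the VRAM BO */
    void *saved;            /* kmalloc'd system-memory staging copy */
    size_t size;

    memcpy_fromio(saved, vram, size);   /* VRAM -> RAM, on suspend */
    memcpy_toio(vram, saved, size);     /* RAM -> VRAM, on resume */
    memset_io(vram, 0, size);           /* clear the remainder */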
@@ -843,6 +809,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
                                return r;
                        break;
                case mmUVD_ENGINE_CNTL:
+               case mmUVD_NO_OP:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -915,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
                return -EINVAL;
        }
 
+       r = amdgpu_cs_sysvm_access_required(parser);
+       if (r)
+               return r;
+
        ctx.parser = parser;
        ctx.buf_sizes = buf_sizes;
        ctx.ib_idx = ib_idx;
@@ -981,8 +952,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
        ib->ptr[5] = 0;
-       for (i = 6; i < 16; ++i)
-               ib->ptr[i] = PACKET2(0);
+       for (i = 6; i < 16; i += 2) {
+               ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+               ib->ptr[i+1] = 0;
+       }
        ib->length_dw = 16;
 
        if (direct) {
@@ -1114,15 +1087,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, uvd.idle_work.work);
-       unsigned i, fences, handles = 0;
+       unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-       fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
-       for (i = 0; i < adev->uvd.max_handles; ++i)
-               if (atomic_read(&adev->uvd.handles[i]))
-                       ++handles;
-
-       if (fences == 0 && handles == 0) {
+       if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, false);
                } else {
@@ -1187,7 +1154,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                r = 0;
        }
 
-error:
        fence_put(fence);
+
+error:
        return r;
 }
index 05865ce35351919cda021e1d5d121dabeda6758e..06b94c13c2c9f93f1b89c5480043e57cea32e006 100644 (file)
@@ -282,8 +282,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->vce.fw->data;
        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-       memcpy(cpu_addr, (adev->vce.fw->data) + offset,
-               (adev->vce.fw->size) - offset);
+       memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+                   adev->vce.fw->size - offset);
 
        amdgpu_bo_kunmap(adev->vce.vcpu_bo);
 
@@ -634,7 +634,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
-       int i, r = 0, idx = 0;
+       int i, r, idx = 0;
+
+       r = amdgpu_cs_sysvm_access_required(p);
+       if (r)
+               return r;
 
        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -687,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                case 0x04000008: /* rdo */
                case 0x04000009: /* vui */
                case 0x05000002: /* auxiliary buffer */
+               case 0x05000009: /* clock table */
                        break;
 
                case 0x03000001: /* encode */
@@ -799,6 +804,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
        amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* amdgpu_vce_ring_emit_fence  x1 no user fence */
+}
+
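
The two helpers above report, in dwords, how much ring space one IB emission and one frame's fixed overhead consume. A hedged sketch of how a caller could size a ring allocation with them; the reservation site itself is not part of this hunk, and amdgpu_ring_alloc() is the existing ring API:

/* Reserve ring space for one frame: fixed per-frame overhead plus one
 * IB emission per scheduled IB. */
static int reserve_vce_frame(struct amdgpu_ring *ring, unsigned num_ibs)
{
        unsigned ndw = amdgpu_vce_ring_get_dma_frame_size(ring) +
                       amdgpu_vce_ring_get_emit_ib_size(ring) * num_ibs;

        return amdgpu_ring_alloc(ring, ndw);
}
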
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -850,8 +867,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        struct fence *fence = NULL;
        long r;
 
-       /* skip vce ring1 ib test for now, since it's not reliable */
-       if (ring == &ring->adev->vce.ring[1])
+       /* skip vce ring1/2 ib test for now, since it's not reliable */
+       if (ring != &ring->adev->vce.ring[0])
                return 0;
 
        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
index 63f83d0d985c0fc981de361ee746865c079373b5..12729d2852df42e3c4706bb9b5246085b13549c8 100644 (file)
@@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
new file mode 100644 (file)
index 0000000..2c37a37
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
+
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+       uint32_t virtual_caps;
+};
+
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
+
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
+
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
+#endif
\ No newline at end of file
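
Usage of the new header is straightforward: detection code sets bits in virtual_caps once, and everything else tests them through the accessor macros. A kernel-flavored sketch mirroring the CIK detection added later in this series:

static void detect_hw_virtualization(struct amdgpu_device *adev)
{
        /* running under a hypervisor with the GPU passed through */
        if (is_virtual_machine())
                adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void report_virtualization(struct amdgpu_device *adev)
{
        if (amdgpu_passthrough(adev))
                DRM_INFO("GPU passthrough detected\n");
        if (amdgpu_sriov_vf(adev))
                DRM_INFO("running as an SR-IOV virtual function\n");
}
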
index 8e642fc48df45c665053a3b835d0abe803d7124b..a6a48ed9562e89cf2d229ba0153afc7907ac8dfa 100644 (file)
  * SI supports 16.
  */
 
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
 /* Local structure. Encapsulate some VM table update parameters to reduce
  * the number of function parameters
  */
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+       /* amdgpu device we do this update for */
+       struct amdgpu_device *adev;
        /* address where to copy page table entries from */
        uint64_t src;
-       /* DMA addresses to use for mapping */
-       dma_addr_t *pages_addr;
        /* indirect buffer to fill with commands */
        struct amdgpu_ib *ib;
+       /* Function which actually does the update */
+       void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+                    uint64_t addr, unsigned count, uint32_t incr,
+                    uint32_t flags);
+       /* indicates whether to update the page table or its shadow */
+       bool shadow;
 };
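
Folding adev, the copy source and the IB into one amdgpu_pte_update_params, plus the func callback, turns the PTE walkers into backend-agnostic code: they only ever call params->func(), and the caller decides once whether that means writing PTE values inline or copying them from a prebuilt buffer. A small user-space model of the dispatch:

#include <stdint.h>
#include <stdio.h>

struct pte_update_params {
        uint64_t src;   /* GPU address of prebuilt PTEs, if copying */
        void (*func)(struct pte_update_params *p, uint64_t pe,
                     uint64_t addr, unsigned count, uint32_t incr,
                     uint32_t flags);
};

static void do_set_ptes(struct pte_update_params *p, uint64_t pe,
                        uint64_t addr, unsigned count, uint32_t incr,
                        uint32_t flags)
{
        printf("write %u PTEs at pe=%#llx\n", count, (unsigned long long)pe);
}

static void do_copy_ptes(struct pte_update_params *p, uint64_t pe,
                         uint64_t addr, unsigned count, uint32_t incr,
                         uint32_t flags)
{
        printf("copy %u PTEs from %#llx\n", count,
               (unsigned long long)(p->src + (addr >> 12) * 8));
}

int main(void)
{
        struct pte_update_params p = { .src = 0x100000 };

        p.func = do_copy_ptes;          /* backend picked once...      */
        p.func(&p, 0x2000, 0x5000, 4, 4096, 0);

        p.func = do_set_ptes;           /* ...walker code is unchanged */
        p.func(&p, 0x2000, 0x5000, 4, 4096, 0);
        return 0;
}
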
 
 /**
@@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
  *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -480,34 +482,46 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-                                  struct amdgpu_vm_update_params
-                                       *vm_update_params,
-                                  uint64_t pe, uint64_t addr,
-                                  unsigned count, uint32_t incr,
-                                  uint32_t flags)
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+                                 uint64_t pe, uint64_t addr,
+                                 unsigned count, uint32_t incr,
+                                 uint32_t flags)
 {
        trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-       if (vm_update_params->src) {
-               amdgpu_vm_copy_pte(adev, vm_update_params->ib,
-                       pe, (vm_update_params->src + (addr >> 12) * 8), count);
-
-       } else if (vm_update_params->pages_addr) {
-               amdgpu_vm_write_pte(adev, vm_update_params->ib,
-                       vm_update_params->pages_addr,
-                       pe, addr, count, incr, flags);
-
-       } else if (count < 3) {
-               amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
-                                   count, incr, flags);
+       if (count < 3) {
+               amdgpu_vm_write_pte(params->adev, params->ib, pe,
+                                   addr | flags, count, incr);
 
        } else {
-               amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
+               amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
                                      count, incr, flags);
        }
 }
 
+/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+                                  uint64_t pe, uint64_t addr,
+                                  unsigned count, uint32_t incr,
+                                  uint32_t flags)
+{
+       trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+
+       amdgpu_vm_copy_pte(params->adev, params->ib, pe,
+                          (params->src + (addr >> 12) * 8), count);
+}
+
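
The source address arithmetic above assumes 4 KiB GPU pages and 8-byte PTEs: addr >> 12 is the index of the entry, and multiplying by 8 gives its byte offset in the prebuilt array at params->src. Worked out with concrete numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t src  = 0x100000;       /* GPU address of the PTE array */
        uint64_t addr = 0x12345000;     /* page-aligned address to map  */
        uint64_t entry = addr >> 12;    /* PTE index: 0x12345           */

        assert(src + entry * 8 == 0x100000 + 0x91a28);
        return 0;
}
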
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -523,12 +537,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        struct amdgpu_ring *ring;
        struct fence *fence = NULL;
        struct amdgpu_job *job;
-       struct amdgpu_vm_update_params vm_update_params;
+       struct amdgpu_pte_update_params params;
        unsigned entries;
        uint64_t addr;
        int r;
 
-       memset(&vm_update_params, 0, sizeof(vm_update_params));
        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
        r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -546,9 +559,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                goto error;
 
-       vm_update_params.ib = &job->ibs[0];
-       amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
-                              0, 0);
+       memset(&params, 0, sizeof(params));
+       params.adev = adev;
+       params.ib = &job->ibs[0];
+       amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
        WARN_ON(job->ibs[0].length_dw > 64);
@@ -577,55 +591,41 @@ error:
  * Look up the physical address of the page that the pte resolves
  * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
        uint64_t result;
 
-       if (pages_addr) {
-               /* page table offset */
-               result = pages_addr[addr >> PAGE_SHIFT];
+       /* page table offset */
+       result = pages_addr[addr >> PAGE_SHIFT];
 
-               /* in case cpu page size != gpu page size*/
-               result |= addr & (~PAGE_MASK);
-
-       } else {
-               /* No mapping required */
-               result = addr;
-       }
+       /* in case cpu page size != gpu page size */
+       result |= addr & (~PAGE_MASK);
 
        result &= 0xFFFFFFFFFFFFF000ULL;
 
        return result;
 }
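
With the NULL case gone, amdgpu_vm_map_gart() is a pure lookup. The OR with the in-page offset only matters when the CPU page size exceeds the 4 KiB GPU page size; the final mask then clamps the result to the PTE's address field. A user-space model using 64 KiB CPU pages, where those offset bits actually survive:

#include <stdint.h>
#include <stdio.h>

#define CPU_PAGE_SHIFT 16       /* e.g. 64 KiB pages; x86 would use 12 */

static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
        uint64_t result = pages_addr[addr >> CPU_PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & ((1ULL << CPU_PAGE_SHIFT) - 1);
        result &= 0xFFFFFFFFFFFFF000ULL;
        return result;
}

int main(void)
{
        uint64_t pages[2] = { 0xAAAA0000, 0xBBBB0000 };

        /* CPU page 1, GPU-page offset 0x3000 within it */
        printf("%#llx\n", (unsigned long long)map_gart(pages, 0x13000));
        /* prints 0xbbbb3000 */
        return 0;
}
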
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+                                        struct amdgpu_vm *vm,
+                                        bool shadow)
 {
        struct amdgpu_ring *ring;
-       struct amdgpu_bo *pd = vm->page_directory;
-       uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+       struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+               vm->page_directory;
+       uint64_t pd_addr;
        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct amdgpu_job *job;
-       struct amdgpu_vm_update_params vm_update_params;
+       struct amdgpu_pte_update_params params;
        struct fence *fence = NULL;
 
        int r;
 
-       memset(&vm_update_params, 0, sizeof(vm_update_params));
+       if (!pd)
+               return 0;
+       pd_addr = amdgpu_bo_gpu_offset(pd);
        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
        /* padding, etc. */
@@ -638,7 +638,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
        if (r)
                return r;
 
-       vm_update_params.ib = &job->ibs[0];
+       memset(&params, 0, sizeof(params));
+       params.adev = adev;
+       params.ib = &job->ibs[0];
 
        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -649,19 +651,25 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                        continue;
 
                pt = amdgpu_bo_gpu_offset(bo);
-               if (vm->page_tables[pt_idx].addr == pt)
-                       continue;
-               vm->page_tables[pt_idx].addr = pt;
+               if (!shadow) {
+                       if (vm->page_tables[pt_idx].addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].addr = pt;
+               } else {
+                       if (vm->page_tables[pt_idx].shadow_addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].shadow_addr = pt;
+               }
 
                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
-                   ((last_pt + incr * count) != pt)) {
+                   ((last_pt + incr * count) != pt) ||
+                   (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
                        if (count) {
-                               amdgpu_vm_update_pages(adev, &vm_update_params,
-                                                      last_pde, last_pt,
-                                                      count, incr,
-                                                      AMDGPU_PTE_VALID);
+                               amdgpu_vm_do_set_ptes(&params, last_pde,
+                                                     last_pt, count, incr,
+                                                     AMDGPU_PTE_VALID);
                        }
 
                        count = 1;
@@ -673,15 +681,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
        }
 
        if (count)
-               amdgpu_vm_update_pages(adev, &vm_update_params,
-                                       last_pde, last_pt,
-                                       count, incr, AMDGPU_PTE_VALID);
+               amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+                                     count, incr, AMDGPU_PTE_VALID);
 
-       if (vm_update_params.ib->length_dw != 0) {
-               amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+       if (params.ib->length_dw != 0) {
+               amdgpu_ring_pad_ib(ring, params.ib);
                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
-               WARN_ON(vm_update_params.ib->length_dw > ndw);
+               WARN_ON(params.ib->length_dw > ndw);
                r = amdgpu_job_submit(job, ring, &vm->entity,
                                      AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
@@ -703,92 +710,33 @@ error_free:
        return r;
 }
 
-/**
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+/*
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
- * @flags: hw mapping flags
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
  */
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-                               struct amdgpu_vm_update_params
-                                       *vm_update_params,
-                               uint64_t pe_start, uint64_t pe_end,
-                               uint64_t addr, uint32_t flags)
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                   struct amdgpu_vm *vm)
 {
-       /**
-        * The MC L1 TLB supports variable sized pages, based on a fragment
-        * field in the PTE. When this field is set to a non-zero value, page
-        * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
-        * flags are considered valid for all PTEs within the fragment range
-        * and corresponding mappings are assumed to be physically contiguous.
-        *
-        * The L1 TLB can store a single PTE for the whole fragment,
-        * significantly increasing the space available for translation
-        * caching. This leads to large improvements in throughput when the
-        * TLB is under pressure.
-        *
-        * The L2 TLB distributes small and large fragments into two
-        * asymmetric partitions. The large fragment cache is significantly
-        * larger. Thus, we try to use large fragments wherever possible.
-        * Userspace can support this by aligning virtual base address and
-        * allocation size to the fragment size.
-        */
-
-       /* SI and newer are optimized for 64KB */
-       uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
-       uint64_t frag_align = 0x80;
-
-       uint64_t frag_start = ALIGN(pe_start, frag_align);
-       uint64_t frag_end = pe_end & ~(frag_align - 1);
-
-       unsigned count;
-
-       /* Abort early if there isn't anything to do */
-       if (pe_start == pe_end)
-               return;
-
-       /* system pages are non continuously */
-       if (vm_update_params->src || vm_update_params->pages_addr ||
-               !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
-
-               count = (pe_end - pe_start) / 8;
-               amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
-                                      addr, count, AMDGPU_GPU_PAGE_SIZE,
-                                      flags);
-               return;
-       }
-
-       /* handle the 4K area at the beginning */
-       if (pe_start != frag_start) {
-               count = (frag_start - pe_start) / 8;
-               amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
-                                      count, AMDGPU_GPU_PAGE_SIZE, flags);
-               addr += AMDGPU_GPU_PAGE_SIZE * count;
-       }
-
-       /* handle the area in the middle */
-       count = (frag_end - frag_start) / 8;
-       amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
-                              AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+       int r;
 
-       /* handle the 4K area at the end */
-       if (frag_end != pe_end) {
-               addr += AMDGPU_GPU_PAGE_SIZE * count;
-               count = (pe_end - frag_end) / 8;
-               amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
-                                      count, AMDGPU_GPU_PAGE_SIZE, flags);
-       }
+       r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+       if (r)
+               return r;
+       return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
 }
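
Page directory updates now run twice: once against the optional shadow BO, once against the real directory. The shadow (allocated with the AMDGPU_GEM_CREATE_SHADOW flag added further down) appears to be a backup copy of the VRAM table, presumably so its contents can be restored if VRAM is lost, e.g. across a GPU reset. A toy model of the mirrored-write pattern:

#include <stdint.h>
#include <stdio.h>

struct table { uint64_t entry[4]; struct table *shadow; };

/* Write one entry, either to the table itself or to its shadow; a
 * missing shadow is simply skipped, mirroring the "if (!pd) return 0"
 * early-out above. */
static void write_entry(struct table *t, int idx, uint64_t val, int shadow)
{
        struct table *dst = shadow ? t->shadow : t;

        if (!dst)
                return;
        dst->entry[idx] = val;
}

int main(void)
{
        struct table sh = { .shadow = NULL }, pd = { .shadow = &sh };

        write_entry(&pd, 0, 0xdead000, 1);      /* shadow pass */
        write_entry(&pd, 0, 0xdead000, 0);      /* real pass   */
        printf("pd=%#llx shadow=%#llx\n",
               (unsigned long long)pd.entry[0],
               (unsigned long long)sh.entry[0]);
        return 0;
}
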
 
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
@@ -797,16 +745,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  *
  * Update the page tables in the range @start - @end.
  */
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-                                 struct amdgpu_vm_update_params
-                                       *vm_update_params,
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                  struct amdgpu_vm *vm,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
 {
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
-       uint64_t cur_pe_start, cur_pe_end, cur_dst;
+       uint64_t cur_pe_start, cur_nptes, cur_dst;
        uint64_t addr; /* next GPU address to be updated */
        uint64_t pt_idx;
        struct amdgpu_bo *pt;
@@ -817,7 +763,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
        addr = start;
        pt_idx = addr >> amdgpu_vm_block_size;
        pt = vm->page_tables[pt_idx].entry.robj;
-
+       if (params->shadow) {
+               if (!pt->shadow)
+                       return;
+               pt = vm->page_tables[pt_idx].entry.robj->shadow;
+       }
        if ((addr & ~mask) == (end & ~mask))
                nptes = end - addr;
        else
@@ -825,7 +775,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 
        cur_pe_start = amdgpu_bo_gpu_offset(pt);
        cur_pe_start += (addr & mask) * 8;
-       cur_pe_end = cur_pe_start + 8 * nptes;
+       cur_nptes = nptes;
        cur_dst = dst;
 
        /* for next ptb*/
@@ -836,6 +786,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
        while (addr < end) {
                pt_idx = addr >> amdgpu_vm_block_size;
                pt = vm->page_tables[pt_idx].entry.robj;
+               if (params->shadow) {
+                       if (!pt->shadow)
+                               return;
+                       pt = vm->page_tables[pt_idx].entry.robj->shadow;
+               }
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
@@ -845,19 +800,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                next_pe_start = amdgpu_bo_gpu_offset(pt);
                next_pe_start += (addr & mask) * 8;
 
-               if (cur_pe_end == next_pe_start) {
+               if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+                   ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
                        /* The next ptb is consecutive to current ptb.
-                        * Don't call amdgpu_vm_frag_ptes now.
+                        * Don't call the update function now.
                         * Will update two ptbs together in future.
                        */
-                       cur_pe_end += 8 * nptes;
+                       cur_nptes += nptes;
                } else {
-                       amdgpu_vm_frag_ptes(adev, vm_update_params,
-                                           cur_pe_start, cur_pe_end,
-                                           cur_dst, flags);
+                       params->func(params, cur_pe_start, cur_dst, cur_nptes,
+                                    AMDGPU_GPU_PAGE_SIZE, flags);
 
                        cur_pe_start = next_pe_start;
-                       cur_pe_end = next_pe_start + 8 * nptes;
+                       cur_nptes = nptes;
                        cur_dst = dst;
                }
 
@@ -866,8 +821,79 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }
 
-       amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
-                           cur_pe_end, cur_dst, flags);
+       params->func(params, cur_pe_start, cur_dst, cur_nptes,
+                    AMDGPU_GPU_PAGE_SIZE, flags);
+}
+
+/*
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
+ * @flags: hw mapping flags
+ */
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+                               struct amdgpu_vm *vm,
+                               uint64_t start, uint64_t end,
+                               uint64_t dst, uint32_t flags)
+{
+       /**
+        * The MC L1 TLB supports variable sized pages, based on a fragment
+        * field in the PTE. When this field is set to a non-zero value, page
+        * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+        * flags are considered valid for all PTEs within the fragment range
+        * and corresponding mappings are assumed to be physically contiguous.
+        *
+        * The L1 TLB can store a single PTE for the whole fragment,
+        * significantly increasing the space available for translation
+        * caching. This leads to large improvements in throughput when the
+        * TLB is under pressure.
+        *
+        * The L2 TLB distributes small and large fragments into two
+        * asymmetric partitions. The large fragment cache is significantly
+        * larger. Thus, we try to use large fragments wherever possible.
+        * Userspace can support this by aligning virtual base address and
+        * allocation size to the fragment size.
+        */
+
+       const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+
+       uint64_t frag_start = ALIGN(start, frag_align);
+       uint64_t frag_end = end & ~(frag_align - 1);
+
+       uint32_t frag;
+
+       /* system pages are not physically contiguous */
+       if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+           (frag_start >= frag_end)) {
+
+               amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
+               return;
+       }
+
+       /* use more than 64KB fragment size if possible */
+       frag = lower_32_bits(frag_start | frag_end);
+       frag = likely(frag) ? __ffs(frag) : 31;
+
+       /* handle the 4K area at the beginning */
+       if (start != frag_start) {
+               amdgpu_vm_update_ptes(params, vm, start, frag_start,
+                                     dst, flags);
+               dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
+       }
+
+       /* handle the area in the middle */
+       amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+                             flags | AMDGPU_PTE_FRAG(frag));
+
+       /* handle the 4K area at the end */
+       if (frag_end != end) {
+               dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+               amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
+       }
 }
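
The fragment pick above is the interesting new bit: __ffs(frag_start | frag_end) is the index of the lowest set bit, i.e. the largest power of two dividing both boundaries, so the range is advertised with the biggest fragment its alignment permits (31 is the saturating fallback when both boundaries are zero). A user-space rerun of the computation, assuming AMDGPU_LOG2_PAGES_PER_FRAG is 4, i.e. 64 KiB fragments:

#include <stdint.h>
#include <stdio.h>

static unsigned ffs64(uint64_t x)       /* stand-in for __ffs() */
{
        return __builtin_ctzll(x);
}

int main(void)
{
        const uint64_t frag_align = 1 << 4;     /* 16 pages = 64 KiB */
        uint64_t start = 16, end = 48;          /* page numbers      */
        uint64_t frag_start = (start + frag_align - 1) & ~(frag_align - 1);
        uint64_t frag_end = end & ~(frag_align - 1);
        uint64_t bits = frag_start | frag_end;
        unsigned frag = bits ? ffs64(bits) : 31;

        /* 16 | 48 = 48, lowest set bit 4: 2^4 pages per fragment,
         * i.e. PTE page granularity 1 << (12 + 4) = 64 KiB */
        printf("frag = %u\n", frag);
        return 0;
}
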
 
 /**
@@ -900,14 +926,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned nptes, ncmds, ndw;
        struct amdgpu_job *job;
-       struct amdgpu_vm_update_params vm_update_params;
+       struct amdgpu_pte_update_params params;
        struct fence *f = NULL;
        int r;
 
+       memset(&params, 0, sizeof(params));
+       params.adev = adev;
+       params.src = src;
+
        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-       memset(&vm_update_params, 0, sizeof(vm_update_params));
-       vm_update_params.src = src;
-       vm_update_params.pages_addr = pages_addr;
 
        /* sync to everything on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
@@ -924,30 +955,52 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        /* padding, etc. */
        ndw = 64;
 
-       if (vm_update_params.src) {
+       if (src) {
                /* only copy commands needed */
                ndw += ncmds * 7;
 
-       } else if (vm_update_params.pages_addr) {
-               /* header for write data commands */
-               ndw += ncmds * 4;
+               params.func = amdgpu_vm_do_copy_ptes;
 
-               /* body of write data command */
+       } else if (pages_addr) {
+               /* copy commands needed */
+               ndw += ncmds * 7;
+
+               /* and also PTEs */
                ndw += nptes * 2;
 
+               params.func = amdgpu_vm_do_copy_ptes;
+
        } else {
                /* set page commands needed */
                ndw += ncmds * 10;
 
                /* two extra commands for begin/end of fragment */
                ndw += 2 * 10;
+
+               params.func = amdgpu_vm_do_set_ptes;
        }
 
        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;
 
-       vm_update_params.ib = &job->ibs[0];
+       params.ib = &job->ibs[0];
+
+       if (!src && pages_addr) {
+               uint64_t *pte;
+               unsigned i;
+
+               /* Put the PTEs at the end of the IB. */
+               i = ndw - nptes * 2;
+       pte = (uint64_t *)&(job->ibs->ptr[i]);
+               params.src = job->ibs->gpu_addr + i * 4;
+
+               for (i = 0; i < nptes; ++i) {
+                       pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+                                                   AMDGPU_GPU_PAGE_SIZE);
+                       pte[i] |= flags;
+               }
+       }
 
        r = amdgpu_sync_fence(adev, &job->sync, exclusive);
        if (r)
@@ -962,11 +1015,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
-                             last + 1, addr, flags);
+       params.shadow = true;
+       amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+       params.shadow = false;
+       amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 
-       amdgpu_ring_pad_ib(ring, vm_update_params.ib);
-       WARN_ON(vm_update_params.ib->length_dw > ndw);
+       amdgpu_ring_pad_ib(ring, params.ib);
+       WARN_ON(params.ib->length_dw > ndw);
        r = amdgpu_job_submit(job, ring, &vm->entity,
                              AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
@@ -1062,28 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
  * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
-                       struct ttm_mem_reg *mem)
+                       bool clear)
 {
        struct amdgpu_vm *vm = bo_va->vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint32_t gtt_flags, flags;
+       struct ttm_mem_reg *mem;
        struct fence *exclusive;
        uint64_t addr;
        int r;
 
-       if (mem) {
+       if (clear) {
+               mem = NULL;
+               addr = 0;
+               exclusive = NULL;
+       } else {
                struct ttm_dma_tt *ttm;
 
+               mem = &bo_va->bo->tbo.mem;
                addr = (u64)mem->start << PAGE_SHIFT;
                switch (mem->mem_type) {
                case TTM_PL_TT:
@@ -1101,13 +1160,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                }
 
                exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
-       } else {
-               addr = 0;
-               exclusive = NULL;
        }
 
        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-       gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+       gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+               adev == bo_va->bo->adev) ? flags : 0;
 
        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->vm_status))
@@ -1134,7 +1191,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
-       if (!mem)
+       if (clear)
                list_add(&bo_va->vm_status, &vm->cleared);
        spin_unlock(&vm->status_lock);
 
@@ -1197,7 +1254,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                        struct amdgpu_bo_va, vm_status);
                spin_unlock(&vm->status_lock);
 
-               r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, true);
                if (r)
                        return r;
 
@@ -1342,7 +1399,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_VRAM,
-                                    AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+                                    AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+                                    AMDGPU_GEM_CREATE_SHADOW,
                                     NULL, resv, &pt);
                if (r)
                        goto error_free;
@@ -1535,13 +1593,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        r = amd_sched_entity_init(&ring->sched, &vm->entity,
                                  rq, amdgpu_sched_jobs);
        if (r)
-               return r;
+               goto err;
 
        vm->page_directory_fence = NULL;
 
        r = amdgpu_bo_create(adev, pd_size, align, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
-                            AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+                            AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+                            AMDGPU_GEM_CREATE_SHADOW,
                             NULL, NULL, &vm->page_directory);
        if (r)
                goto error_free_sched_entity;
@@ -1565,6 +1624,9 @@ error_free_page_directory:
 error_free_sched_entity:
        amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+       drm_free_large(vm->page_tables);
+
        return r;
 }
 
@@ -1597,10 +1659,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                kfree(mapping);
        }
 
-       for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+       for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+               if (vm->page_tables[i].entry.robj &&
+                   vm->page_tables[i].entry.robj->shadow)
+                       amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+       }
        drm_free_large(vm->page_tables);
 
+       if (vm->page_directory->shadow)
+               amdgpu_bo_unref(&vm->page_directory->shadow);
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
 }
index 49a39b1a0a9637290b8b874eabef5ffba84c04ab..f7d236f95e74262dd6970050de140c1b01f0b017 100644 (file)
@@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
                         * SetPixelClock provides the dividers
                         */
                        args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
-                       args.v6.ucPpll = ATOM_EXT_PLL1;
+                       if (adev->asic_type == CHIP_TAHITI ||
+                           adev->asic_type == CHIP_PITCAIRN ||
+                           adev->asic_type == CHIP_VERDE ||
+                           adev->asic_type == CHIP_OLAND)
+                               args.v6.ucPpll = ATOM_PPLL0;
+                       else
+                               args.v6.ucPpll = ATOM_EXT_PLL1;
                        break;
                default:
                        DRM_ERROR("Unknown table version %d %d\n", frev, crev);
index 7f85c2c1d68156a4d91bd4b18332df6f1886fb7c..f81068ba4cc67c9c89e2c817b00320a567b8c5f8 100644 (file)
@@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
 
        /* timeout */
        if (args.v2.ucReplyStatus == 1) {
-               DRM_DEBUG_KMS("dp_aux_ch timeout\n");
                r = -ETIMEDOUT;
                goto done;
        }
@@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
 {
        struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret, i;
+       int ret;
 
-       for (i = 0; i < 7; i++) {
-               ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-                                      DP_DPCD_SIZE);
-               if (ret == DP_DPCD_SIZE) {
-                       memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+       ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+                              msg, DP_DPCD_SIZE);
+       if (ret == DP_DPCD_SIZE) {
+               memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-                       DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-                                     dig_connector->dpcd);
+               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                             dig_connector->dpcd);
 
-                       amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+               amdgpu_atombios_dp_probe_oui(amdgpu_connector);
 
-                       return 0;
-               }
+               return 0;
        }
+
        dig_connector->dpcd[0] = 0;
        return -EINVAL;
 }
index bc56c8a181e628b575861233bd705dd986f9f061..b374653bd6cf39849db7570583f7cd2b082c3518 100644 (file)
@@ -27,6 +27,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 
 #define TARGET_HW_I2C_CLOCK 50
 
index e2f0e5d58d5cc05abd29ed65037e38a060fd3145..1d8c375a3561c9f872a4d87c0b435048f5a4b10b 100644 (file)
@@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
 
-       ci_dpm_powergate_uvd(adev, false);
+       ci_dpm_powergate_uvd(adev, true);
 
        if (!amdgpu_ci_is_smc_running(adev))
                return;
@@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
        default: BUG();
        }
 
@@ -5873,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
        pi->pcie_dpm_key_disabled = 0;
        pi->thermal_sclk_dpm_enabled = 0;
 
-       pi->caps_sclk_ds = true;
+       if (amdgpu_sclk_deep_sleep_en)
+               pi->caps_sclk_ds = true;
+       else
+               pi->caps_sclk_ds = false;
 
        pi->mclk_strobe_mode_threshold = 40000;
        pi->mclk_stutter_mode_threshold = 40000;
@@ -6032,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 
        pi->caps_dynamic_ac_timing = true;
 
-       pi->uvd_power_gated = false;
+       pi->uvd_power_gated = true;
 
        /* make sure dc limits are valid */
        if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6175,8 +6179,6 @@ static int ci_dpm_late_init(void *handle)
        if (ret)
                return ret;
 
-       ci_dpm_powergate_uvd(adev, true);
-
        return 0;
 }
 
index 4efc901f658c0e60d1d6e96a11bb083e761c4140..a845b6a93b79bed19cd8027f867ca65460cd27bf 100644 (file)
@@ -67,6 +67,7 @@
 
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
 
 /*
  * Indirect registers accessor
@@ -962,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
-       /* CIK does not support SR-IOV */
-       return 0;
-}
-
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
@@ -1640,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
                >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+       if (is_virtual_machine()) /* passthrough mode */
+               adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
 static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 8,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 7,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &gfx_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_sdma_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 4,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &uvd_v4_2_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v2_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 8,
+               .minor = 5,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 7,
+               .minor = 3,
+               .rev = 0,
+               .funcs = &gfx_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_sdma_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 4,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &uvd_v4_2_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v2_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 8,
+               .minor = 3,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 7,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &gfx_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_sdma_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 4,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &uvd_v4_2_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v2_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 8,
+               .minor = 3,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 7,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &gfx_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_sdma_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 4,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &uvd_v4_2_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v2_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 8,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &gfx_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cik_sdma_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 4,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &uvd_v4_2_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v2_0_ip_funcs,
+       },
+};
+
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-       switch (adev->asic_type) {
-       case CHIP_BONAIRE:
-               adev->ip_blocks = bonaire_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
-               break;
-       case CHIP_HAWAII:
-               adev->ip_blocks = hawaii_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
-               break;
-       case CHIP_KAVERI:
-               adev->ip_blocks = kaveri_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
-               break;
-       case CHIP_KABINI:
-               adev->ip_blocks = kabini_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
-               break;
-       case CHIP_MULLINS:
-               adev->ip_blocks = mullins_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
-               break;
-       default:
-               /* FIXME: not supported yet */
-               return -EINVAL;
+       if (adev->enable_virtual_display) {
+               switch (adev->asic_type) {
+               case CHIP_BONAIRE:
+                       adev->ip_blocks = bonaire_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+                       break;
+               case CHIP_HAWAII:
+                       adev->ip_blocks = hawaii_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+                       break;
+               case CHIP_KAVERI:
+                       adev->ip_blocks = kaveri_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+                       break;
+               case CHIP_KABINI:
+                       adev->ip_blocks = kabini_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+                       break;
+               case CHIP_MULLINS:
+                       adev->ip_blocks = mullins_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+                       break;
+               default:
+                       /* FIXME: not supported yet */
+                       return -EINVAL;
+               }
+       } else {
+               switch (adev->asic_type) {
+               case CHIP_BONAIRE:
+                       adev->ip_blocks = bonaire_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+                       break;
+               case CHIP_HAWAII:
+                       adev->ip_blocks = hawaii_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+                       break;
+               case CHIP_KAVERI:
+                       adev->ip_blocks = kaveri_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+                       break;
+               case CHIP_KABINI:
+                       adev->ip_blocks = kabini_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+                       break;
+               case CHIP_MULLINS:
+                       adev->ip_blocks = mullins_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+                       break;
+               default:
+                       /* FIXME: not supported yet */
+                       return -EINVAL;
+               }
        }
 
        return 0;
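
Each *_ip_blocks_vd table above is identical to its non-vd twin except for the DCE entry, whose funcs point at dce_virtual_ip_funcs instead of the real display hardware. The doubled switch works but scales poorly with new ASICs; a more table-driven shape (a sketch of an alternative, not what the patch does) could look like:

struct cik_ip_tables {
        const struct amdgpu_ip_block_version *blocks;
        unsigned num_blocks;
        const struct amdgpu_ip_block_version *blocks_vd;
        unsigned num_blocks_vd;
};

/* Pick the virtual-display variant of the same ASIC's table when
 * enable_virtual_display is set; only the DCE entry differs. */
static int cik_pick_ip_blocks(struct amdgpu_device *adev,
                              const struct cik_ip_tables *t)
{
        if (adev->enable_virtual_display) {
                adev->ip_blocks = t->blocks_vd;
                adev->num_ip_blocks = t->num_blocks_vd;
        } else {
                adev->ip_blocks = t->blocks;
                adev->num_ip_blocks = t->num_blocks;
        }
        return 0;
}
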
@@ -2015,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
        .read_disabled_bios = &cik_read_disabled_bios,
        .read_bios_from_rom = &cik_read_bios_from_rom,
+       .detect_hw_virtualization = cik_detect_hw_virtualization,
        .read_register = &cik_read_register,
        .reset = &cik_asic_reset,
        .set_vga_state = &cik_vga_set_state,
        .get_xclk = &cik_get_xclk,
        .set_uvd_clocks = &cik_set_uvd_clocks,
        .set_vce_clocks = &cik_set_vce_clocks,
-       .get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)
index ee6466912497b7cfc608db429e144d3a2dc3387e..cb952acc71339e31ac613a896fd1c268b0f327dc 100644 (file)
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -694,24 +695,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
                                 uint64_t pe, uint64_t src,
                                 unsigned count)
 {
-       while (count) {
-               unsigned bytes = count * 8;
-               if (bytes > 0x1FFFF8)
-                       bytes = 0x1FFFF8;
-
-               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
-                       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-               ib->ptr[ib->length_dw++] = bytes;
-               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-               ib->ptr[ib->length_dw++] = lower_32_bits(src);
-               ib->ptr[ib->length_dw++] = upper_32_bits(src);
-               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-               pe += bytes;
-               src += bytes;
-               count -= bytes / 8;
-       }
+       unsigned bytes = count * 8;
+
+       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       ib->ptr[ib->length_dw++] = bytes;
+       ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+       ib->ptr[ib->length_dw++] = lower_32_bits(src);
+       ib->ptr[ib->length_dw++] = upper_32_bits(src);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -719,39 +712,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-                                 const dma_addr_t *pages_addr, uint64_t pe,
-                                 uint64_t addr, unsigned count,
-                                 uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                 uint64_t value, unsigned count,
+                                 uint32_t incr)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
-
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
-                       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }
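Each WRITE_LINEAR packet built above costs a fixed four-dword header plus two dwords per entry, which is the figure IB-sizing code has to budget for. The arithmetic, as an illustrative helper (name hypothetical):

	/* Sketch: ring dwords consumed by one cik_sdma_vm_write_pte() call. */
	static unsigned cik_sdma_write_pte_ndw(unsigned count)
	{
		return 4 + count * 2;	/* header + lo/hi dwords per PTE */
	}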
 
@@ -767,40 +748,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
  *
  * Update the page tables using sDMA (CIK).
  */
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
-                                   uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint32_t flags)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count;
-               if (ndw > 0x7FFFF)
-                       ndw = 0x7FFFF;
-
-               if (flags & AMDGPU_PTE_VALID)
-                       value = addr;
-               else
-                       value = 0;
-
-               /* for physically contiguous pages (vram) */
-               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-               ib->ptr[ib->length_dw++] = pe; /* dst addr */
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = flags; /* mask */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = value; /* value */
-               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               ib->ptr[ib->length_dw++] = incr; /* increment size */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-               pe += ndw * 8;
-               addr += ndw * incr;
-               count -= ndw;
-       }
+       /* for physically contiguous pages (vram) */
+       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = flags; /* mask */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = incr; /* increment size */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -886,6 +848,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* cik_sdma_ring_emit_hdp_flush */
+               3 + /* cik_sdma_ring_emit_hdp_invalidate */
+               6 + /* cik_sdma_ring_emit_pipeline_sync */
+               12 + /* cik_sdma_ring_emit_vm_flush */
+               9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
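These two callbacks let common code compute a worst-case ring reservation instead of hard-coding per-ASIC sizes. A hedged sketch of how a submission path might combine them (the arithmetic is illustrative; amdgpu_ring_alloc() is the existing allocator):

	/* Sketch: reserve worst-case ring space before emitting a job. */
	unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
		       num_ibs * ring->funcs->get_emit_ib_size(ring);
	int r = amdgpu_ring_alloc(ring, ndw);

	if (r)
		return r;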
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
                                 bool enable)
 {
@@ -1037,6 +1015,8 @@ static int cik_sdma_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cik_sdma_soft_reset(handle);
+
        return cik_sdma_hw_init(adev);
 }
 
@@ -1259,6 +1239,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
        .test_ib = cik_sdma_ring_test_ib,
        .insert_nop = cik_sdma_ring_insert_nop,
        .pad_ib = cik_sdma_ring_pad_ib,
+       .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+       .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
index c4f6f00d62bcb21d0daf8e7bedba17c3c0a5eb21..8659852aea9ef05db22c5b5da03125dbc4903195 100644 (file)
@@ -562,4 +562,40 @@ enum {
        MTYPE_NONCACHED = 3
 };
 
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)                         ((x) << 0)
+#define RB_MAP_PKR0_MASK                       (0x3 << 0)
+#define RB_MAP_PKR1(x)                         ((x) << 2)
+#define RB_MAP_PKR1_MASK                       (0x3 << 2)
+#define RB_XSEL2(x)                            ((x) << 4)
+#define RB_XSEL2_MASK                          (0x3 << 4)
+#define RB_XSEL                                        (1 << 6)
+#define RB_YSEL                                        (1 << 7)
+#define PKR_MAP(x)                             ((x) << 8)
+#define PKR_MAP_MASK                           (0x3 << 8)
+#define PKR_XSEL(x)                            ((x) << 10)
+#define PKR_XSEL_MASK                          (0x3 << 10)
+#define PKR_YSEL(x)                            ((x) << 12)
+#define PKR_YSEL_MASK                          (0x3 << 12)
+#define SC_MAP(x)                              ((x) << 16)
+#define SC_MAP_MASK                            (0x3 << 16)
+#define SC_XSEL(x)                             ((x) << 18)
+#define SC_XSEL_MASK                           (0x3 << 18)
+#define SC_YSEL(x)                             ((x) << 20)
+#define SC_YSEL_MASK                           (0x3 << 20)
+#define SE_MAP(x)                              ((x) << 24)
+#define SE_MAP_MASK                            (0x3 << 24)
+#define SE_XSEL(x)                             ((x) << 26)
+#define SE_XSEL_MASK                           (0x3 << 26)
+#define SE_YSEL(x)                             ((x) << 28)
+#define SE_YSEL_MASK                           (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)                         ((x) << 0)
+#define SE_PAIR_MAP_MASK                       (0x3 << 0)
+#define SE_PAIR_XSEL(x)                                ((x) << 2)
+#define SE_PAIR_XSEL_MASK                      (0x3 << 2)
+#define SE_PAIR_YSEL(x)                                ((x) << 4)
+#define SE_PAIR_YSEL_MASK                      (0x3 << 4)
+
 #endif
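Most of these fields are two bits wide, so a full raster configuration is composed by OR-ing the shifted fields and decoded again with the *_MASK constants. A small illustrative composition (values arbitrary):

	/* Sketch: build and decode a PA_SC_RASTER_CONFIG value. */
	u32 raster_config = RB_MAP_PKR0(2) | PKR_MAP(1) |
			    SE_MAP(1) | SE_XSEL(1) | SE_YSEL(1);
	u32 se_map = (raster_config & SE_MAP_MASK) >> 24;	/* == 1 */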
index 2a11413ed54a763cb1f2f0befa39eaacccc109c9..f80a0834e889e8ff07b77846f28fcb619632ca14 100644 (file)
@@ -44,6 +44,7 @@
 
 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
 static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
 
 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
@@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev)
 
                ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
                if (ps == NULL) {
+                       for (j = 0; j < i; j++)
+                               kfree(adev->pm.dpm.ps[j].ps_priv);
                        kfree(adev->pm.dpm.ps);
                        return -ENOMEM;
                }
@@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 
        ret = amdgpu_get_platform_caps(adev);
        if (ret)
-               return ret;
+               goto err;
 
        ret = amdgpu_parse_extended_power_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        pi->sram_end = SMC_RAM_END;
 
@@ -435,7 +438,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
                pi->caps_td_ramping = true;
                pi->caps_tcp_ramping = true;
        }
-       pi->caps_sclk_ds = true;
+       if (amdgpu_sclk_deep_sleep_en)
+               pi->caps_sclk_ds = true;
+       else
+               pi->caps_sclk_ds = false;
+
        pi->voting_clients = 0x00c00033;
        pi->auto_thermal_throttling_enabled = true;
        pi->bapm_enabled = false;
@@ -463,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 
        ret = cz_parse_sys_info_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        cz_patch_voltage_values(adev);
        cz_construct_boot_state(adev);
 
        ret = cz_parse_power_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        ret = cz_process_firmware_header(adev);
        if (ret)
-               return ret;
+               goto err;
 
        pi->dpm_enabled = true;
        pi->uvd_dynamic_pg = false;
 
        return 0;
+err:
+       cz_dpm_fini(adev);
+       return ret;
 }
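Every early return in cz_dpm_init() now funnels through the err label, so a failure at any stage unwinds whatever the earlier stages allocated via cz_dpm_fini(). Reduced to a sketch, with hypothetical step_a/step_b/fini_example names:

	/* Sketch of the goto-based unwinding idiom adopted above. */
	static int init_example(struct amdgpu_device *adev)
	{
		int ret;

		ret = step_a(adev);
		if (ret)
			goto err;
		ret = step_b(adev);
		if (ret)
			goto err;
		return 0;
	err:
		fini_example(adev);	/* must cope with partial init */
		return ret;
	}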
 
 static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -668,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev)
        struct cz_power_info *pi = cz_get_pi(adev);
 
        pi->active_process_mask = 0;
-
 }
 
 static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
                                                        void **table)
 {
-       int ret = 0;
-
-       ret = cz_smu_download_pptable(adev, table);
-
-       return ret;
+       return cz_smu_download_pptable(adev, table);
 }
 
 static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -818,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev)
        pi->sclk_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].clk;
-       else {
+       } else {
                DRM_ERROR("Invalid SCLK Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].clk;
        }
@@ -846,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev)
        pi->uvd_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].vclk;
-       else {
+       } else {
                DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].vclk;
        }
@@ -874,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
        pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].ecclk;
-       else {
+       } else {
                /* future BIOS would fix this error */
                DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].ecclk;
@@ -903,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev)
        pi->acp_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].clk;
-       else {
+       } else {
                DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].clk;
        }
@@ -930,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev)
        struct cz_power_info *pi = cz_get_pi(adev);
 
        pi->low_sclk_interrupt_threshold = 0;
-
 }
 
 static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1203,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
        int ret;
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
-                       pi->caps_td_ramping || pi->caps_tcp_ramping) {
+           pi->caps_td_ramping || pi->caps_tcp_ramping) {
                if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
                        ret = cz_disable_cgpg(adev);
                        if (ret) {
@@ -1277,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
        ps->force_high = false;
        ps->need_dfs_bypass = true;
        pi->video_start = new_rps->dclk || new_rps->vclk ||
-                               new_rps->evclk || new_rps->ecclk;
+                         new_rps->evclk || new_rps->ecclk;
 
        if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
                        ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1335,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
        }
 
        cz_reset_acp_boot_level(adev);
-
        cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
 
        return 0;
@@ -1665,7 +1668,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
        struct amdgpu_ps *ps = &pi->requested_rps;
 
        cz_update_current_ps(adev, ps);
-
 }
 
 static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2108,29 +2110,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
                        /* disable clockgating so we can properly shut down the block */
                        ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                            AMD_CG_STATE_UNGATE);
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+                               return;
+                       }
+
                        /* shutdown the UVD block */
                        ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                            AMD_PG_STATE_GATE);
-                       /* XXX: check for errors */
+
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+                               return;
+                       }
                }
                cz_update_uvd_dpm(adev, gate);
-               if (pi->caps_uvd_pg)
+               if (pi->caps_uvd_pg) {
                        /* power off the UVD block */
-                       cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+                       ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+                               return;
+                       }
+               }
        } else {
                if (pi->caps_uvd_pg) {
                        /* power on the UVD block */
                        if (pi->uvd_dynamic_pg)
-                               cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+                               ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
                        else
-                               cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+                               ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerON message\n");
+                               return;
+                       }
+
                        /* re-init the UVD block */
                        ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                            AMD_PG_STATE_UNGATE);
+
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+                               return;
+                       }
+
                        /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
                        ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                            AMD_CG_STATE_GATE);
-                       /* XXX: check for errors */
+                       if (ret) {
+                               DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+                               return;
+                       }
                }
                cz_update_uvd_dpm(adev, gate);
        }
@@ -2168,7 +2199,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
        /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
        if (pi->caps_stable_power_state) {
                pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
        } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
                /* leave it as set by user */
                /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
index ac7fee7b7ecaf1054b35ce11925f7124aabaec58..aed7033c09735f86a8982d6bc07d8c29c5aab775 100644 (file)
@@ -29,6 +29,8 @@
 #include "cz_smumgr.h"
 #include "smu_ucode_xfer_cz.h"
 #include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
 
 #include "smu/smu_8_0_d.h"
 #include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
        return priv;
 }
 
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
 {
        int i;
        u32 content = 0, tmp;
@@ -99,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
        return 0;
 }
 
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
-                                               u16 msg, u32 parameter)
-{
-       WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
-       return cz_send_msg_to_smc_async(adev, msg);
-}
-
 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                u16 msg, u32 parameter)
 {
@@ -140,7 +135,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
        return 0;
 }
 
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
                                                u32 value, u32 limit)
 {
        int ret;
index c1b04e9aab574301acd7594537a9b397d9a5cf7c..9d38fe0519e8f4c99730fb3f0c9e6d7ba77fa3d8 100644 (file)
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-       unsigned i = 0;
+       unsigned i = 100;
 
        if (crtc >= adev->mode_info.num_crtc)
                return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
         * wait for another frame.
         */
        while (dce_v10_0_is_in_vblank(adev, crtc)) {
-               if (i++ % 100 == 0) {
+               if (i++ == 100) {
+                       i = 0;
                        if (!dce_v10_0_is_counter_moving(adev, crtc))
                                break;
                }
        }
 
        while (!dce_v10_0_is_in_vblank(adev, crtc)) {
-               if (i++ % 100 == 0) {
+               if (i++ == 100) {
+                       i = 0;
                        if (!dce_v10_0_is_counter_moving(adev, crtc))
                                break;
                }
@@ -646,8 +648,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
 
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
                                WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +714,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
        WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+       int num_crtc = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_FIJI:
+       case CHIP_TONGA:
+               num_crtc = 6;
+               break;
+       default:
+               num_crtc = 0;
+       }
+       return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+       /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+       if (amdgpu_atombios_has_dce_engine_info(adev)) {
+               u32 tmp;
+               int crtc_enabled, i;
+
+               dce_v10_0_set_vga_render_state(adev, false);
+
+               /* Disable CRTCs */
+               for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+                       crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+                                                                        CRTC_CONTROL, CRTC_MASTER_EN);
+                       if (crtc_enabled) {
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+                               tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+                               WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+               }
+       }
+}
+
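dce_v10_0_disable_dce() leans on the REG_GET_FIELD()/REG_SET_FIELD() helpers; open-coded, the CRTC_MASTER_EN update is the usual mask-and-shift read-modify-write. An equivalent form, assuming the generated CRTC_CONTROL__CRTC_MASTER_EN_MASK/__SHIFT definitions from the sh_mask headers:

	/* Sketch: what the REG_SET_FIELD() sequence amounts to. */
	u32 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);

	tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;		/* clear field */
	tmp |= 0 << CRTC_CONTROL__CRTC_MASTER_EN__SHIFT;	/* master off */
	WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);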
 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
@@ -2071,6 +2112,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
        u32 tmp, viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
+       char *format_name;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -2182,8 +2224,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
                bypass_lut = true;
                break;
        default:
-               DRM_ERROR("Unsupported screen format %s\n",
-                       drm_get_format_name(target_fb->pixel_format));
+               format_name = drm_get_format_name(target_fb->pixel_format);
+               DRM_ERROR("Unsupported screen format %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
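The change reflects drm_get_format_name() returning a kmalloc'd string in this kernel, so the caller owns the buffer and must kfree() it even on error paths. The pattern in miniature (assuming the 4.9-era signature):

	/* Sketch: caller owns the name buffer returned by the 4.9-era API. */
	char *name = drm_get_format_name(fb->pixel_format);

	DRM_ERROR("Unsupported screen format %s\n", name);
	kfree(name);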
 
@@ -2275,8 +2318,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
-       /* set pageflip to happen only at start of vblank interval (front porch) */
-       WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2698,7 +2741,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
        .gamma_set = dce_v10_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
        .destroy = dce_v10_0_crtc_destroy,
-       .page_flip = amdgpu_crtc_page_flip,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2962,10 +3005,11 @@ static int dce_v10_0_early_init(void *handle)
        dce_v10_0_set_display_funcs(adev);
        dce_v10_0_set_irq_funcs(adev);
 
+       adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
        switch (adev->asic_type) {
        case CHIP_FIJI:
        case CHIP_TONGA:
-               adev->mode_info.num_crtc = 6; /* XXX 7??? */
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
@@ -3141,11 +3185,26 @@ static int dce_v10_0_wait_for_idle(void *handle)
        return 0;
 }
 
+static int dce_v10_0_check_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (dce_v10_0_is_display_hung(adev))
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
+       else
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
+
+       return 0;
+}
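check_soft_reset() records per-block hang state so that soft_reset() can return immediately when DCE is healthy; the common IP code is expected to run the check pass first. The calling order, sketched loosely (loop shape and indexing illustrative):

	/* Sketch: probe all blocks first, then reset only flagged ones. */
	adev->ip_blocks[i].funcs->check_soft_reset(adev);	/* sets .hang */
	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
		adev->ip_blocks[i].funcs->soft_reset(adev);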
+
 static int dce_v10_0_soft_reset(void *handle)
 {
        u32 srbm_soft_reset = 0, tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
+               return 0;
+
        if (dce_v10_0_is_display_hung(adev))
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
@@ -3512,6 +3571,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
        .resume = dce_v10_0_resume,
        .is_idle = dce_v10_0_is_idle,
        .wait_for_idle = dce_v10_0_wait_for_idle,
+       .check_soft_reset = dce_v10_0_check_soft_reset,
        .soft_reset = dce_v10_0_soft_reset,
        .set_clockgating_state = dce_v10_0_set_clockgating_state,
        .set_powergating_state = dce_v10_0_set_powergating_state,
index 1bfa48ddd8a619f9acfe66a4174a3858e24dea10..e3dc04d293e48759e63d4ae65d27d3028fbb0ad3 100644 (file)
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
 
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
index d4bf133908b17ae2c58e5156acf2175d493f6b58..b93eba0779507d95682cd52eb1ab861588ffe20d 100644 (file)
@@ -673,6 +673,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
        WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
+{
+       int num_crtc = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_CARRIZO:
+               num_crtc = 3;
+               break;
+       case CHIP_STONEY:
+               num_crtc = 2;
+               break;
+       case CHIP_POLARIS10:
+               num_crtc = 6;
+               break;
+       case CHIP_POLARIS11:
+               num_crtc = 5;
+               break;
+       default:
+               num_crtc = 0;
+       }
+       return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+       /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+       if (amdgpu_atombios_has_dce_engine_info(adev)) {
+               u32 tmp;
+               int crtc_enabled, i;
+
+               dce_v11_0_set_vga_render_state(adev, false);
+
+               /* Disable CRTCs */
+               for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+                       crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+                                                                        CRTC_CONTROL, CRTC_MASTER_EN);
+                       if (crtc_enabled) {
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+                               tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+                               WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+               }
+       }
+}
+
 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
@@ -2046,6 +2093,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
        u32 tmp, viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
+       char *format_name;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -2157,8 +2205,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
                bypass_lut = true;
                break;
        default:
-               DRM_ERROR("Unsupported screen format %s\n",
-                       drm_get_format_name(target_fb->pixel_format));
+               format_name = drm_get_format_name(target_fb->pixel_format);
+               DRM_ERROR("Unsupported screen format %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
 
@@ -2250,8 +2299,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
-       /* set pageflip to happen only at start of vblank interval (front porch) */
-       WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2708,7 +2757,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
        .gamma_set = dce_v11_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
        .destroy = dce_v11_0_crtc_destroy,
-       .page_flip = amdgpu_crtc_page_flip,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2999,24 +3048,22 @@ static int dce_v11_0_early_init(void *handle)
        dce_v11_0_set_display_funcs(adev);
        dce_v11_0_set_irq_funcs(adev);
 
+       adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
-               adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
-               adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS10:
-               adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_POLARIS11:
-               adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
index 84e4618f5253fe747f6f186870cb742073bbf96c..1f58a65ba2ef5339368400bd3c399db0d2505282 100644 (file)
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
 
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644 (file)
index 0000000..eb8f96a
--- /dev/null
@@ -0,0 +1,3172 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET,
+       (0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+       uint32_t        reg;
+       uint32_t        vblank;
+       uint32_t        vline;
+       uint32_t        hpd;
+} interrupt_status_offsets[6] = { {
+       .reg = DISP_INTERRUPT_STATUS,
+       .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+       DC_HPD1_INT_CONTROL,
+       DC_HPD2_INT_CONTROL,
+       DC_HPD3_INT_CONTROL,
+       DC_HPD4_INT_CONTROL,
+       DC_HPD5_INT_CONTROL,
+       DC_HPD6_INT_CONTROL,
+};
+
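interrupt_status_offsets[] and hpd_int_control_offsets[] turn the per-pipe register layout into table lookups, so the IRQ paths can stay generic instead of switch-ing on the CRTC index. Illustrative use:

	/* Sketch: table-driven vblank check for a given crtc. */
	u32 status = RREG32(interrupt_status_offsets[crtc].reg);

	if (status & interrupt_status_offsets[crtc].vblank)
		drm_handle_vblank(adev->ddev, crtc);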
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+                                    u32 block_offset, u32 reg)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
+       return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+                                     u32 block_offset, u32 reg, u32 v)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+       return !!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) &
+                 EVERGREEN_CRTC_V_BLANK);
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       return pos1 != pos2;
+}
+
+/**
+ * dce_v6_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+       unsigned i = 100;
+
+       if (crtc >= adev->mode_info.num_crtc)
+               return;
+
+       if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (dce_v6_0_is_in_vblank(adev, crtc)) {
+               if (i++ == 100) {
+                       i = 0;
+                       if (!dce_v6_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+
+       while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+               if (i++ == 100) {
+                       i = 0;
+                       if (!dce_v6_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip
+ *
+ * Programs the new scanout address (evergreen+); the double buffered
+ * update then takes place at the next vblank, or at hsync for async
+ * flips.
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+                              int crtc_id, u64 crtc_base, bool async)
+{
+       struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       /* flip at hsync for async, default is vsync */
+       WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+              EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+       /* update the scanout addresses */
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)crtc_base);
+
+       /* post the write */
+       RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
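The trailing RREG32() above is the standard posted-write flush: reading a register on the same path forces the preceding MMIO writes out of the bridge's buffers before the flip is treated as programmed. The idiom in isolation:

	/* Sketch: flush posted MMIO writes by reading back. */
	WREG32(reg, val);
	(void)RREG32(reg);	/* read completes the posted write */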
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+                                       u32 *vbl, u32 *position)
+{
+       if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+               return -EINVAL;
+       *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+       *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       return 0;
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+                              enum amdgpu_hpd_id hpd)
+{
+       bool connected = false;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_2:
+               if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_3:
+               if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_4:
+               if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_5:
+               if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_6:
+               if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       default:
+               break;
+       }
+
+       return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+                                     enum amdgpu_hpd_id hpd)
+{
+       u32 tmp;
+       bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               tmp = RREG32(DC_HPD1_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_2:
+               tmp = RREG32(DC_HPD2_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_3:
+               tmp = RREG32(DC_HPD3_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_4:
+               tmp = RREG32(DC_HPD4_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_5:
+               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_6:
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+               DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+                       /* Don't try to enable hpd on eDP or LVDS: it would break the
+                        * aux dp channel on iMacs, and skipping it helps (but does not
+                        * completely fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
+                        * and also avoids interrupt storms during dpms.
+                        */
+                       continue;
+               }
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, tmp);
+                       break;
+               default:
+                       break;
+               }
+               dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+               amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, 0);
+                       break;
+               default:
+                       break;
+               }
+               amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+       return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+       DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no impl!!!!\n");
+
+       return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+                                   struct amdgpu_mode_mc_save *save)
+{
+       u32 crtc_enabled, tmp, frame_count;
+       int i, j;
+
+       save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+       save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+       /* disable VGA render */
+       WREG32(VGA_RENDER_CONTROL, 0);
+
+       /* blank the display controllers */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+               if (crtc_enabled) {
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+                       if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+                               dce_v6_0_vblank_wait(adev, i);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                               WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+                       /* wait for the next frame */
+                       frame_count = evergreen_get_vblank_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (evergreen_get_vblank_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+                       WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+                                     struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+       }
+
+       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+       /* unlock regs and wait for update */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
+                               WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                       if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+                               tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+                               WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (tmp & 1) {
+                               tmp &= ~1;
+                               WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                               if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
+       /* Unlock vga access */
+       WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+       mdelay(1);
+       WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+                                         bool render)
+{
+       if (!render)
+               WREG32(R_000300_VGA_RENDER_CONTROL,
+                       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       int bpc = 0;
+       u32 tmp = 0;
+       enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+               bpc = amdgpu_connector_get_monitor_bpc(connector);
+               dither = amdgpu_connector->dither;
+       }
+
+       /* LVDS FMT is set up by atom */
+       if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+               return;
+
+       if (bpc == 0)
+               return;
+
+       switch (bpc) {
+       case 6:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE)
+                       /* XXX sort out optimal dither settings */
+                       tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                               FMT_SPATIAL_DITHER_EN);
+               else
+                       tmp |= FMT_TRUNCATE_EN;
+               break;
+       case 8:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE)
+                       /* XXX sort out optimal dither settings */
+                       tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                               FMT_RGB_RANDOM_ENABLE |
+                               FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+               else
+                       tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+               break;
+       case 10:
+       default:
+               /* not needed */
+               break;
+       }
+
+       WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * si_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the number of dram channels.
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+       switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+       case 0:
+       default:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       case 3:
+               return 8;
+       case 4:
+               return 3;
+       case 5:
+               return 6;
+       case 6:
+               return 10;
+       case 7:
+               return 12;
+       case 8:
+               return 16;
+       }
+}
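+
+/*
+ * Note: the case values above follow the hardware encoding of the
+ * MC_SHARED_CHMAP NOOFCHAN field, which is a lookup table rather than a
+ * simple power-of-two mapping (e.g. field value 3 means 8 channels while
+ * field value 4 means 3 channels).
+ */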
+
+struct dce6_wm_params {
+       u32 dram_channels; /* number of dram channels */
+       u32 yclk;          /* bandwidth per dram data pin in kHz */
+       u32 sclk;          /* engine clock in kHz */
+       u32 disp_clk;      /* display clock in kHz */
+       u32 src_width;     /* viewport width */
+       u32 active_time;   /* active display time in ns */
+       u32 blank_time;    /* blank time in ns */
+       bool interlaced;    /* mode is interlaced */
+       fixed20_12 vsc;    /* vertical scale ratio */
+       u32 num_heads;     /* number of active crtcs */
+       u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+       u32 lb_size;       /* line buffer allocated to pipe */
+       u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the dram bandwidth in MBytes/s.
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate raw DRAM Bandwidth */
+       fixed20_12 dram_efficiency; /* 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       dram_efficiency.full = dfixed_const(7);
+       dram_efficiency.full = dfixed_div(dram_efficiency, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
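+
+/*
+ * Worked example for dce_v6_0_dram_bandwidth() (illustrative numbers only):
+ * with yclk = 800000 kHz and 2 dram channels,
+ *   bandwidth = (800000 / 1000) * (2 * 4) * 0.7 = 4480 MBytes/s.
+ */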
+
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the dram bandwidth for display in MBytes/s.
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+       /* Calculate DRAM Bandwidth and the part allocated to display. */
+       fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+       disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+       return dfixed_trunc(bandwidth);
+}
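+
+/*
+ * Worked example for dce_v6_0_dram_bandwidth_for_display() (illustrative
+ * numbers only): with the same yclk = 800000 kHz and 2 dram channels, the
+ * worst-case 0.3 allocation gives
+ *   (800000 / 1000) * (2 * 4) * 0.3 = 1920 MBytes/s
+ * available to the display.
+ */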
+
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the data return bandwidth in MBytes/s.
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the display Data return Bandwidth */
+       fixed20_12 return_efficiency; /* 0.8 */
+       fixed20_12 sclk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       sclk.full = dfixed_const(wm->sclk);
+       sclk.full = dfixed_div(sclk, a);
+       a.full = dfixed_const(10);
+       return_efficiency.full = dfixed_const(8);
+       return_efficiency.full = dfixed_div(return_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, sclk);
+       bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
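+
+/*
+ * Worked example for dce_v6_0_data_return_bandwidth() (illustrative numbers
+ * only): with sclk = 600000 kHz, the 32-byte return path at 0.8 efficiency
+ * gives (600000 / 1000) * 32 * 0.8 = 15360 MBytes/s.
+ */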
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the dmif bandwidth in MBytes/s.
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the DMIF Request Bandwidth */
+       fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+       fixed20_12 disp_clk, bandwidth;
+       fixed20_12 a, b;
+
+       a.full = dfixed_const(1000);
+       disp_clk.full = dfixed_const(wm->disp_clk);
+       disp_clk.full = dfixed_div(disp_clk, a);
+       a.full = dfixed_const(32);
+       b.full = dfixed_mul(a, disp_clk);
+
+       a.full = dfixed_const(10);
+       disp_clk_request_efficiency.full = dfixed_const(8);
+       disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+       bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
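+
+/*
+ * Worked example for dce_v6_0_dmif_request_bandwidth() (illustrative numbers
+ * only): with disp_clk = 150000 kHz,
+ *   bandwidth = (150000 / 1000) * 32 * 0.8 = 3840 MBytes/s.
+ */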
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the min available bandwidth in MBytes/s.
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+       u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+       u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+       u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+       return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the average available bandwidth in MBytes/s.
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the display mode Average Bandwidth
+        * DisplayMode should contain the source and destination dimensions,
+        * timing, etc.
+        */
+       fixed20_12 bpp;
+       fixed20_12 line_time;
+       fixed20_12 src_width;
+       fixed20_12 bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+       line_time.full = dfixed_div(line_time, a);
+       bpp.full = dfixed_const(wm->bytes_per_pixel);
+       src_width.full = dfixed_const(wm->src_width);
+       bandwidth.full = dfixed_mul(src_width, bpp);
+       bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+       bandwidth.full = dfixed_div(bandwidth, line_time);
+
+       return dfixed_trunc(bandwidth);
+}
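+
+/*
+ * Worked example for dce_v6_0_average_bandwidth() (illustrative numbers
+ * only): a 1920-pixel wide source at 4 bytes/pixel, vsc = 1.0 and a total
+ * line time of 16000 ns needs (1920 * 4 * 1.0) / 16 us = 480 MBytes/s on
+ * average.
+ */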
+
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns the latency watermark in ns.
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+       /* First calculate the latency in ns */
+       u32 mc_latency = 2000; /* 2000 ns. */
+       u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+       u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+       u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+       u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+       u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+               (wm->num_heads * cursor_line_pair_return_time);
+       u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+       u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+       u32 tmp, dmif_size = 12288;
+       fixed20_12 a, b, c;
+
+       if (wm->num_heads == 0)
+               return 0;
+
+       a.full = dfixed_const(2);
+       b.full = dfixed_const(1);
+       if ((wm->vsc.full > a.full) ||
+           ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+           (wm->vtaps >= 5) ||
+           ((wm->vsc.full >= a.full) && wm->interlaced))
+               max_src_lines_per_dst_line = 4;
+       else
+               max_src_lines_per_dst_line = 2;
+
+       a.full = dfixed_const(available_bandwidth);
+       b.full = dfixed_const(wm->num_heads);
+       a.full = dfixed_div(a, b);
+
+       b.full = dfixed_const(mc_latency + 512);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(b, c);
+
+       c.full = dfixed_const(dmif_size);
+       b.full = dfixed_div(c, b);
+
+       tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(c, b);
+       c.full = dfixed_const(wm->bytes_per_pixel);
+       b.full = dfixed_mul(b, c);
+
+       lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+       a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(lb_fill_bw);
+       b.full = dfixed_div(c, b);
+       a.full = dfixed_div(a, b);
+       line_fill_time = dfixed_trunc(a);
+
+       if (line_fill_time < wm->active_time)
+               return latency;
+       else
+               return latency + (line_fill_time - wm->active_time);
+}
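+
+/*
+ * Worked example for dce_v6_0_latency_watermark() (illustrative numbers
+ * only): with available_bandwidth = 3840 MBytes/s, disp_clk = 150000 kHz and
+ * one head:
+ *   worst_chunk_return_time      = (512 * 8 * 1000) / 3840 ~= 1066 ns
+ *   cursor_line_pair_return_time = (128 * 4 * 1000) / 3840 ~= 133 ns
+ *   dc_latency                   = 40000000 / 150000       ~= 266 ns
+ *   latency ~= 2000 + (2 * 1066 + 1 * 133) + 266 ~= 4531 ns,
+ * before the line fill time adjustment at the end of the function.
+ */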
+
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+       if (dce_v6_0_average_bandwidth(wm) <=
+           (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+       if (dce_v6_0_average_bandwidth(wm) <=
+           (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (SI).
+ * Used for display watermark bandwidth calculations.
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+       u32 lb_partitions = wm->lb_size / wm->src_width;
+       u32 line_time = wm->active_time + wm->blank_time;
+       u32 latency_tolerant_lines;
+       u32 latency_hiding;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1);
+       if (wm->vsc.full > a.full)
+               latency_tolerant_lines = 1;
+       else {
+               if (lb_partitions <= (wm->vtaps + 1))
+                       latency_tolerant_lines = 1;
+               else
+                       latency_tolerant_lines = 2;
+       }
+
+       latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+       if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
+               return true;
+       else
+               return false;
+}
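+
+/*
+ * Worked example for dce_v6_0_check_latency_hiding() (illustrative numbers
+ * only): with line_time = 16000 ns, blank_time = 2000 ns and two latency
+ * tolerant lines, latency_hiding = 2 * 16000 + 2000 = 34000 ns, so a 4531 ns
+ * latency watermark fits comfortably.
+ */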
+
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (SI).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+                                       struct amdgpu_crtc *amdgpu_crtc,
+                                       u32 lb_size, u32 num_heads)
+{
+       struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+       struct dce6_wm_params wm_low, wm_high;
+       u32 dram_channels;
+       u32 pixel_period;
+       u32 line_time = 0;
+       u32 latency_watermark_a = 0, latency_watermark_b = 0;
+       u32 priority_a_mark = 0, priority_b_mark = 0;
+       u32 priority_a_cnt = PRIORITY_OFF;
+       u32 priority_b_cnt = PRIORITY_OFF;
+       u32 tmp, arb_control3;
+       fixed20_12 a, b, c;
+
+       if (amdgpu_crtc->base.enabled && num_heads && mode) {
+               pixel_period = 1000000 / (u32)mode->clock;
+               line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+               priority_a_cnt = 0;
+               priority_b_cnt = 0;
+
+               dram_channels = si_get_number_of_dram_channels(adev);
+
+               /* watermark for high clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_high.yclk =
+                               amdgpu_dpm_get_mclk(adev, false) * 10;
+                       wm_high.sclk =
+                               amdgpu_dpm_get_sclk(adev, false) * 10;
+               } else {
+                       wm_high.yclk = adev->pm.current_mclk * 10;
+                       wm_high.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_high.disp_clk = mode->clock;
+               wm_high.src_width = mode->crtc_hdisplay;
+               wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_high.blank_time = line_time - wm_high.active_time;
+               wm_high.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_high.interlaced = true;
+               wm_high.vsc = amdgpu_crtc->vsc;
+               wm_high.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_high.vtaps = 2;
+               wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_high.lb_size = lb_size;
+               wm_high.dram_channels = dram_channels;
+               wm_high.num_heads = num_heads;
+
+               /* watermark for low clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_low.yclk =
+                               amdgpu_dpm_get_mclk(adev, true) * 10;
+                       wm_low.sclk =
+                               amdgpu_dpm_get_sclk(adev, true) * 10;
+               } else {
+                       wm_low.yclk = adev->pm.current_mclk * 10;
+                       wm_low.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_low.disp_clk = mode->clock;
+               wm_low.src_width = mode->crtc_hdisplay;
+               wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_low.blank_time = line_time - wm_low.active_time;
+               wm_low.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_low.interlaced = true;
+               wm_low.vsc = amdgpu_crtc->vsc;
+               wm_low.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_low.vtaps = 2;
+               wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_low.lb_size = lb_size;
+               wm_low.dram_channels = dram_channels;
+               wm_low.num_heads = num_heads;
+
+               /* set for high clocks */
+               latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+               /* set for low clocks */
+               latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+                   !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+                   !dce_v6_0_check_latency_hiding(&wm_high) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+                       priority_a_cnt |= PRIORITY_ALWAYS_ON;
+                       priority_b_cnt |= PRIORITY_ALWAYS_ON;
+               }
+               if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+                   !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+                   !dce_v6_0_check_latency_hiding(&wm_low) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+                       priority_a_cnt |= PRIORITY_ALWAYS_ON;
+                       priority_b_cnt |= PRIORITY_ALWAYS_ON;
+               }
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_a);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_a_mark = dfixed_trunc(c);
+               priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_b);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_b_mark = dfixed_trunc(c);
+               priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+       }
+
+       /* select wm A */
+       arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+       tmp = arb_control3;
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(1);
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* select wm B */
+       tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(2);
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* restore original selection */
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+       /* write the priority marks */
+       WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+       WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+       /* save values for DPM */
+       amdgpu_crtc->line_time = line_time;
+       amdgpu_crtc->wm_high = latency_watermark_a;
+}
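+
+/*
+ * Worked example for the priority mark computation above (illustrative
+ * numbers only): with latency_watermark_a = 4531 ns, mode->clock =
+ * 148500 kHz and hsc = 1.0,
+ *   mark = 4531 * (148500 / 1000) * 1.0 / 1000 / 16 ~= 42,
+ * i.e. roughly the watermark expressed in 16-pixel units at the pixel clock.
+ */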
+
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+                                  struct amdgpu_crtc *amdgpu_crtc,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *other_mode)
+{
+       u32 tmp, buffer_alloc, i;
+       u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+       /*
+        * Line Buffer Setup
+        * There are 3 line buffers, each one shared by 2 display controllers.
+        * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+        * the display controllers.  The partitioning is done via one of four
+        * preset allocations specified in bits 21:20:
+        *  0 - half lb
+        *  2 - whole lb, other crtc must be disabled
+        */
+       /* this can get tricky if we have two large displays on a paired group
+        * of crtcs.  Ideally for multiple large displays we'd assign them to
+        * non-linked crtcs for maximum line buffer allocation.
+        */
+       if (amdgpu_crtc->base.enabled && mode) {
+               if (other_mode) {
+                       tmp = 0; /* 1/2 */
+                       buffer_alloc = 1;
+               } else {
+                       tmp = 2; /* whole */
+                       buffer_alloc = 2;
+               }
+       } else {
+               tmp = 0;
+               buffer_alloc = 0;
+       }
+
+       WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+              DC_LB_MEMORY_CONFIG(tmp));
+
+       WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+              DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+                   DMIF_BUFFERS_ALLOCATED_COMPLETED)
+                       break;
+               udelay(1);
+       }
+
+       if (amdgpu_crtc->base.enabled && mode) {
+               switch (tmp) {
+               case 0:
+               default:
+                       return 4096 * 2;
+               case 2:
+                       return 8192 * 2;
+               }
+       }
+
+       /* controller not enabled, so no lb used */
+       return 0;
+}
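+
+/*
+ * Example of the line buffer split (values taken from the switch above): if
+ * both crtcs of a pair are active, each gets half a line buffer (4096 * 2
+ * entries); a crtc whose partner is disabled gets the whole buffer
+ * (8192 * 2 entries); a disabled crtc gets none.
+ */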
+
+/**
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (SI).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+       struct drm_display_mode *mode0 = NULL;
+       struct drm_display_mode *mode1 = NULL;
+       u32 num_heads = 0, lb_size;
+       int i;
+
+       if (!adev->mode_info.mode_config_initialized)
+               return;
+
+       amdgpu_update_display_priority(adev);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i]->base.enabled)
+                       num_heads++;
+       }
+       for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+               mode0 = &adev->mode_info.crtcs[i]->base.mode;
+               mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+               lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+               dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+               lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+               dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+       }
+}
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+       int i;
+       u32 offset, tmp;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               offset = adev->mode_info.audio.pin[i].offset;
+               tmp = RREG32_AUDIO_ENDPT(offset,
+                                     AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+               if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+                       adev->mode_info.audio.pin[i].connected = false;
+               else
+                       adev->mode_info.audio.pin[i].connected = true;
+       }
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v6_0_audio_get_connected_pins(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               if (adev->mode_info.audio.pin[i].connected)
+                       return &adev->mode_info.audio.pin[i];
+       }
+       DRM_ERROR("No connected audio pins found!\n");
+       return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 offset;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       offset = dig->afmt->offset;
+
+       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+                                               struct drm_display_mode *mode)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+                                 struct amdgpu_audio_pin *pin,
+                                 bool enable)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] =
+{
+       (0x1780 - 0x1780),
+       (0x1786 - 0x1780),
+       (0x178c - 0x1780),
+       (0x1792 - 0x1780),
+       (0x1798 - 0x1780),
+       (0x179d - 0x1780),
+       (0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+                                              void *buffer, size_t size)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silently return if the state is already what was requested */
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
+               return;
+
+       if (!enable && dig->afmt->pin) {
+               dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+               dig->afmt->pin = NULL;
+       }
+
+       dig->afmt->enabled = enable;
+
+       DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++)
+               adev->mode_info.afmt[i] = NULL;
+
+       /* DCE6 has audio blocks tied to DIG encoders */
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+               if (adev->mode_info.afmt[i]) {
+                       adev->mode_info.afmt[i]->offset = dig_offsets[i];
+                       adev->mode_info.afmt[i]->id = i;
+               } else {
+                       for (j = 0; j < i; j++) {
+                               kfree(adev->mode_info.afmt[j]);
+                               adev->mode_info.afmt[j] = NULL;
+                       }
+                       DRM_ERROR("Out of memory allocating afmt table\n");
+                       return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               kfree(adev->mode_info.afmt[i]);
+               adev->mode_info.afmt[i] = NULL;
+       }
+}
+
+static const u32 vga_control_regs[6] =
+{
+       AVIVO_D1VGA_CONTROL,
+       AVIVO_D2VGA_CONTROL,
+       EVERGREEN_D3VGA_CONTROL,
+       EVERGREEN_D4VGA_CONTROL,
+       EVERGREEN_D5VGA_CONTROL,
+       EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 vga_control;
+
+       vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+       if (enable)
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+       else
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       if (enable)
+               WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+       else
+               WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+                                    struct drm_framebuffer *fb,
+                                    int x, int y, int atomic)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_framebuffer *amdgpu_fb;
+       struct drm_framebuffer *target_fb;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *rbo;
+       uint64_t fb_location, tiling_flags;
+       uint32_t fb_format, fb_pitch_pixels, pipe_config;
+       u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+       u32 viewport_w, viewport_h;
+       int r;
+       bool bypass_lut = false;
+
+       /* no fb bound */
+       if (!atomic && !crtc->primary->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (atomic) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               target_fb = crtc->primary->fb;
+       }
+
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
+       obj = amdgpu_fb->obj;
+       rbo = gem_to_amdgpu_bo(obj);
+       r = amdgpu_bo_reserve(rbo, false);
+       if (unlikely(r != 0))
+               return r;
+
+       if (atomic) {
+               fb_location = amdgpu_bo_gpu_offset(rbo);
+       } else {
+               r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       amdgpu_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
+       }
+
+       amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+       amdgpu_bo_unreserve(rbo);
+
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+               break;
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_RGB565:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* An fb of more than 8 bpc needs to bypass the hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* An fb of more than 8 bpc needs to bypass the hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       default:
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
+               return -EINVAL;
+       }
+
+       if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+               unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+               bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+               bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+               mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+               tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+               num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+               fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+               fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+               fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+               fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+               fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+               fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+       } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
+               fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+       pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+       fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+       dce_v6_0_vga_enable(crtc, false);
+
+       /* Make sure surface address is updated at vertical blank rather than
+        * horizontal blank
+        */
+       WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+       WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+       WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+       /*
+        * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+        * retain the full precision throughout the pipeline.
+        */
+       WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+                (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+                ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+       WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+       WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+       WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+       dce_v6_0_grph_enable(crtc, true);
+
+       WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+                      target_fb->height);
+       x &= ~3;
+       y &= ~1;
+       WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+              (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+       WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+              (viewport_w << 16) | viewport_h);
+
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+       if (!atomic && fb && fb != crtc->primary->fb) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r != 0))
+                       return r;
+               amdgpu_bo_unpin(rbo);
+               amdgpu_bo_unreserve(rbo);
+       }
+
+       /* Bytes per pixel may have changed */
+       dce_v6_0_bandwidth_update(adev);
+
+       return 0;
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+                                   struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+                      EVERGREEN_INTERLEAVE_EN);
+       else
+               WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       int i;
+
+       DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+       WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+               NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+       WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+              NI_GRPH_PRESCALE_BYPASS);
+       WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+              NI_OVL_PRESCALE_BYPASS);
+       WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+               NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+       WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+       WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+       WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+                      (amdgpu_crtc->lut_r[i] << 20) |
+                      (amdgpu_crtc->lut_g[i] << 10) |
+                      (amdgpu_crtc->lut_b[i] << 0));
+       }
+
+       WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+       WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+               NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+       WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+               NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+       WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_OUTPUT_CSC_GRPH_MODE(0) |
+               NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+       /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+       WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+               if (dig->linkb)
+                       return 1;
+               else
+                       return 0;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+               if (dig->linkb)
+                       return 3;
+               else
+                       return 2;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               if (dig->linkb)
+                       return 5;
+               else
+                       return 4;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               return 6;
+       default:
+               DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+               return 0;
+       }
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 pll_in_use;
+       int pll;
+
+       if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+               if (adev->clock.dp_extclk)
+                       /* skip PPLL programming if using ext clock */
+                       return ATOM_PPLL_INVALID;
+               else
+                       return ATOM_PPLL0;
+       } else {
+               /* use the same PPLL for all monitors with the same clock */
+               pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+               if (pll != ATOM_PPLL_INVALID)
+                       return pll;
+       }
+
+       /* otherwise, pick one of the remaining plls: PPLL2, then PPLL1 */
+       pll_in_use = amdgpu_pll_get_use_mask(crtc);
+       if (!(pll_in_use & (1 << ATOM_PPLL2)))
+               return ATOM_PPLL2;
+       if (!(pll_in_use & (1 << ATOM_PPLL1)))
+               return ATOM_PPLL1;
+       DRM_ERROR("unable to allocate a PPLL\n");
+       return ATOM_PPLL_INVALID;
+}
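+
+/*
+ * Example of the allocation above: on a board without an external DP clock,
+ * every DP crtc shares PPLL0, while two non-DP monitors with different pixel
+ * clocks take PPLL2 and PPLL1 respectively; a third distinct non-DP clock
+ * cannot be served and gets ATOM_PPLL_INVALID.
+ */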
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       uint32_t cur_lock;
+
+       cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+       if (lock)
+               cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+       else
+               cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+       WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+                  EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                  EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
+       WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+                  EVERGREEN_CURSOR_EN |
+                  EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                  EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+                                      int x, int y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       int xorigin = 0, yorigin = 0;
+       int w = amdgpu_crtc->cursor_width;
+
+       /* avivo cursors are offset into the total surface */
+       x += crtc->x;
+       y += crtc->y;
+       DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+       if (x < 0) {
+               xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+               x = 0;
+       }
+       if (y < 0) {
+               yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+               y = 0;
+       }
+
+       WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+       WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
+       return 0;
+}
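+
+/*
+ * Example for the clamping above: a cursor asked to move to x = -10 is
+ * programmed at x = 0 with xorigin (the hot spot offset) set to 10, so the
+ * visible part of the cursor still lines up with the requested position.
+ */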
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       int ret;
+
+       dce_v6_0_lock_cursor(crtc, true);
+       ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+       dce_v6_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                    struct drm_file *file_priv,
+                                    uint32_t handle,
+                                    uint32_t width,
+                                    uint32_t height,
+                                    int32_t hot_x,
+                                    int32_t hot_y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *aobj;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               dce_v6_0_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > amdgpu_crtc->max_cursor_width) ||
+           (height > amdgpu_crtc->max_cursor_height)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+               return -ENOENT;
+       }
+
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       amdgpu_crtc->cursor_width = width;
+       amdgpu_crtc->cursor_height = height;
+
+       dce_v6_0_lock_cursor(crtc, true);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v6_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
+       dce_v6_0_show_cursor(crtc);
+       dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+       if (amdgpu_crtc->cursor_bo) {
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
+               if (likely(ret == 0)) {
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
+               }
+               drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+       }
+
+       amdgpu_crtc->cursor_bo = obj;
+       return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v6_0_lock_cursor(crtc, true);
+
+               dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                           amdgpu_crtc->cursor_y);
+
+               dce_v6_0_show_cursor(crtc);
+
+               dce_v6_0_lock_cursor(crtc, false);
+       }
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                  u16 *blue, uint32_t size)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       int i;
+
+       /* userspace palettes are always correct as is */
+       for (i = 0; i < size; i++) {
+               amdgpu_crtc->lut_r[i] = red[i] >> 6;
+               amdgpu_crtc->lut_g[i] = green[i] >> 6;
+               amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+       }
+       dce_v6_0_crtc_load_lut(crtc);
+
+       return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+       .cursor_set2 = dce_v6_0_crtc_cursor_set2,
+       .cursor_move = dce_v6_0_crtc_cursor_move,
+       .gamma_set = dce_v6_0_crtc_gamma_set,
+       .set_config = amdgpu_crtc_set_config,
+       .destroy = dce_v6_0_crtc_destroy,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               amdgpu_crtc->enabled = true;
+               amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+               amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+               drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+               dce_v6_0_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+               if (amdgpu_crtc->enabled)
+                       amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+               amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+               amdgpu_crtc->enabled = false;
+               break;
+       }
+       /* adjust pm to dpms */
+       amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+       /* disable crtc pair power gating before programming */
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_atom_ss ss;
+       int i;
+
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       if (crtc->primary->fb) {
+               int r;
+               struct amdgpu_framebuffer *amdgpu_fb;
+               struct amdgpu_bo *rbo;
+
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r))
+                       DRM_ERROR("failed to reserve rbo before unpin\n");
+               else {
+                       amdgpu_bo_unpin(rbo);
+                       amdgpu_bo_unreserve(rbo);
+               }
+       }
+       /* disable the GRPH */
+       dce_v6_0_grph_enable(crtc, false);
+
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i] &&
+                   adev->mode_info.crtcs[i]->enabled &&
+                   i != amdgpu_crtc->crtc_id &&
+                   amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+                       /* another enabled crtc is using this pll; don't
+                        * turn it off
+                        */
+                       goto done;
+               }
+       }
+
+       switch (amdgpu_crtc->pll_id) {
+       case ATOM_PPLL1:
+       case ATOM_PPLL2:
+               /* disable the ppll */
+               amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+                                                0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+               break;
+       default:
+               break;
+       }
+done:
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+}
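
The loop in crtc_disable decides whether the PLL may be powered down: it can only be torn down once no other enabled CRTC references the same pll_id. The same test extracted into a standalone predicate, for clarity (types and names here are illustrative, not the driver's):

    #include <stdbool.h>

    struct sketch_crtc { bool enabled; int pll_id; };   /* illustrative */

    /* True if any enabled CRTC other than `self` still uses `pll_id`. */
    static bool pll_in_use_elsewhere(const struct sketch_crtc *c, int n,
                                     int self, int pll_id)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (c[i].enabled && i != self && c[i].pll_id == pll_id)
                            return true;
            return false;
    }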
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (!amdgpu_crtc->adjusted_clock)
+               return -EINVAL;
+
+       amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+       amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+       dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+       amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+       amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v6_0_cursor_reset(crtc);
+       /* update the hw mode for dpm */
+       amdgpu_crtc->hw_mode = *adjusted_mode;
+
+       return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       amdgpu_crtc->encoder = encoder;
+                       amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+                       break;
+               }
+       }
+       if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+               amdgpu_crtc->encoder = NULL;
+               amdgpu_crtc->connector = NULL;
+               return false;
+       }
+       if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+               return false;
+       if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+               return false;
+       /* pick pll */
+       amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+       /* if we can't get a PPLL for a non-DP encoder, fail */
+       if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+           !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+               return false;
+
+       return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y, enum mode_set_atomic state)
+{
+       return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+       .dpms = dce_v6_0_crtc_dpms,
+       .mode_fixup = dce_v6_0_crtc_mode_fixup,
+       .mode_set = dce_v6_0_crtc_mode_set,
+       .mode_set_base = dce_v6_0_crtc_set_base,
+       .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+       .prepare = dce_v6_0_crtc_prepare,
+       .commit = dce_v6_0_crtc_commit,
+       .load_lut = dce_v6_0_crtc_load_lut,
+       .disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+       struct amdgpu_crtc *amdgpu_crtc;
+       int i;
+
+       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (amdgpu_crtc == NULL)
+               return -ENOMEM;
+
+       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+       amdgpu_crtc->crtc_id = index;
+       adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+       amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+       amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+       for (i = 0; i < 256; i++) {
+               amdgpu_crtc->lut_r[i] = i << 2;
+               amdgpu_crtc->lut_g[i] = i << 2;
+               amdgpu_crtc->lut_b[i] = i << 2;
+       }
+
+       amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+       return 0;
+}
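
crtc_init seeds an identity gamma LUT by widening each 8-bit index into the 10-bit range the display hardware expects (i << 2), while gamma_set earlier narrows userspace's 16-bit channel values down to the same 10 bits (>> 6). A tiny standalone demonstration of the two conversions (plain C, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t user16 = 0xffff;            /* 16-bit value from userspace */
            uint16_t from_user = user16 >> 6;    /* 0x3ff: 10-bit full scale */
            uint16_t identity  = 255 << 2;       /* 0x3fc: widened 8-bit index */

            printf("%#x %#x\n", (unsigned)from_user, (unsigned)identity);
            return 0;
    }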
+
+static int dce_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+       adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+       dce_v6_0_set_display_funcs(adev);
+       dce_v6_0_set_irq_funcs(adev);
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_VERDE:
+               adev->mode_info.num_crtc = 6;
+               adev->mode_info.num_hpd = 6;
+               adev->mode_info.num_dig = 6;
+               break;
+       case CHIP_OLAND:
+               adev->mode_info.num_crtc = 2;
+               adev->mode_info.num_hpd = 2;
+               adev->mode_info.num_dig = 2;
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+       int r, i;
+       bool ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               if (r)
+                       return r;
+       }
+
+       for (i = 8; i < 20; i += 2) {
+               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               if (r)
+                       return r;
+       }
+
+       /* HPD hotplug */
+       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       if (r)
+               return r;
+
+       adev->mode_info.mode_config_initialized = true;
+
+       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+       adev->ddev->mode_config.async_page_flip = true;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       adev->ddev->mode_config.preferred_depth = 24;
+       adev->ddev->mode_config.prefer_shadow = 1;
+
+       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+       r = amdgpu_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       /* allocate crtcs */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = dce_v6_0_crtc_init(adev, i);
+               if (r)
+                       return r;
+       }
+
+       ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+       if (ret)
+               amdgpu_print_display_setup(adev->ddev);
+       else
+               return -EINVAL;
+
+       /* setup afmt */
+       r = dce_v6_0_afmt_init(adev);
+       if (r)
+               return r;
+
+       r = dce_v6_0_audio_init(adev);
+       if (r)
+               return r;
+
+       drm_kms_helper_poll_init(adev->ddev);
+
+       return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       kfree(adev->mode_info.bios_hardcoded_edid);
+
+       drm_kms_helper_poll_fini(adev->ddev);
+
+       dce_v6_0_audio_fini(adev);
+
+       dce_v6_0_afmt_fini(adev);
+
+       drm_mode_config_cleanup(adev->ddev);
+       adev->mode_info.mode_config_initialized = false;
+
+       return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+       /* initialize hpd */
+       dce_v6_0_hpd_init(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       dce_v6_0_pageflip_interrupt_init(adev);
+
+       return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       dce_v6_0_hpd_fini(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       dce_v6_0_pageflip_interrupt_fini(adev);
+
+       return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_atombios_scratch_regs_save(adev);
+
+       return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int ret;
+
+       ret = dce_v6_0_hw_init(handle);
+
+       amdgpu_atombios_scratch_regs_restore(adev);
+
+       /* turn on the BL */
+       if (adev->mode_info.bl_encoder) {
+               u8 bl_level = amdgpu_display_backlight_get_level(adev,
+                                                                 adev->mode_info.bl_encoder);
+               amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+                                                   bl_level);
+       }
+
+       return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+       return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+       DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
+       return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+                                                    int crtc,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 reg_block, interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (crtc) {
+       case 0:
+               reg_block = SI_CRTC0_REGISTER_OFFSET;
+               break;
+       case 1:
+               reg_block = SI_CRTC1_REGISTER_OFFSET;
+               break;
+       case 2:
+               reg_block = SI_CRTC2_REGISTER_OFFSET;
+               break;
+       case 3:
+               reg_block = SI_CRTC3_REGISTER_OFFSET;
+               break;
+       case 4:
+               reg_block = SI_CRTC4_REGISTER_OFFSET;
+               break;
+       case 5:
+               reg_block = SI_CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               interrupt_mask = RREG32(INT_MASK + reg_block);
+               interrupt_mask &= ~VBLANK_INT_MASK;
+               WREG32(INT_MASK + reg_block, interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               interrupt_mask = RREG32(INT_MASK + reg_block);
+               interrupt_mask |= VBLANK_INT_MASK;
+               WREG32(INT_MASK + reg_block, interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
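
Enabling or disabling the vblank interrupt is a read-modify-write of the INT_MASK register: read, set or clear one bit, write back, leaving the other bits untouched. The same pattern in isolation (register access stubbed out; all names here are hypothetical):

    #include <stdint.h>

    static uint32_t fake_mmio[64];                   /* stand-in for MMIO space */
    static uint32_t rreg32(unsigned a)               { return fake_mmio[a]; }
    static void     wreg32(unsigned a, uint32_t v)   { fake_mmio[a] = v; }

    /* Set or clear `mask` at register `addr` without disturbing other bits. */
    static void update_int_mask(unsigned addr, uint32_t mask, int enable)
    {
            uint32_t v = rreg32(addr);

            if (enable)
                    v |= mask;
            else
                    v &= ~mask;
            wreg32(addr, v);
    }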
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+                                                   int crtc,
+                                                   enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+       switch (type) {
+       case AMDGPU_HPD_1:
+               dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_2:
+               dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_3:
+               dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_4:
+               dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_5:
+               dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_6:
+               dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+               break;
+       default:
+               DRM_DEBUG("invalid hpd %d\n", type);
+               return 0;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+               dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+               WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+               dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+               WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CRTC_IRQ_VBLANK1:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK2:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK3:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK4:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK5:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK6:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE1:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE2:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE3:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE4:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE5:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE6:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+                            struct amdgpu_irq_src *source,
+                            struct amdgpu_iv_entry *entry)
+{
+       unsigned crtc = entry->src_id - 1;
+       uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+       unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+       switch (entry->src_data) {
+       case 0: /* vblank */
+               if (disp_int & interrupt_status_offsets[crtc].vblank)
+                       WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                       drm_handle_vblank(adev->ddev, crtc);
+               }
+               DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               break;
+       case 1: /* vline */
+               if (disp_int & interrupt_status_offsets[crtc].vline)
+                       WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+               break;
+       default:
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+                                                struct amdgpu_irq_src *src,
+                                                unsigned type,
+                                                enum amdgpu_interrupt_state state)
+{
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
+       }
+
+       reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+       if (state == AMDGPU_IRQ_STATE_DISABLE)
+               WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+       else
+               WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+       return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+                                struct amdgpu_irq_src *source,
+                                struct amdgpu_iv_entry *entry)
+{
+       unsigned long flags;
+       unsigned crtc_id;
+       struct amdgpu_crtc *amdgpu_crtc;
+       struct amdgpu_flip_work *works;
+
+       crtc_id = (entry->src_id - 8) >> 1;
+
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
+       }
+
+       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+       /* the IRQ can fire before the CRTC is fully initialized */
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       works = amdgpu_crtc->pflip_works;
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
+                                amdgpu_crtc->pflip_status,
+                                AMDGPU_FLIP_SUBMITTED);
+               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               return 0;
+       }
+
+       /* page flip completed. clean up */
+       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+       amdgpu_crtc->pflip_works = NULL;
+
+       /* wake up userspace */
+       if (works->event)
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
+       schedule_work(&works->unpin_work);
+
+       return 0;
+}
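
sw_init registered the pageflip sources at src_id 8, 10, ..., 18, one per CRTC, and the handler above inverts that encoding with (src_id - 8) >> 1. A standalone round-trip check of the assumed encoding (illustrative only):

    #include <assert.h>

    static unsigned pf_src_id(unsigned crtc)    { return 8 + 2 * crtc; }
    static unsigned pf_crtc_id(unsigned src_id) { return (src_id - 8) >> 1; }

    int main(void)
    {
            unsigned crtc;

            for (crtc = 0; crtc < 6; crtc++)
                    assert(pf_crtc_id(pf_src_id(crtc)) == crtc);
            return 0;
    }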
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+                           struct amdgpu_irq_src *source,
+                           struct amdgpu_iv_entry *entry)
+{
+       uint32_t disp_int, mask, int_control, tmp;
+       unsigned hpd;
+
+       if (entry->src_data >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               return 0;
+       }
+
+       hpd = entry->src_data;
+       disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+       mask = interrupt_status_offsets[hpd].hpd;
+       int_control = hpd_int_control_offsets[hpd];
+
+       if (disp_int & mask) {
+               tmp = RREG32(int_control);
+               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+               WREG32(int_control, tmp);
+               schedule_work(&adev->hotplug_work);
+               DRM_INFO("IH: HPD%d\n", hpd + 1);
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+       .name = "dce_v6_0",
+       .early_init = dce_v6_0_early_init,
+       .late_init = NULL,
+       .sw_init = dce_v6_0_sw_init,
+       .sw_fini = dce_v6_0_sw_fini,
+       .hw_init = dce_v6_0_hw_init,
+       .hw_fini = dce_v6_0_hw_fini,
+       .suspend = dce_v6_0_suspend,
+       .resume = dce_v6_0_resume,
+       .is_idle = dce_v6_0_is_idle,
+       .wait_for_idle = dce_v6_0_wait_for_idle,
+       .soft_reset = dce_v6_0_soft_reset,
+       .set_clockgating_state = dce_v6_0_set_clockgating_state,
+       .set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+                         struct drm_display_mode *mode,
+                         struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       /* the scaler setup clears this on some chips */
+       dce_v6_0_set_interleave(encoder->crtc, mode);
+
+       if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+               dce_v6_0_afmt_enable(encoder, true);
+               dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+       }
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+       if ((amdgpu_encoder->active_device &
+            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+           (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+            ENCODER_OBJECT_ID_NONE)) {
+               struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+               if (dig) {
+                       dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+                       if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+                               dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+               }
+       }
+
+       amdgpu_atombios_scratch_regs_lock(adev, true);
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               /* select the clock/data port if it uses a router */
+               if (amdgpu_connector->router.cd_valid)
+                       amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+               /* turn eDP panel on for mode set */
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       amdgpu_atombios_encoder_set_edp_panel_power(connector,
+                                                            ATOM_TRANSMITTER_ACTION_POWER_ON);
+       }
+
+       /* this is needed for the pll/ss setup to work correctly in some cases */
+       amdgpu_atombios_encoder_set_crtc_source(encoder);
+       /* set up the FMT blocks */
+       dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* need to call this here as we need the crtc set up */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig;
+
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       if (amdgpu_atombios_encoder_is_digital(encoder)) {
+               if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+                       dce_v6_0_afmt_enable(encoder, false);
+               dig = amdgpu_encoder->enc_priv;
+               dig->dig_encoder = -1;
+       }
+       amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode,
+                     struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+                                   const struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+       .dpms = dce_v6_0_ext_dpms,
+       .mode_fixup = dce_v6_0_ext_mode_fixup,
+       .prepare = dce_v6_0_ext_prepare,
+       .mode_set = dce_v6_0_ext_mode_set,
+       .commit = dce_v6_0_ext_commit,
+       .disable = dce_v6_0_ext_disable,
+       /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v6_0_encoder_prepare,
+       .mode_set = dce_v6_0_encoder_mode_set,
+       .commit = dce_v6_0_encoder_commit,
+       .disable = dce_v6_0_encoder_disable,
+       .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v6_0_encoder_prepare,
+       .mode_set = dce_v6_0_encoder_mode_set,
+       .commit = dce_v6_0_encoder_commit,
+       .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+               amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+       kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+       .destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+                                uint32_t encoder_enum,
+                                uint32_t supported_device,
+                                u16 caps)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               amdgpu_encoder = to_amdgpu_encoder(encoder);
+               if (amdgpu_encoder->encoder_enum == encoder_enum) {
+                       amdgpu_encoder->devices |= supported_device;
+                       return;
+               }
+       }
+
+       /* add a new one */
+       amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+       if (!amdgpu_encoder)
+               return;
+
+       encoder = &amdgpu_encoder->base;
+       switch (adev->mode_info.num_crtc) {
+       case 1:
+               encoder->possible_crtcs = 0x1;
+               break;
+       case 2:
+       default:
+               encoder->possible_crtcs = 0x3;
+               break;
+       case 4:
+               encoder->possible_crtcs = 0xf;
+               break;
+       case 6:
+               encoder->possible_crtcs = 0x3f;
+               break;
+       }
+
+       amdgpu_encoder->enc_priv = NULL;
+
+       amdgpu_encoder->encoder_enum = encoder_enum;
+       amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+       amdgpu_encoder->devices = supported_device;
+       amdgpu_encoder->rmx_type = RMX_OFF;
+       amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+       amdgpu_encoder->is_ext_encoder = false;
+       amdgpu_encoder->caps = caps;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                DRM_MODE_ENCODER_DAC, NULL);
+               drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       amdgpu_encoder->rmx_type = RMX_FULL;
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+               } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               } else {
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               }
+               drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_SI170B:
+       case ENCODER_OBJECT_ID_CH7303:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+       case ENCODER_OBJECT_ID_TITFP513:
+       case ENCODER_OBJECT_ID_VT1623:
+       case ENCODER_OBJECT_ID_HDMI_SI1930:
+       case ENCODER_OBJECT_ID_TRAVIS:
+       case ENCODER_OBJECT_ID_NUTMEG:
+               /* these are handled by the primary encoders */
+               amdgpu_encoder->is_ext_encoder = true;
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS, NULL);
+               else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC, NULL);
+               else
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS, NULL);
+               drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+               break;
+       }
+}
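
For the CRTC counts this driver actually enumerates (1, 2, 4, 6), the possible_crtcs switch above just builds a low bitmask with one bit per CRTC; the default arm pins every other count to 0x3. The closed form for the enumerated counts, as a sketch:

    /* Equivalent to the switch above for num_crtc in {1, 2, 4, 6}:
     * a mask with the low num_crtc bits set. Illustrative only. */
    static inline unsigned int possible_crtcs_mask(int num_crtc)
    {
            return (1u << num_crtc) - 1;
    }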
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+       .set_vga_render_state = &dce_v6_0_set_vga_render_state,
+       .bandwidth_update = &dce_v6_0_bandwidth_update,
+       .vblank_get_counter = &dce_v6_0_vblank_get_counter,
+       .vblank_wait = &dce_v6_0_vblank_wait,
+       .is_display_hung = &dce_v6_0_is_display_hung,
+       .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+       .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+       .hpd_sense = &dce_v6_0_hpd_sense,
+       .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+       .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+       .page_flip = &dce_v6_0_page_flip,
+       .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+       .add_encoder = &dce_v6_0_encoder_add,
+       .add_connector = &amdgpu_connector_add,
+       .stop_mc_access = &dce_v6_0_stop_mc_access,
+       .resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mode_info.funcs == NULL)
+               adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+       .set = dce_v6_0_set_crtc_interrupt_state,
+       .process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+       .set = dce_v6_0_set_pageflip_interrupt_state,
+       .process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+       .set = dce_v6_0_set_hpd_interrupt_state,
+       .process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+       adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+       adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+       adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+       adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+       adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
new file mode 100644 (file)
index 0000000..6a55281
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
+
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+
+#endif
index 4fdfab1e920012b4e770071af2c05e0357753c07..a7decf977b5c3eecd99f66ebfec5ee0b8ec157ad 100644 (file)
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-       unsigned i = 0;
+       unsigned i = 100;
 
        if (crtc >= adev->mode_info.num_crtc)
                return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
         * wait for another frame.
         */
        while (dce_v8_0_is_in_vblank(adev, crtc)) {
-               if (i++ % 100 == 0) {
+               if (i++ == 100) {
+                       i = 0;
                        if (!dce_v8_0_is_counter_moving(adev, crtc))
                                break;
                }
        }
 
        while (!dce_v8_0_is_in_vblank(adev, crtc)) {
-               if (i++ % 100 == 0) {
+               if (i++ == 100) {
+                       i = 0;
                        if (!dce_v8_0_is_counter_moving(adev, crtc))
                                break;
                }
@@ -604,6 +606,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
        WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+       int num_crtc = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+               num_crtc = 6;
+               break;
+       case CHIP_KAVERI:
+               num_crtc = 4;
+               break;
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+               num_crtc = 2;
+               break;
+       default:
+               num_crtc = 0;
+       }
+       return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+       /* disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
+       if (amdgpu_atombios_has_dce_engine_info(adev)) {
+               u32 tmp;
+               int crtc_enabled, i;
+
+               dce_v8_0_set_vga_render_state(adev, false);
+
+               /* disable the CRTCs */
+               for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+                       crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+                                                    CRTC_CONTROL, CRTC_MASTER_EN);
+                       if (crtc_enabled) {
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+                               tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+                               WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+               }
+       }
+}
+
 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
@@ -1501,13 +1549,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 
                        if (sad->format == eld_reg_to_type[i][1]) {
                                if (sad->channels > max_channels) {
-                               value = (sad->channels <<
-                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
-                               (sad->byte2 <<
-                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
-                               (sad->freq <<
-                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
-                               max_channels = sad->channels;
+                                       value = (sad->channels <<
+                                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+                                               (sad->byte2 <<
+                                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+                                               (sad->freq <<
+                                                AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+                                       max_channels = sad->channels;
                                }
 
                                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1613,7 +1661,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        uint32_t offset = dig->afmt->offset;
 
-       WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+       WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
        WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
 
        WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1693,6 +1741,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
+
        offset = dig->afmt->offset;
 
        /* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1817,7 +1866,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
 
        WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
                  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
-                 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+                 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
 
        WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
                 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1826,13 +1875,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
        WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
                  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
 
-       /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
        WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
        WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
        WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
        WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
 
-       /* enable audio after to setting up hw */
+       /* enable audio after setting up hw */
        dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
 }
 
@@ -1952,6 +2000,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
        u32 viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
+       char *format_name;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1999,7 +2048,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
        case DRM_FORMAT_XRGB4444:
        case DRM_FORMAT_ARGB4444:
                fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
-                            (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+                            (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
 #ifdef __BIG_ENDIAN
                fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
 #endif
@@ -2056,8 +2105,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
                bypass_lut = true;
                break;
        default:
-               DRM_ERROR("Unsupported screen format %s\n",
-                         drm_get_format_name(target_fb->pixel_format));
+               format_name = drm_get_format_name(target_fb->pixel_format);
+               DRM_ERROR("Unsupported screen format %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
 
@@ -2137,8 +2187,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
-       /* set pageflip to happen only at start of vblank interval (front porch) */
-       WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2552,7 +2602,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
        .gamma_set = dce_v8_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
        .destroy = dce_v8_0_crtc_destroy,
-       .page_flip = amdgpu_crtc_page_flip,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
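
This hunk switches the CRTC from the legacy .page_flip hook to .page_flip_target, which carries an explicit target vblank count into the driver. On the userspace side the corresponding libdrm call looks roughly like this (a sketch, assuming the drmModePageFlipTarget() wrapper and the TARGET flip flags introduced alongside this kernel work; error handling omitted):

    #include <xf86drmMode.h>

    /* Queue a flip to fb_id, `target` vblanks from now (relative mode). */
    static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
                          uint32_t target, void *user_data)
    {
            return drmModePageFlipTarget(fd, crtc_id, fb_id,
                                         DRM_MODE_PAGE_FLIP_EVENT |
                                         DRM_MODE_PAGE_FLIP_TARGET_RELATIVE,
                                         user_data, target);
    }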
@@ -2653,7 +2703,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
        case ATOM_PPLL2:
                /* disable the ppll */
                amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
-                                         0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
                break;
        case ATOM_PPLL0:
                /* disable the ppll */
@@ -2803,21 +2853,20 @@ static int dce_v8_0_early_init(void *handle)
        dce_v8_0_set_display_funcs(adev);
        dce_v8_0_set_irq_funcs(adev);
 
+       adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
-               adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_KAVERI:
-               adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
-               adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6; /* ? */
                break;
@@ -3236,7 +3285,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
                        drm_handle_vblank(adev->ddev, crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
                break;
        case 1: /* vline */
                if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3245,7 +3293,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
                break;
        default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
index 77016852b2522b3db977b57905968e9f604a9d35..7d0770c3a49bd3ee67128819b56d5c5f74cc66fc 100644 (file)
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
 
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644 (file)
index 0000000..30badd2
--- /dev/null
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+       return;
+}
+
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+       return 0;
+}
+
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+                             int crtc_id, u64 crtc_base, bool async)
+{
+       return;
+}
+
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+                                       u32 *vbl, u32 *position)
+{
+       *vbl = 0;
+       *position = 0;
+
+       return -EINVAL;
+}
+
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+                              enum amdgpu_hpd_id hpd)
+{
+       return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+                                     enum amdgpu_hpd_id hpd)
+{
+       return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+       return false;
+}
+
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+                             struct amdgpu_mode_mc_save *save)
+{
+       switch (adev->asic_type) {
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KAVERI:
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+#ifdef CONFIG_DRM_AMDGPU_CIK
+               dce_v8_0_disable_dce(adev);
+#endif
+               break;
+       case CHIP_FIJI:
+       case CHIP_TONGA:
+               dce_v10_0_disable_dce(adev);
+               break;
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS10:
+               dce_v11_0_disable_dce(adev);
+               break;
+       case CHIP_TOPAZ:
+               /* no DCE */
+               return;
+       default:
+               DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+       }
+
+       return;
+}
+
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+                               struct amdgpu_mode_mc_save *save)
+{
+       return;
+}
+
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+                                   bool render)
+{
+       return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+       return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                                     u16 *green, u16 *blue, uint32_t size)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       int i;
+
+       /* userspace palettes are always correct as is */
+       for (i = 0; i < size; i++) {
+               amdgpu_crtc->lut_r[i] = red[i] >> 6;
+               amdgpu_crtc->lut_g[i] = green[i] >> 6;
+               amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+       }
+
+       return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+       .cursor_set2 = NULL,
+       .cursor_move = NULL,
+       .gamma_set = dce_virtual_crtc_gamma_set,
+       .set_config = amdgpu_crtc_set_config,
+       .destroy = dce_virtual_crtc_destroy,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               amdgpu_crtc->enabled = true;
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+               drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+               amdgpu_crtc->enabled = false;
+               break;
+       }
+}
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       if (crtc->primary->fb) {
+               int r;
+               struct amdgpu_framebuffer *amdgpu_fb;
+               struct amdgpu_bo *rbo;
+
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r))
+                       DRM_ERROR("failed to reserve rbo before unpin\n");
+               else {
+                       amdgpu_bo_unpin(rbo);
+                       amdgpu_bo_unreserve(rbo);
+               }
+       }
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       /* update the hw mode for dpm */
+       amdgpu_crtc->hw_mode = *adjusted_mode;
+
+       return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       amdgpu_crtc->encoder = encoder;
+                       amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+                       break;
+               }
+       }
+       if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+               amdgpu_crtc->encoder = NULL;
+               amdgpu_crtc->connector = NULL;
+               return false;
+       }
+
+       return true;
+}
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+       return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y, enum mode_set_atomic state)
+{
+       return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+       .dpms = dce_virtual_crtc_dpms,
+       .mode_fixup = dce_virtual_crtc_mode_fixup,
+       .mode_set = dce_virtual_crtc_mode_set,
+       .mode_set_base = dce_virtual_crtc_set_base,
+       .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+       .prepare = dce_virtual_crtc_prepare,
+       .commit = dce_virtual_crtc_commit,
+       .load_lut = dce_virtual_crtc_load_lut,
+       .disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+       struct amdgpu_crtc *amdgpu_crtc;
+       int i;
+
+       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (amdgpu_crtc == NULL)
+               return -ENOMEM;
+
+       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+       amdgpu_crtc->crtc_id = index;
+       adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+       for (i = 0; i < 256; i++) {
+               amdgpu_crtc->lut_r[i] = i << 2;
+               amdgpu_crtc->lut_g[i] = i << 2;
+               amdgpu_crtc->lut_b[i] = i << 2;
+       }
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+       return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+       dce_virtual_set_display_funcs(adev);
+       dce_virtual_set_irq_funcs(adev);
+
+       adev->mode_info.num_crtc = 1;
+       adev->mode_info.num_hpd = 1;
+       adev->mode_info.num_dig = 1;
+       return 0;
+}
+
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+       struct amdgpu_i2c_bus_rec ddc_bus;
+       struct amdgpu_router router;
+       struct amdgpu_hpd hpd;
+
+       /* no real ddc or hpd gpios to look up for the virtual connector */
+       ddc_bus.valid = false;
+       hpd.hpd = AMDGPU_HPD_NONE;
+       /* needed for aux chan transactions */
+       ddc_bus.hpd = hpd.hpd;
+
+       memset(&router, 0, sizeof(router));
+       router.ddc_valid = false;
+       router.cd_valid = false;
+       amdgpu_display_add_connector(adev,
+                                     0,
+                                     ATOM_DEVICE_CRT1_SUPPORT,
+                                     DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+                                     CONNECTOR_OBJECT_ID_VIRTUAL,
+                                     &hpd,
+                                     &router);
+
+       amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+                                                       ATOM_DEVICE_CRT1_SUPPORT,
+                                                       0);
+
+       amdgpu_link_encoder_connector(adev->ddev);
+
+       return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+       int r, i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+       if (r)
+               return r;
+
+       adev->ddev->max_vblank_count = 0;
+
+       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       adev->ddev->mode_config.preferred_depth = 24;
+       adev->ddev->mode_config.prefer_shadow = 1;
+
+       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+       r = amdgpu_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       /* allocate crtcs */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = dce_virtual_crtc_init(adev, i);
+               if (r)
+                       return r;
+       }
+
+       dce_virtual_get_connector_info(adev);
+       amdgpu_print_display_setup(adev->ddev);
+
+       drm_kms_helper_poll_init(adev->ddev);
+
+       adev->mode_info.mode_config_initialized = true;
+       return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       kfree(adev->mode_info.bios_hardcoded_edid);
+
+       drm_kms_helper_poll_fini(adev->ddev);
+
+       drm_mode_config_cleanup(adev->ddev);
+       adev->mode_info.mode_config_initialized = false;
+       return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+       return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+       return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+       return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+       return dce_virtual_hw_init(handle);
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+       return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+       .name = "dce_virtual",
+       .early_init = dce_virtual_early_init,
+       .late_init = NULL,
+       .sw_init = dce_virtual_sw_init,
+       .sw_fini = dce_virtual_sw_fini,
+       .hw_init = dce_virtual_hw_init,
+       .hw_fini = dce_virtual_hw_fini,
+       .suspend = dce_virtual_suspend,
+       .resume = dce_virtual_resume,
+       .is_idle = dce_virtual_is_idle,
+       .wait_for_idle = dce_virtual_wait_for_idle,
+       .soft_reset = dce_virtual_soft_reset,
+       .set_clockgating_state = dce_virtual_set_clockgating_state,
+       .set_powergating_state = dce_virtual_set_powergating_state,
+};
+
+/* no hardware behind the virtual encoder; these hooks are intentionally stubs */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+       return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+       return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode,
+                     struct drm_display_mode *adjusted_mode)
+{
+       return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+       return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+                                   const struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
+{
+       /* set the active encoder-to-connector routing */
+       amdgpu_encoder_set_active_device(encoder);
+
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+       .dpms = dce_virtual_encoder_dpms,
+       .mode_fixup = dce_virtual_encoder_mode_fixup,
+       .prepare = dce_virtual_encoder_prepare,
+       .mode_set = dce_virtual_encoder_mode_set,
+       .commit = dce_virtual_encoder_commit,
+       .disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+       .destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+                                uint32_t encoder_enum,
+                                uint32_t supported_device,
+                                u16 caps)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               amdgpu_encoder = to_amdgpu_encoder(encoder);
+               if (amdgpu_encoder->encoder_enum == encoder_enum) {
+                       amdgpu_encoder->devices |= supported_device;
+                       return;
+               }
+
+       }
+
+       /* add a new one */
+       amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+       if (!amdgpu_encoder)
+               return;
+
+       encoder = &amdgpu_encoder->base;
+       encoder->possible_crtcs = 0x1;
+       amdgpu_encoder->enc_priv = NULL;
+       amdgpu_encoder->encoder_enum = encoder_enum;
+       amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+       amdgpu_encoder->devices = supported_device;
+       amdgpu_encoder->rmx_type = RMX_OFF;
+       amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+       amdgpu_encoder->is_ext_encoder = false;
+       amdgpu_encoder->caps = caps;
+
+       drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+                                        DRM_MODE_ENCODER_VIRTUAL, NULL);
+       drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+       DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+       .set_vga_render_state = &dce_virtual_set_vga_render_state,
+       .bandwidth_update = &dce_virtual_bandwidth_update,
+       .vblank_get_counter = &dce_virtual_vblank_get_counter,
+       .vblank_wait = &dce_virtual_vblank_wait,
+       .is_display_hung = &dce_virtual_is_display_hung,
+       .backlight_set_level = NULL,
+       .backlight_get_level = NULL,
+       .hpd_sense = &dce_virtual_hpd_sense,
+       .hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+       .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+       .page_flip = &dce_virtual_page_flip,
+       .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+       .add_encoder = &dce_virtual_encoder_add,
+       .add_connector = &amdgpu_connector_add,
+       .stop_mc_access = &dce_virtual_stop_mc_access,
+       .resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mode_info.funcs == NULL)
+               adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+       struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info, vblank_timer);
+       struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device, mode_info);
+       unsigned crtc = 0;
+       drm_handle_vblank(adev->ddev, crtc);
+       dce_virtual_pageflip_irq(adev, NULL, NULL);
+       hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+       return HRTIMER_NORESTART;
+}
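+
+/*
+ * The hrtimer above stands in for a hardware vblank interrupt:
+ * DCE_VIRTUAL_VBLANK_PERIOD is 16666666 ns, roughly 1/60 s, so the
+ * virtual CRTC reports vblanks and completes page flips at about 60 Hz
+ * even though nothing is actually scanned out.
+ */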
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+                                                    int crtc,
+                                                    enum amdgpu_interrupt_state state)
+{
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       if (state && !adev->mode_info.vsync_timer_enabled) {
+               DRM_DEBUG("Enable software vsync timer\n");
+               hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+               adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+               hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+       } else if (!state && adev->mode_info.vsync_timer_enabled) {
+               DRM_DEBUG("Disable software vsync timer\n");
+               hrtimer_cancel(&adev->mode_info.vblank_timer);
+       }
+
+       adev->mode_info.vsync_timer_enabled = state;
+       DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CRTC_IRQ_VBLANK1:
+               dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+                                         int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+                             struct amdgpu_irq_src *source,
+                             struct amdgpu_iv_entry *entry)
+{
+       unsigned crtc = 0;
+       unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+       dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+       if (amdgpu_irq_enabled(adev, source, irq_type)) {
+               drm_handle_vblank(adev->ddev, crtc);
+       }
+       dce_virtual_pageflip_irq(adev, NULL, NULL);
+       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+       return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
+       }
+       DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+       return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       unsigned long flags;
+       unsigned crtc_id = 0;
+       struct amdgpu_crtc *amdgpu_crtc;
+       struct amdgpu_flip_work *works;
+
+       crtc_id = 0;
+       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
+       }
+
+       /* the IRQ can fire during early init, before the crtc is set up */
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       works = amdgpu_crtc->pflip_works;
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+                       "AMDGPU_FLIP_SUBMITTED(%d)\n",
+                       amdgpu_crtc->pflip_status,
+                       AMDGPU_FLIP_SUBMITTED);
+               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               return 0;
+       }
+
+       /* page flip completed. clean up */
+       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+       amdgpu_crtc->pflip_works = NULL;
+
+       /* wake up userspace */
+       if (works->event)
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
+       schedule_work(&works->unpin_work);
+
+       return 0;
+}
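+
+/*
+ * In the virtual path this handler is driven by the vblank hrtimer (see
+ * dce_virtual_vblank_timer_handle) rather than by a real interrupt, so a
+ * submitted flip completes on the next emulated vblank.
+ */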
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+       .set = dce_virtual_set_crtc_irq_state,
+       .process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+       .set = dce_virtual_set_pageflip_irq_state,
+       .process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+       adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+       adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+       adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
new file mode 100644 (file)
index 0000000..e239243
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
+
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
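+/* software vblank period in nanoseconds: 16666666 ns ~= 16.67 ms ~= 60 Hz */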
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644 (file)
index ed03b75..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_smum.h"
-
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int fiji_dpm_early_init(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       fiji_dpm_set_funcs(adev);
-
-       return 0;
-}
-
-static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
-{
-       char fw_name[30] = "amdgpu/fiji_smc.bin";
-       int err;
-
-       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-       err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-       if (err) {
-               DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-               release_firmware(adev->pm.fw);
-               adev->pm.fw = NULL;
-       }
-       return err;
-}
-
-static int fiji_dpm_sw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ret = fiji_dpm_init_microcode(adev);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int fiji_dpm_sw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       release_firmware(adev->pm.fw);
-       adev->pm.fw = NULL;
-
-       return 0;
-}
-
-static int fiji_dpm_hw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       ret = fiji_smu_init(adev);
-       if (ret) {
-               DRM_ERROR("SMU initialization failed\n");
-               goto fail;
-       }
-
-       ret = fiji_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-
-fail:
-       adev->firmware.smu_load = false;
-       mutex_unlock(&adev->pm.mutex);
-       return -EINVAL;
-}
-
-static int fiji_dpm_hw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       mutex_lock(&adev->pm.mutex);
-       fiji_smu_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-}
-
-static int fiji_dpm_suspend(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       fiji_dpm_hw_fini(adev);
-
-       return 0;
-}
-
-static int fiji_dpm_resume(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       fiji_dpm_hw_init(adev);
-
-       return 0;
-}
-
-static int fiji_dpm_set_clockgating_state(void *handle,
-                       enum amd_clockgating_state state)
-{
-       return 0;
-}
-
-static int fiji_dpm_set_powergating_state(void *handle,
-                       enum amd_powergating_state state)
-{
-       return 0;
-}
-
-const struct amd_ip_funcs fiji_dpm_ip_funcs = {
-       .name = "fiji_dpm",
-       .early_init = fiji_dpm_early_init,
-       .late_init = NULL,
-       .sw_init = fiji_dpm_sw_init,
-       .sw_fini = fiji_dpm_sw_fini,
-       .hw_init = fiji_dpm_hw_init,
-       .hw_fini = fiji_dpm_hw_fini,
-       .suspend = fiji_dpm_suspend,
-       .resume = fiji_dpm_resume,
-       .is_idle = NULL,
-       .wait_for_idle = NULL,
-       .soft_reset = NULL,
-       .set_clockgating_state = fiji_dpm_set_clockgating_state,
-       .set_powergating_state = fiji_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
-       .get_temperature = NULL,
-       .pre_set_power_state = NULL,
-       .set_power_state = NULL,
-       .post_set_power_state = NULL,
-       .display_configuration_changed = NULL,
-       .get_sclk = NULL,
-       .get_mclk = NULL,
-       .print_power_state = NULL,
-       .debugfs_print_current_performance_level = NULL,
-       .force_performance_level = NULL,
-       .vblank_too_short = NULL,
-       .powergate_uvd = NULL,
-};
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
-{
-       if (NULL == adev->pm.funcs)
-               adev->pm.funcs = &fiji_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644 (file)
index b3e19ba..0000000
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_ppsmc.h"
-#include "fiji_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-#define FIJI_SMC_SIZE 0x20000
-
-static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
-       uint32_t val;
-
-       if (smc_address & 3)
-               return -EINVAL;
-
-       if ((smc_address + 3) > limit)
-               return -EINVAL;
-
-       WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       return 0;
-}
-
-static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-       uint32_t addr;
-       uint32_t data, orig_data;
-       int result = 0;
-       uint32_t extra_shift;
-       unsigned long flags;
-
-       if (smc_start_address & 3)
-               return -EINVAL;
-
-       if ((smc_start_address + byte_count) > limit)
-               return -EINVAL;
-
-       addr = smc_start_address;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       while (byte_count >= 4) {
-               /* Bytes are written into the SMC addres space with the MSB first */
-               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-               result = fiji_set_smc_sram_address(adev, addr, limit);
-
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (0 != byte_count) {
-               /* Now write odd bytes left, do a read modify write cycle */
-               data = 0;
-
-               result = fiji_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               orig_data = RREG32(mmSMC_IND_DATA_0);
-               extra_shift = 8 * (4 - byte_count);
-
-               while (byte_count > 0) {
-                       data = (data << 8) + *src++;
-                       byte_count--;
-               }
-
-               data <<= extra_shift;
-               data |= (orig_data & ~((~0UL) << extra_shift));
-
-               result = fiji_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-       }
-
-out:
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int fiji_program_jump_on_start(struct amdgpu_device *adev)
-{
-       static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-       fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-       return 0;
-}
-
-static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
-       return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32(mmSMC_RESP_0);
-               if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, 0x20000);
-       WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send message\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-       if (!fiji_is_smc_ram_running(adev))
-       {
-               return -EINVAL;
-       }
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send message\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-                                               PPSMC_Msg msg)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-                                               PPSMC_Msg msg,
-                                               uint32_t parameter)
-{
-       if (!fiji_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return fiji_send_msg_to_smc(adev, msg);
-}
-
-static int fiji_send_msg_to_smc_with_parameter_without_waiting(
-                                       struct amdgpu_device *adev,
-                                       PPSMC_Msg msg, uint32_t parameter)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return fiji_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       if (!fiji_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-               if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
-static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-       const struct smc_firmware_header_v1_0 *hdr;
-       uint32_t ucode_size;
-       uint32_t ucode_start_address;
-       const uint8_t *src;
-       uint32_t val;
-       uint32_t byte_count;
-       uint32_t *data;
-       unsigned long flags;
-
-       if (!adev->pm.fw)
-               return -EINVAL;
-
-       /* Skip SMC ucode loading on SR-IOV capable boards.
-        * vbios does this for us in asic_init in that case.
-        */
-       if (adev->virtualization.supports_sr_iov)
-               return 0;
-
-       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-       amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-       src = (const uint8_t *)
-               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-       if (ucode_size & 3) {
-               DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-               return -EINVAL;
-       }
-
-       if (ucode_size > FIJI_SMC_SIZE) {
-               DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-               return -EINVAL;
-       }
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       byte_count = ucode_size;
-       data = (uint32_t *)src;
-       for (; byte_count >= 4; data++, byte_count -= 4)
-               WREG32(mmSMC_IND_DATA_0, data[0]);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-       return 0;
-}
-
-#if 0 /* not used yet */
-static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
-                               uint32_t smc_address,
-                               uint32_t *value,
-                               uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = fiji_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               *value = RREG32(mmSMC_IND_DATA_0);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
-                               uint32_t smc_address,
-                               uint32_t value,
-                               uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = fiji_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               WREG32(mmSMC_IND_DATA_0, value);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int fiji_smu_stop_smc(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case UCODE_ID_SDMA0:
-                       return AMDGPU_UCODE_ID_SDMA0;
-               case UCODE_ID_SDMA1:
-                       return AMDGPU_UCODE_ID_SDMA1;
-               case UCODE_ID_CP_CE:
-                       return AMDGPU_UCODE_ID_CP_CE;
-               case UCODE_ID_CP_PFP:
-                       return AMDGPU_UCODE_ID_CP_PFP;
-               case UCODE_ID_CP_ME:
-                       return AMDGPU_UCODE_ID_CP_ME;
-               case UCODE_ID_CP_MEC:
-               case UCODE_ID_CP_MEC_JT1:
-               case UCODE_ID_CP_MEC_JT2:
-                       return AMDGPU_UCODE_ID_CP_MEC1;
-               case UCODE_ID_RLC_G:
-                       return AMDGPU_UCODE_ID_RLC_G;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return AMDGPU_UCODE_ID_MAXIMUM;
-       }
-}
-
-static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-                                               uint32_t fw_type,
-                                               struct SMU_Entry *entry)
-{
-       enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
-       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-       const struct gfx_firmware_header_v1_0 *header = NULL;
-       uint64_t gpu_addr;
-       uint32_t data_size;
-
-       if (ucode->fw == NULL)
-               return -EINVAL;
-       gpu_addr  = ucode->mc_addr;
-       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-       data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-       if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
-               (fw_type == UCODE_ID_CP_MEC_JT2)) {
-               gpu_addr += le32_to_cpu(header->jt_offset) << 2;
-               data_size = le32_to_cpu(header->jt_size) << 2;
-       }
-
-       entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-       entry->id = (uint16_t)fw_type;
-       entry->image_addr_high = upper_32_bits(gpu_addr);
-       entry->image_addr_low = lower_32_bits(gpu_addr);
-       entry->meta_data_addr_high = 0;
-       entry->meta_data_addr_low = 0;
-       entry->data_size_byte = data_size;
-       entry->num_register_entries = 0;
-
-       if (fw_type == UCODE_ID_RLC_G)
-               entry->flags = 1;
-       else
-               entry->flags = 0;
-
-       return 0;
-}
-
-static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
-{
-       struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
-       struct SMU_DRAMData_TOC *toc;
-       uint32_t fw_to_load;
-
-       WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
-       fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
-       fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
-       toc = (struct SMU_DRAMData_TOC *)private->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       if (!adev->firmware.smu_load)
-               return 0;
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for RLC\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for CE\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for PFP\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for ME\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-               return -EINVAL;
-       }
-
-       if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-               return -EINVAL;
-       }
-
-       fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-       fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK |
-                       UCODE_ID_SDMA0_MASK |
-                       UCODE_ID_SDMA1_MASK |
-                       UCODE_ID_CP_CE_MASK |
-                       UCODE_ID_CP_ME_MASK |
-                       UCODE_ID_CP_PFP_MASK |
-                       UCODE_ID_CP_MEC_MASK;
-
-       if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-               DRM_ERROR("Fail to request SMU load ucode\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case AMDGPU_UCODE_ID_SDMA0:
-                       return UCODE_ID_SDMA0_MASK;
-               case AMDGPU_UCODE_ID_SDMA1:
-                       return UCODE_ID_SDMA1_MASK;
-               case AMDGPU_UCODE_ID_CP_CE:
-                       return UCODE_ID_CP_CE_MASK;
-               case AMDGPU_UCODE_ID_CP_PFP:
-                       return UCODE_ID_CP_PFP_MASK;
-               case AMDGPU_UCODE_ID_CP_ME:
-                       return UCODE_ID_CP_ME_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC2:
-                       return UCODE_ID_CP_MEC_MASK;
-               case AMDGPU_UCODE_ID_RLC_G:
-                       return UCODE_ID_RLC_G_MASK;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return 0;
-       }
-}
-
-static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
-                                       uint32_t fw_type)
-{
-       uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
-       int i;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("check firmware loading failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
-       int result;
-       uint32_t val;
-       int i;
-
-       /* Assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       result = fiji_smu_upload_firmware_image(adev);
-       if (result)
-               return result;
-
-       /* Clear status */
-       WREG32_SMC(ixSMU_STATUS, 0);
-
-       /* Enable clock */
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       /* De-assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       /* Set SMU Auto Start */
-       val = RREG32_SMC(ixSMU_INPUT_DATA);
-       val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
-       WREG32_SMC(ixSMU_INPUT_DATA, val);
-
-       /* Clear firmware interrupt enable flag */
-       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixRCU_UC_EVENTS);
-               if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Interrupt is not enabled by firmware\n");
-               return -EINVAL;
-       }
-
-       /* Call Test SMU message with 0x20000 offset
-        * to trigger SMU start
-        */
-       fiji_send_msg_to_smc_offset(adev);
-       DRM_INFO("[FM]try triger smu start\n");
-       /* Wait for done bit to be set */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixSMU_STATUS);
-               if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Timeout for SMU start\n");
-               return -EINVAL;
-       }
-
-       /* Check pass/failed indicator */
-       val = RREG32_SMC(ixSMU_STATUS);
-       if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
-               DRM_ERROR("SMU Firmware start failed\n");
-               return -EINVAL;
-       }
-       DRM_INFO("[FM]smu started\n");
-       /* Wait for firmware to initialize */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixFIRMWARE_FLAGS);
-               if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("SMU firmware initialization failed\n");
-               return -EINVAL;
-       }
-       DRM_INFO("[FM]smu initialized\n");
-
-       return 0;
-}
-
-static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
-       int i, result;
-       uint32_t val;
-
-       /* wait for smc boot up */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixRCU_UC_EVENTS);
-               val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
-               if (val)
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("SMC boot sequence is not completed\n");
-               return -EINVAL;
-       }
-
-       /* Clear firmware interrupt enable flag */
-       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-       /* Assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       result = fiji_smu_upload_firmware_image(adev);
-       if (result)
-               return result;
-
-       /* Set smc instruct start point at 0x0 */
-       fiji_program_jump_on_start(adev);
-
-       /* Enable clock */
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       /* De-assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       /* Wait for firmware to initialize */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixFIRMWARE_FLAGS);
-               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Timeout for SMC firmware initialization\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int fiji_smu_start(struct amdgpu_device *adev)
-{
-       int result;
-       uint32_t val;
-
-       if (!fiji_is_smc_ram_running(adev)) {
-               val = RREG32_SMC(ixSMU_FIRMWARE);
-               if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
-                       DRM_INFO("[FM]start smu in nonprotection mode\n");
-                       result = fiji_smu_start_in_non_protection_mode(adev);
-                       if (result)
-                               return result;
-               } else {
-                       DRM_INFO("[FM]start smu in protection mode\n");
-                       result = fiji_smu_start_in_protection_mode(adev);
-                       if (result)
-                               return result;
-               }
-       }
-
-       return fiji_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
-       .check_fw_load_finish = fiji_smu_check_fw_load_finish,
-       .request_smu_load_fw = NULL,
-       .request_smu_specific_fw = NULL,
-};
-
-int fiji_smu_init(struct amdgpu_device *adev)
-{
-       struct fiji_smu_private_data *private;
-       uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       uint32_t smu_internal_buffer_size = 200*4096;
-       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-       struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
-       uint64_t mc_addr;
-       void *toc_buf_ptr;
-       void *smu_buf_ptr;
-       int ret;
-
-       private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
-       if (NULL == private)
-               return -ENOMEM;
-
-       /* allocate firmware buffers */
-       if (adev->firmware.smu_load)
-               amdgpu_ucode_init_bo(adev);
-
-       adev->smu.priv = private;
-       adev->smu.fw_flags = 0;
-
-       /* Allocate FW image data structure and header buffer */
-       ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM,
-                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                              NULL, NULL, toc_buf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Allocate buffer for SMU internal buffer */
-       ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM,
-                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                              NULL, NULL, smu_buf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Retrieve GPU address for header buffer and internal buffer */
-       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-       if (ret) {
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to reserve the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to pin the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to map the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       amdgpu_bo_unreserve(adev->smu.toc_buf);
-       private->header_addr_low = lower_32_bits(mc_addr);
-       private->header_addr_high = upper_32_bits(mc_addr);
-       private->header = toc_buf_ptr;
-
-       ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
-       if (ret) {
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to reserve the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to pin the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to map the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       amdgpu_bo_unreserve(adev->smu.smu_buf);
-       private->smu_buffer_addr_low = lower_32_bits(mc_addr);
-       private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
-       adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
-
-       return 0;
-}
-
-int fiji_smu_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_unref(&adev->smu.toc_buf);
-       amdgpu_bo_unref(&adev->smu.smu_buf);
-       kfree(adev->smu.priv);
-       adev->smu.priv = NULL;
-       if (adev->firmware.fw_buf)
-               amdgpu_ucode_fini_bo(adev);
-
-       return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
deleted file mode 100644 (file)
index 1cef03d..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_SMUMGR_H
-#define FIJI_SMUMGR_H
-
-#include "fiji_ppsmc.h"
-
-int fiji_smu_init(struct amdgpu_device *adev);
-int fiji_smu_fini(struct amdgpu_device *adev);
-int fiji_smu_start(struct amdgpu_device *adev);
-
-struct fiji_smu_private_data
-{
-       uint8_t *header;
-       uint32_t smu_buffer_addr_high;
-       uint32_t smu_buffer_addr_low;
-       uint32_t header_addr_high;
-       uint32_t header_addr_low;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644 (file)
index 0000000..40abb6b
--- /dev/null
@@ -0,0 +1,3362 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS     1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE                    (1 << 3)
+#define DYN_PER_CU_PG_ENABLE                       (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
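+/* SI microcode is shared with the radeon driver, hence the radeon/ paths. */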
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+/* static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); */
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
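+/*
+ * Save/restore register list consumed by the RLC.  Entries pack a
+ * GRBM_GFX_INDEX selector in the upper 16 bits and a register offset
+ * (in dwords) in the lower 16 bits, followed by a slot for the saved
+ * value.
+ */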
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+       (0x8000 << 16) | (0x98f4 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x98f4 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0xe80 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0xe80 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x89bc >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x89bc >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x8c1c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x8c1c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x98f0 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xe7c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9148 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9148 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9150 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x897c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8d8c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac54 >> 2),
+       0x00000000,
+       0x3,
+       (0x9c00 << 16) | (0x98f8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9910 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9914 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9918 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x991c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9920 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9924 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9928 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x992c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9930 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9934 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9938 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x993c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9940 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9944 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9948 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x994c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9950 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9954 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9958 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x995c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9960 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9964 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9968 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x996c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9970 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9974 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9978 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x997c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9980 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9984 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9988 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x998c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c00 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c04 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c08 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9b7c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9b7c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0xe84 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0xe84 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x89c0 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x89c0 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x914c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x914c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x8c20 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x8c20 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9354 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9354 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9060 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9364 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9100 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x913c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e0 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e4 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e8 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e0 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e4 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8bcc >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8b24 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88c4 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e50 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c0c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e58 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e5c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9508 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x950c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9494 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac0c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac10 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xae00 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac08 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88d4 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88c8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88cc >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x89b0 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8b10 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8a14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9830 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9834 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9838 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9a10 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8001 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8001 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8041 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8041 << 16) | (0x9874 >> 2),
+       0x00000000,
+       0x00000000
+};
+
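+/*
+ * Fetch and validate the PFP, ME, CE and RLC microcode for the detected
+ * SI variant and record the version/feature levels.  On any failure all
+ * four firmware handles are released so the caller sees a clean state.
+ */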
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+       const struct gfx_firmware_header_v1_0 *cp_hdr;
+       const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               chip_name = "hainan";
+               break;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+       err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+       err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.me_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+       adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+       err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+       err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       if (err)
+               goto out;
+       rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+       if (err) {
+               printk(KERN_ERR
+                      "gfx6: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(adev->gfx.pfp_fw);
+               adev->gfx.pfp_fw = NULL;
+               release_firmware(adev->gfx.me_fw);
+               adev->gfx.me_fw = NULL;
+               release_firmware(adev->gfx.ce_fw);
+               adev->gfx.ce_fw = NULL;
+               release_firmware(adev->gfx.rlc_fw);
+               adev->gfx.rlc_fw = NULL;
+       }
+       return err;
+}
+
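+/*
+ * Program GB_TILE_MODE0..31 with the per-ASIC tiling configuration and
+ * mirror each value in adev->gfx.config.tile_mode_array for later
+ * lookups.
+ */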
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+       const u32 num_tile_mode_states = 32;
+       u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+               break;
+       case 2:
+       default:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+               break;
+       case 4:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+               break;
+       }
+
+       if (adev->asic_type == CHIP_VERDE ||
+           adev->asic_type == CHIP_OLAND ||
+           adev->asic_type == CHIP_HAINAN) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 7:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 12:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 17:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 23:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:  /* non-AA compressed depth or any compressed stencil */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 1:  /* 2xAA/4xAA compressed depth only */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 2:  /* 8xAA compressed depth only */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 8:  /* 1D and 1D Array Surfaces */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 9:  /* Displayable maps. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 10:  /* Display 8bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 11:  /* Display 16bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 12:  /* Display 32bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 13:  /* Thin. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 14:  /* Thin 8 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 15:  /* Thin 16 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 16:  /* Thin 32 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 17:  /* Thin 64 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 21:  /* 8 bpp PRT. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 22:  /* 16 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 23:  /* 32 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 24:  /* 64 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 25:  /* 128 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else {
+               DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+       }
+}
+
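+/*
+ * Steer indexed register accesses via GRBM_GFX_INDEX to a particular
+ * shader engine / shader array / instance; 0xffffffff requests
+ * broadcast writes to all of them.
+ */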
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+                                 u32 sh_num, u32 instance)
+{
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = INSTANCE_BROADCAST_WRITES;
+       else
+               data = INSTANCE_INDEX(instance);
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+               data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+       else if (se_num == 0xffffffff)
+               data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+       else if (sh_num == 0xffffffff)
+               data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+       else
+               data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+       WREG32(GRBM_GFX_INDEX, data);
+}
+
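+/* Return a mask with the bit_width least-significant bits set. */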
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+       return (u32)(((u64)1 << bit_width) - 1);
+}
+
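+/*
+ * Combine the fuse (CC) and user (GC_USER) backend-disable registers and
+ * return the bitmask of disabled RBs for the currently selected SE/SH.
+ */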
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+                                   u32 max_rb_num_per_se,
+                                   u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_RB_BACKEND_DISABLE);
+       data &= BACKEND_DISABLE_MASK;
+       data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+       data >>= BACKEND_DISABLE_SHIFT;
+
+       mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+       return data & mask;
+}
+
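+/* Derive the default PA_SC_RASTER_CONFIG value for the given ASIC. */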
+static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
+{
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+               *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
+                         SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+               break;
+       case CHIP_VERDE:
+               *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+               break;
+       case CHIP_OLAND:
+               *rconf |= RB_YSEL;
+               break;
+       case CHIP_HAINAN:
+               *rconf |= 0x0;
+               break;
+       default:
+               DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+               break;
+       }
+}
+
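+/*
+ * With harvested RBs the default raster config would reference missing
+ * backends, so rewrite the SE/PKR/RB map fields per shader engine to
+ * point at enabled RBs only, then program PA_SC_RASTER_CONFIG for each
+ * SE in turn.
+ */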
+static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+                                                   u32 raster_config, unsigned rb_mask,
+                                                   unsigned num_rb)
+{
+       unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+       unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+       unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+       unsigned rb_per_se = num_rb / num_se;
+       unsigned se_mask[4];
+       unsigned se;
+
+       se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+       se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+       se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+       se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+       WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+       WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+       WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+       for (se = 0; se < num_se; se++) {
+               unsigned raster_config_se = raster_config;
+               unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+               unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+               int idx = (se / 2) * 2;
+
+               if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+                       raster_config_se &= ~SE_MAP_MASK;
+
+                       if (!se_mask[idx]) {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+                       } else {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+                       }
+               }
+
+               pkr0_mask &= rb_mask;
+               pkr1_mask &= rb_mask;
+               if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+                       raster_config_se &= ~PKR_MAP_MASK;
+
+                       if (!pkr0_mask) {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+                       } else {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+                       }
+               }
+
+               if (rb_per_se >= 2) {
+                       unsigned rb0_mask = 1 << (se * rb_per_se);
+                       unsigned rb1_mask = rb0_mask << 1;
+
+                       rb0_mask &= rb_mask;
+                       rb1_mask &= rb_mask;
+                       if (!rb0_mask || !rb1_mask) {
+                               raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+                               if (!rb0_mask) {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+                               } else {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+                               }
+                       }
+
+                       if (rb_per_se > 2) {
+                               rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+                               rb1_mask = rb0_mask << 1;
+                               rb0_mask &= rb_mask;
+                               rb1_mask &= rb_mask;
+                               if (!rb0_mask || !rb1_mask) {
+                                       raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+                                       if (!rb0_mask) {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+                                       } else {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+                                       }
+                               }
+                       }
+               }
+
+               /* GRBM_GFX_INDEX has a different offset on SI */
+               gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+               WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+       }
+
+       /* GRBM_GFX_INDEX has a different offset on SI */
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
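+/*
+ * setup_rb() walks every SE/SH pair and collects the per-SH disabled-RB
+ * bits (each SH contributes max_rb_num_per_se / sh_per_se bits, packed
+ * at a stride of TAHITI_RB_BITMAP_WIDTH_PER_SH), inverts the result
+ * into an enabled-RB mask, and then programs PA_SC_RASTER_CONFIG for
+ * each SE.  If fewer RBs are enabled than the expected num_rb_pipes,
+ * the harvested variant above is used instead of the plain per-ASIC
+ * raster config.
+ */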
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+                             u32 se_num, u32 sh_per_se,
+                             u32 max_rb_num_per_se)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+       unsigned num_rb_pipes;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+                       data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       adev->gfx.config.backend_enable_mask = enabled_rbs;
+       adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+       num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+                            adev->gfx.config.max_shader_engines, 16);
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               gfx_v6_0_raster_config(adev, &data);
+
+               if (!adev->gfx.config.backend_enable_mask ||
+                               adev->gfx.config.num_rbs >= num_rb_pipes)
+                       WREG32(PA_SC_RASTER_CONFIG, data);
+               else
+                       gfx_v6_0_write_harvested_raster_configs(adev, data,
+                                                               adev->gfx.config.backend_enable_mask,
+                                                               num_rb_pipes);
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       data &= INACTIVE_CUS_MASK;
+       data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+       data >>= INACTIVE_CUS_SHIFT;
+
+       mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+       return ~data & mask;
+}
+
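+/*
+ * setup_spi() walks every SE/SH, computes the active-CU bitmap and
+ * clears the enable bit of the first active CU it finds in
+ * SPI_STATIC_THREAD_MGMT_3, i.e. one CU per SH is carved out of the
+ * static thread management mask.
+ */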
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+                        u32 se_num, u32 sh_per_se,
+                        u32 cu_per_sh)
+{
+       int i, j, k;
+       u32 data, mask;
+       u32 active_cu = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+                       data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+                       active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+                       for (k = 0; k < 16; k++) {
+                               mask = 1 << k;
+                               if (active_cu & mask) {
+                                       data &= ~mask;
+                                       WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+                                       break;
+                               }
+                       }
+               }
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+       u32 gb_addr_config = 0;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 sx_debug_1;
+       u32 hdp_host_path_cntl;
+       u32 tmp;
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 12;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 12;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_PITCAIRN:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 8;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+               break;
+
+       case CHIP_VERDE:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_OLAND:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 6;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 16;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_HAINAN:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 1;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 16;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 1);
+       WREG32(SRBM_INT_ACK, 1);
+
+       WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+       mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+       mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+       adev->gfx.config.mem_max_burst_length_bytes = 256;
+       tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+       adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+       if (adev->gfx.config.mem_row_size_in_kb > 4)
+               adev->gfx.config.mem_row_size_in_kb = 4;
+       adev->gfx.config.shader_engine_tile_size = 32;
+       adev->gfx.config.num_gpus = 1;
+       adev->gfx.config.multi_gpu_tile_size = 64;
+
+       gb_addr_config &= ~ROW_SIZE_MASK;
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+       default:
+               gb_addr_config |= ROW_SIZE(0);
+               break;
+       case 2:
+               gb_addr_config |= ROW_SIZE(1);
+               break;
+       case 4:
+               gb_addr_config |= ROW_SIZE(2);
+               break;
+       }
+       adev->gfx.config.gb_addr_config = gb_addr_config;
+
+       WREG32(GB_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CALC, gb_addr_config);
+       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+       if (adev->has_uvd) {
+               WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       }
+#endif
+       gfx_v6_0_tiling_mode_table_init(adev);
+
+       gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+                   adev->gfx.config.max_sh_per_se,
+                   adev->gfx.config.max_backends_per_se);
+
+       gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+                    adev->gfx.config.max_sh_per_se,
+                    adev->gfx.config.max_cu_per_sh);
+
+       gfx_v6_0_get_cu_info(adev);
+
+       WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+                                    ROQ_IB2_START(0x2b)));
+       WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+       sx_debug_1 = RREG32(SX_DEBUG_1);
+       WREG32(SX_DEBUG_1, sx_debug_1);
+
+       WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+       WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+                                SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+                                SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+                                SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+       WREG32(VGT_NUM_INSTANCES, 1);
+       WREG32(CP_PERFMON_CNTL, 0);
+       WREG32(SQ_CONFIG, 0);
+       WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+                                         FORCE_EOV_MAX_REZ_CNT(255)));
+
+       WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+              AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+       WREG32(VGT_GS_VERTEX_REUSE, 16);
+       WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+       WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+       hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+       WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+       WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+       udelay(50);
+}
+
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       adev->gfx.scratch.num_reg = 7;
+       adev->gfx.scratch.reg_base = SCRATCH_REG0;
+       for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+               adev->gfx.scratch.free[i] = true;
+               adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+       }
+}
+
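+/*
+ * Basic CP sanity check: the ring test seeds a scratch register with a
+ * poison value and asks the CP to overwrite it.  Roughly:
+ *
+ *   WREG32(scratch, 0xCAFEDEAD);                   seed the poison value
+ *   emit PACKET3_SET_CONFIG_REG(scratch, 0xDEADBEEF) on the ring
+ *   poll RREG32(scratch) until 0xDEADBEEF or adev->usec_timeout expires
+ *
+ * If the poison value is never replaced, the CP is not fetching and
+ * executing packets and the test returns -EINVAL.
+ */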
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+
+       r = amdgpu_ring_alloc(ring, 3);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_gfx_scratch_free(adev, scratch);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+                         ring->idx, scratch, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_gfx_scratch_free(adev, scratch);
+       return r;
+}
+
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       /* flush hdp cache */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, HDP_DEBUG0);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0x1);
+}
+
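+/*
+ * Fence emission is a two-step sequence: a SURFACE_SYNC packet flushes
+ * the read caches (TC/TCL1/K$/I$) over the GART, then EVENT_WRITE_EOP
+ * writes the sequence number to the fence address once the pipe has
+ * drained.  DATA_SEL selects a 32- or 64-bit write of 'seq' and
+ * INT_SEL optionally raises an interrupt along with the write.
+ */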
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+                                    u64 seq, unsigned flags)
+{
+       bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+       bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+       /* flush read cache over gart */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);
+       amdgpu_ring_write(ring, 0xFFFFFFFF);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 10); /* poll interval */
+       /* EVENT_WRITE_EOP - flush caches, send int */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+                               DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+       amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
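+/*
+ * IB dispatch uses INDIRECT_BUFFER (or INDIRECT_BUFFER_CONST for CE
+ * IBs) with a three-dword payload:
+ *
+ *   dw1: ib->gpu_addr & 0xFFFFFFFC   (plus 2 in bits 1:0 on big endian)
+ *   dw2: upper_32_bits(ib->gpu_addr) & 0xFFFF
+ *   dw3: control = ib->length_dw | (vm_id << 24)
+ *
+ * i.e. the IB size in dwords lives in the low bits of the control word
+ * and the VMID in bits 24 and up.
+ */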
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib,
+                                 unsigned vm_id, bool ctx_switch)
+{
+       u32 header, control = 0;
+
+       /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+       if (ctx_switch) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+
+       if (ib->flags & AMDGPU_IB_FLAG_CE)
+               header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+       else
+               header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw | (vm_id << 24);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       struct fence *f = NULL;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       long r;
+
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       memset(&ib, 0, sizeof(ib));
+       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+               goto err1;
+       }
+       ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+       ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
+       ib.ptr[2] = 0xDEADBEEF;
+       ib.length_dw = 3;
+
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+       if (r)
+               goto err2;
+
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
+               goto err2;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto err2;
+       }
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+                         scratch, tmp);
+               r = -EINVAL;
+       }
+
+err2:
+       amdgpu_ib_free(adev, &ib, NULL);
+       fence_put(f);
+err1:
+       amdgpu_gfx_scratch_free(adev, scratch);
+       return r;
+}
+
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+       int i;
+
+       if (enable) {
+               WREG32(CP_ME_CNTL, 0);
+       } else {
+               WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+               WREG32(SCRATCH_UMSK, 0);
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+                       adev->gfx.gfx_ring[i].ready = false;
+               for (i = 0; i < adev->gfx.num_compute_rings; i++)
+                       adev->gfx.compute_ring[i].ready = false;
+       }
+       udelay(50);
+}
+
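+/*
+ * Microcode upload is the same ADDR/DATA register-pair dance for all
+ * three CP engines (PFP, CE, ME), e.g. for the PFP:
+ *
+ *   WREG32(CP_PFP_UCODE_ADDR, 0);
+ *   for (i = 0; i < fw_size; i++)
+ *           WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ *   WREG32(CP_PFP_UCODE_ADDR, 0);
+ *
+ * The CP must be halted (gfx_v6_0_cp_gfx_enable(adev, false)) before
+ * the upload starts.
+ */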
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+       unsigned i;
+       const struct gfx_firmware_header_v1_0 *pfp_hdr;
+       const struct gfx_firmware_header_v1_0 *ce_hdr;
+       const struct gfx_firmware_header_v1_0 *me_hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+
+       if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+               return -EINVAL;
+
+       gfx_v6_0_cp_gfx_enable(adev, false);
+       pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+       amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+       /* PFP */
+       fw_data = (const __le32 *)
+               (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+
+       /* CE */
+       fw_data = (const __le32 *)
+               (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_CE_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_CE_UCODE_ADDR, 0);
+
+       /* ME */
+       fw_data = (const __le32 *)
+               (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_ME_RAM_WADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_ME_RAM_WADDR, 0);
+
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       WREG32(CP_CE_UCODE_ADDR, 0);
+       WREG32(CP_ME_RAM_WADDR, 0);
+       WREG32(CP_ME_RAM_RADDR, 0);
+       return 0;
+}
+
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+       struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+       int r, i;
+
+       r = amdgpu_ring_alloc(ring, 7 + 4);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       amdgpu_ring_write(ring, 0x1);
+       amdgpu_ring_write(ring, 0x0);
+       amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+       amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+       amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+       amdgpu_ring_write(ring, 0xc000);
+       amdgpu_ring_write(ring, 0xe000);
+       amdgpu_ring_commit(ring);
+
+       gfx_v6_0_cp_gfx_enable(adev, true);
+
+       r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT) {
+                               amdgpu_ring_write(ring,
+                                                 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+                               amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+                               for (i = 0; i < ext->reg_count; i++)
+                                       amdgpu_ring_write(ring, ext->extent[i]);
+                       }
+               }
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       amdgpu_ring_write(ring, 0x00000316);
+       amdgpu_ring_write(ring, 0x0000000e);
+       amdgpu_ring_write(ring, 0x00000010);
+
+       amdgpu_ring_commit(ring);
+
+       return 0;
+}
+
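+/*
+ * Ring buffer bring-up below: CP_RB0_CNTL takes the ring size as
+ * log2(ring_size / 8), i.e. in quadwords, the rptr writeback address is
+ * split across CP_RB0_RPTR_ADDR/_HI, and CP_RB0_BASE takes the GPU
+ * address shifted right by 8 (256-byte aligned).  RB_RPTR_WR_ENA is
+ * set only while the pointers are being reset.
+ */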
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 tmp;
+       u32 rb_bufsz;
+       int r;
+       u64 rptr_addr;
+
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
+       WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+       /* Set the write pointer delay */
+       WREG32(CP_RB_WPTR_DELAY, 0);
+
+       WREG32(CP_DEBUG, 0);
+       WREG32(SCRATCH_ADDR, 0);
+
+       /* ring 0 - compute and gfx */
+       /* Set ring buffer size */
+       ring = &adev->gfx.gfx_ring[0];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB0_CNTL, tmp);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB0_WPTR, ring->wptr);
+
+       /* set the wb address whether it's enabled or not */
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       WREG32(SCRATCH_UMSK, 0);
+
+       mdelay(1);
+       WREG32(CP_RB0_CNTL, tmp);
+
+       WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+       /* start the rings */
+       gfx_v6_0_cp_gfx_start(adev);
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->gfx.gfx_ring[0])
+               return RREG32(CP_RB0_WPTR);
+       else if (ring == &adev->gfx.compute_ring[0])
+               return RREG32(CP_RB1_WPTR);
+       else if (ring == &adev->gfx.compute_ring[1])
+               return RREG32(CP_RB2_WPTR);
+       else
+               BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32(CP_RB0_WPTR, ring->wptr);
+       (void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->gfx.compute_ring[0]) {
+               WREG32(CP_RB1_WPTR, ring->wptr);
+               (void)RREG32(CP_RB1_WPTR);
+       } else if (ring == &adev->gfx.compute_ring[1]) {
+               WREG32(CP_RB2_WPTR, ring->wptr);
+               (void)RREG32(CP_RB2_WPTR);
+       } else {
+               BUG();
+       }
+}
+
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 tmp;
+       u32 rb_bufsz;
+       int r;
+       u64 rptr_addr;
+
+       /* ring 1 - compute only */
+       /* Set ring buffer size */
+
+       ring = &adev->gfx.compute_ring[0];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB1_CNTL, tmp);
+
+       WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB1_WPTR, ring->wptr);
+
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       mdelay(1);
+       WREG32(CP_RB1_CNTL, tmp);
+       WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+       ring = &adev->gfx.compute_ring[1];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB2_CNTL, tmp);
+
+       WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB2_WPTR, ring->wptr);
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       mdelay(1);
+       WREG32(CP_RB2_CNTL, tmp);
+       WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+       adev->gfx.compute_ring[0].ready = true;
+       adev->gfx.compute_ring[1].ready = true;
+
+       r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+       if (r) {
+               adev->gfx.compute_ring[0].ready = false;
+               return r;
+       }
+
+       r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+       if (r) {
+               adev->gfx.compute_ring[1].ready = false;
+               return r;
+       }
+
+       return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+       gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+       return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+                                              bool enable)
+{
+       u32 tmp = RREG32(CP_INT_CNTL_RING0);
+       u32 mask;
+       int i;
+
+       if (enable)
+               tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       else
+               tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       WREG32(CP_INT_CNTL_RING0, tmp);
+
+       if (!enable) {
+               /* read a gfx register */
+               tmp = RREG32(DB_DEPTH_INFO);
+
+               mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+                               break;
+                       udelay(1);
+               }
+       }
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+       r = gfx_v6_0_cp_load_microcode(adev);
+       if (r)
+               return r;
+
+       r = gfx_v6_0_cp_gfx_resume(adev);
+       if (r)
+               return r;
+       r = gfx_v6_0_cp_compute_resume(adev);
+       if (r)
+               return r;
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       return 0;
+}
+
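+/*
+ * The pipeline sync below is a single WAIT_REG_MEM on the fence
+ * writeback address: mem_space = 1 (memory), function = 3 (equal),
+ * engine = PFP for gfx rings and ME otherwise.  The CP stalls, with a
+ * poll interval of 4, until the fence memory reaches sync_seq.
+ */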
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, 0xffffffff);
+       amdgpu_ring_write(ring, 4); /* poll interval */
+
+       if (usepfp) {
+               /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+}
+
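+/*
+ * VM flush sequence: write the new page directory base (pd_addr >> 12)
+ * into the per-VMID page table base register, request a TLB flush by
+ * writing 1 << vm_id to VM_INVALIDATE_REQUEST, then issue a
+ * WAIT_REG_MEM on that register (ref = 0, mask = 0, function "always"),
+ * which just orders a register read after the invalidate before the CP
+ * continues.
+ */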
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                       unsigned vm_id, uint64_t pd_addr)
+{
+       int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+       /* write new base address */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       if (vm_id < 8) {
+               amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       } else {
+               amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+       }
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for the invalidate to complete */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0); /* ref */
+       amdgpu_ring_write(ring, 0); /* mask */
+       amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+       if (usepfp) {
+               /* sync PFP to ME, otherwise we might get invalid PFP reads */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               amdgpu_ring_write(ring, 0x0);
+
+               /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+}
+
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gfx.rlc.save_restore_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+               adev->gfx.rlc.save_restore_obj = NULL;
+       }
+
+       if (adev->gfx.rlc.clear_state_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+               adev->gfx.rlc.clear_state_obj = NULL;
+       }
+
+       if (adev->gfx.rlc.cp_table_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+               adev->gfx.rlc.cp_table_obj = NULL;
+       }
+}
+
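+/*
+ * rlc_init() allocates and fills two VRAM buffer objects: the
+ * save/restore list (a verbatim copy of
+ * verde_rlc_save_restore_register_list) and the clear-state buffer.
+ * The latter starts with a 256-byte header:
+ *
+ *   dw0: upper_32_bits(csb address)
+ *   dw1: lower_32_bits(csb address)
+ *   dw2: clear-state buffer size in dwords
+ *
+ * with the CSB payload itself placed at offset 256.
+ */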
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+       const u32 *src_ptr;
+       volatile u32 *dst_ptr;
+       u32 dws, i;
+       u64 reg_list_mc_addr;
+       const struct cs_section_def *cs_data;
+       int r;
+
+       adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+       adev->gfx.rlc.reg_list_size =
+                       (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+       adev->gfx.rlc.cs_data = si_cs_data;
+       src_ptr = adev->gfx.rlc.reg_list;
+       dws = adev->gfx.rlc.reg_list_size;
+       cs_data = adev->gfx.rlc.cs_data;
+
+       if (src_ptr) {
+               /* save restore block */
+               if (adev->gfx.rlc.save_restore_obj == NULL) {
+                       r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.save_restore_obj);
+
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               if (unlikely(r != 0)) {
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.save_restore_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+
+               r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               /* write the sr buffer */
+               dst_ptr = adev->gfx.rlc.sr_ptr;
+               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+       }
+
+       if (cs_data) {
+               /* clear state block */
+               adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+               dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+               if (adev->gfx.rlc.clear_state_obj == NULL) {
+                       r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.clear_state_obj);
+
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+                               gfx_v6_0_rlc_fini(adev);
+                               return r;
+                       }
+               }
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               if (unlikely(r != 0)) {
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.clear_state_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+
+               r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               /* set up the cs buffer */
+               dst_ptr = adev->gfx.rlc.cs_ptr;
+               reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+               dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+               dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+               dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+               gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+       }
+
+       return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp;
+
+       tmp = RREG32(RLC_LB_CNTL);
+       if (enable)
+               tmp |= LOAD_BALANCE_ENABLE;
+       else
+               tmp &= ~LOAD_BALANCE_ENABLE;
+       WREG32(RLC_LB_CNTL, tmp);
+
+       if (!enable) {
+               gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               WREG32(SPI_LB_CU_MASK, 0x00ff);
+       }
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+                       break;
+               udelay(1);
+       }
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+                       break;
+               udelay(1);
+       }
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+       u32 tmp;
+
+       tmp = RREG32(RLC_CNTL);
+       if (tmp != rlc)
+               WREG32(RLC_CNTL, rlc);
+}
+
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_CNTL);
+
+       if (data & RLC_ENABLE) {
+               data &= ~RLC_ENABLE;
+               WREG32(RLC_CNTL, data);
+
+               gfx_v6_0_wait_for_rlc_serdes(adev);
+       }
+
+       return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+       WREG32(RLC_CNTL, 0);
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+       WREG32(RLC_CNTL, RLC_ENABLE);
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+       tmp |= SOFT_RESET_RLC;
+       WREG32(GRBM_SOFT_RESET, tmp);
+       udelay(50);
+       tmp &= ~SOFT_RESET_RLC;
+       WREG32(GRBM_SOFT_RESET, tmp);
+       udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       /* Enable LBPW only for DDR3 */
+       tmp = RREG32(MC_SEQ_MISC0);
+       if ((tmp & 0xF0000000) == 0xB0000000)
+               return true;
+       return false;
+}
+
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
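+/*
+ * RLC bring-up: stop and soft-reset the RLC, program the power/clock
+ * gating defaults, clear the load-balancer registers, then stream the
+ * RLC microcode one dword at a time through RLC_UCODE_ADDR/DATA.
+ * LBPW is only enabled when MC_SEQ_MISC0 reports DDR3 (top nibble
+ * 0xB).
+ */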
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+       u32 i;
+       const struct rlc_firmware_header_v1_0 *hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+
+       if (!adev->gfx.rlc_fw)
+               return -EINVAL;
+
+       gfx_v6_0_rlc_stop(adev);
+       gfx_v6_0_rlc_reset(adev);
+       gfx_v6_0_init_pg(adev);
+       gfx_v6_0_init_cg(adev);
+
+       WREG32(RLC_RL_BASE, 0);
+       WREG32(RLC_RL_SIZE, 0);
+       WREG32(RLC_LB_CNTL, 0);
+       WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+       WREG32(RLC_LB_CNTR_INIT, 0);
+       WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+       WREG32(RLC_MC_CNTL, 0);
+       WREG32(RLC_UCODE_CNTL, 0);
+
+       hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+       fw_data = (const __le32 *)
+               (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+       for (i = 0; i < fw_size; i++) {
+               WREG32(RLC_UCODE_ADDR, i);
+               WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+       }
+       WREG32(RLC_UCODE_ADDR, 0);
+
+       gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+       gfx_v6_0_rlc_start(adev);
+
+       return 0;
+}
+
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig, tmp;
+
+       orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+               gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+               WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+               gfx_v6_0_wait_for_rlc_serdes(adev);
+               gfx_v6_0_update_rlc(adev, tmp);
+
+               WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+               data |= CGCG_EN | CGLS_EN;
+       } else {
+               gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+
+               data &= ~(CGCG_EN | CGLS_EN);
+       }
+
+       if (orig != data)
+               WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig, tmp = 0;
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+               orig = data = RREG32(CGTS_SM_CTRL_REG);
+               data = 0x96940200;
+               if (orig != data)
+                       WREG32(CGTS_SM_CTRL_REG, data);
+
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+                       orig = data = RREG32(CP_MEM_SLP_CNTL);
+                       data |= CP_MEM_LS_EN;
+                       if (orig != data)
+                               WREG32(CP_MEM_SLP_CNTL, data);
+               }
+
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+               data &= 0xffffffc0;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+               gfx_v6_0_update_rlc(adev, tmp);
+       } else {
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+               data |= 0x00000003;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+               data = RREG32(CP_MEM_SLP_CNTL);
+               if (data & CP_MEM_LS_EN) {
+                       data &= ~CP_MEM_LS_EN;
+                       WREG32(CP_MEM_SLP_CNTL, data);
+               }
+               orig = data = RREG32(CGTS_SM_CTRL_REG);
+               data |= LS_OVERRIDE | OVERRIDE;
+               if (orig != data)
+                       WREG32(CGTS_SM_CTRL_REG, data);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+               gfx_v6_0_update_rlc(adev, tmp);
+       }
+}
+
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+                              bool enable)
+{
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       if (enable) {
+               gfx_v6_0_enable_mgcg(adev, true);
+               gfx_v6_0_enable_cgcg(adev, true);
+       } else {
+               gfx_v6_0_enable_cgcg(adev, false);
+               gfx_v6_0_enable_mgcg(adev, false);
+       }
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+                                               bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+                                               bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+               data &= ~0x8000;
+       else
+               data |= 0x8000;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me = 4;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       if (adev->asic_type == CHIP_KAVERI)
+               max_me = 5;
+
+       if (adev->gfx.rlc.cp_table_ptr == NULL)
+               return;
+
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i ++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+*/
+
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 tmp;
+
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+               tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+               WREG32(RLC_TTOP_D, tmp);
+
+               tmp = RREG32(RLC_PG_CNTL);
+               tmp |= GFX_PG_ENABLE;
+               WREG32(RLC_PG_CNTL, tmp);
+
+               tmp = RREG32(RLC_AUTO_PG_CTRL);
+               tmp |= AUTO_PG_EN;
+               WREG32(RLC_AUTO_PG_CTRL, tmp);
+       } else {
+               tmp = RREG32(RLC_AUTO_PG_CTRL);
+               tmp &= ~AUTO_PG_EN;
+               WREG32(RLC_AUTO_PG_CTRL, tmp);
+
+               tmp = RREG32(DB_RENDER_CONTROL);
+       }
+}
+
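+/*
+ * The active-CU bitmap is derived from the harvest fuses: the upper
+ * half of CC_GC_SHADER_ARRAY_CONFIG holds the inactive-CU bits, which
+ * are OR-ed with the user override register, shifted down, inverted
+ * and masked to max_cu_per_sh.  Example with illustrative values: for
+ * max_cu_per_sh = 5 and inactive bits 0b00100, the result is 0b11011
+ * (CU 2 fused off, the rest active).
+ */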
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+                                        u32 se, u32 sh)
+{
+       u32 mask = 0, tmp, tmp1;
+       int i;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+       tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       tmp &= 0xffff0000;
+       tmp |= tmp1;
+       tmp >>= 16;
+
+       for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+
+       return (~tmp) & mask;
+}
+
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+       u32 i, j, k, active_cu_number = 0;
+       u32 mask, counter, cu_bitmap, bitmap;
+       u32 tmp = 0;
+
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
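+                       /* read the CU bitmap once per SH; the helper takes grbm_idx_mutex */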
+                       mask = 1;
+                       cu_bitmap = 0;
+                       counter = 0;
+                       bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+                       for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+                               if (bitmap & mask) {
+                                       if (counter < 2)
+                                               cu_bitmap |= mask;
+                                       counter++;
+                               }
+                               mask <<= 1;
+                       }
+
+                       active_cu_number += counter;
+                       tmp |= (cu_bitmap << (i * 16 + j * 8));
+               }
+       }
+
+       WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+       tmp = RREG32(RLC_MAX_PG_CU);
+       tmp &= ~MAX_PU_CU_MASK;
+       tmp |= MAX_PU_CU(active_cu_number);
+       WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+                                           bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+               data |= STATIC_PER_CU_PG_ENABLE;
+       else
+               data &= ~STATIC_PER_CU_PG_ENABLE;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+                                            bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+               data |= DYN_PER_CU_PG_ENABLE;
+       else
+               data &= ~DYN_PER_CU_PG_ENABLE;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
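+       /* base addresses are programmed in 256-byte units, hence the >> 8 */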
+       WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+       tmp = RREG32(RLC_PG_CNTL);
+       tmp |= GFX_PG_SRC;
+       WREG32(RLC_PG_CNTL, tmp);
+
+       WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+       tmp = RREG32(RLC_AUTO_PG_CTRL);
+       tmp &= ~GRBM_REG_SGIT_MASK;
+       tmp |= GRBM_REG_SGIT(0x700);
+       tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+       WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+       gfx_v6_0_enable_gfx_cgpg(adev, enable);
+       gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+       gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+       u32 count = 0;
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+
+       if (adev->gfx.rlc.cs_data == NULL)
+               return 0;
+
+       /* begin clear state */
+       count += 2;
+       /* context control state */
+       count += 3;
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT)
+                               count += 2 + ext->reg_count;
+                       else
+                               return 0;
+               }
+       }
+       /* pa_sc_raster_config */
+       count += 3;
+       /* end clear state */
+       count += 2;
+       /* clear state */
+       count += 2;
+
+       return count;
+}
+
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+                                   volatile u32 *buffer)
+{
+       u32 count = 0, i;
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+
+       if (adev->gfx.rlc.cs_data == NULL)
+               return;
+       if (buffer == NULL)
+               return;
+
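+       /* preamble: begin clear state, then CONTEXT_CONTROL with load_enable set */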
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       buffer[count++] = cpu_to_le32(0x80000000);
+       buffer[count++] = cpu_to_le32(0x80000000);
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT) {
+                               buffer[count++] =
+                                       cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+                               buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+                               for (i = 0; i < ext->reg_count; i++)
+                                       buffer[count++] = cpu_to_le32(ext->extent[i]);
+                       } else {
+                               return;
+                       }
+               }
+       }
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
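+       /* PA_SC_RASTER_CONFIG value is per-ASIC */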
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+               buffer[count++] = cpu_to_le32(0x2a00126a);
+               break;
+       case CHIP_VERDE:
+               buffer[count++] = cpu_to_le32(0x0000124a);
+               break;
+       case CHIP_OLAND:
+               buffer[count++] = cpu_to_le32(0x00000082);
+               break;
+       case CHIP_HAINAN:
+               buffer[count++] = cpu_to_le32(0x00000000);
+               break;
+       default:
+               buffer[count++] = cpu_to_le32(0x00000000);
+               break;
+       }
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+       buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+               gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_init_gfx_cgpg(adev);
+                       gfx_v6_0_enable_cp_pg(adev, true);
+                       gfx_v6_0_enable_gds_pg(adev, true);
+               } else {
+                       WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+                       WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+               }
+               gfx_v6_0_init_ao_cu_mask(adev);
+               gfx_v6_0_update_gfx_pg(adev, true);
+       } else {
+               WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+               WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+       }
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_update_gfx_pg(adev, false);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_enable_cp_pg(adev, false);
+                       gfx_v6_0_enable_gds_pg(adev, false);
+               }
+       }
+}
+
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+       uint64_t clock;
+
+       mutex_lock(&adev->gfx.gpu_clock_mutex);
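+       /* writing 1 latches the full 64-bit counter so LSB and MSB read back consistently */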
+       WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+       clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+               ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+       mutex_unlock(&adev->gfx.gpu_clock_mutex);
+       return clock;
+}
+
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
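+       /* 0x80000000 sets load_enable, otherwise the CONTEXT_CONTROL packet is just NOPs */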
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, 0x80000000);
+       amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* gfx_v6_0_ring_emit_ib */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               5 + /* gfx_v6_0_ring_emit_hdp_flush */
+               5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+               14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+               17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+               3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               5 + /* gfx_v6_0_ring_emit_hdp_flush */
+               5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v6_0_ring_emit_vm_flush */
+               14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+       adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+       gfx_v6_0_set_ring_funcs(adev);
+       gfx_v6_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+       struct amdgpu_ring *ring;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i, r;
+
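+       /* EOP event */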
+       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       if (r)
+               return r;
+
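+       /* privileged register access */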
+       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       if (r)
+               return r;
+
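+       /* privileged instruction */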
+       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       if (r)
+               return r;
+
+       gfx_v6_0_scratch_init(adev);
+
+       r = gfx_v6_0_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load gfx firmware!\n");
+               return r;
+       }
+
+       r = gfx_v6_0_rlc_init(adev);
+       if (r) {
+               DRM_ERROR("Failed to init rlc BOs!\n");
+               return r;
+       }
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+               ring = &adev->gfx.gfx_ring[i];
+               ring->ring_obj = NULL;
+               sprintf(ring->name, "gfx");
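+               /* 0x80000000 and 0xf are assumed to be the ring's NOP packet and alignment mask */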
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    0x80000000, 0xf,
+                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+                                    AMDGPU_RING_TYPE_GFX);
+               if (r)
+                       return r;
+       }
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               unsigned irq_type;
+
+               if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+                       DRM_ERROR("Too many (%d) compute rings!\n", i);
+                       break;
+               }
+               ring = &adev->gfx.compute_ring[i];
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               ring->doorbell_index = 0;
+               ring->me = 1;
+               ring->pipe = i;
+               ring->queue = i;
+               sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+               irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    0x80000000, 0xf,
+                                    &adev->gfx.eop_irq, irq_type,
+                                    AMDGPU_RING_TYPE_COMPUTE);
+               if (r)
+                       return r;
+       }
+
+       return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+       gfx_v6_0_rlc_fini(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gfx_v6_0_gpu_init(adev);
+
+       r = gfx_v6_0_rlc_resume(adev);
+       if (r)
+               return r;
+
+       r = gfx_v6_0_cp_resume(adev);
+       if (r)
+               return r;
+
+       adev->gfx.ce_ram_size = 0x8000;
+
+       return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gfx_v6_0_cp_enable(adev, false);
+       gfx_v6_0_rlc_stop(adev);
+       gfx_v6_0_fini_pg(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return !(RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK);
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (gfx_v6_0_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+                                                enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+                                                    int ring,
+                                                    enum amdgpu_interrupt_state state)
+{
+       /* compute ring 0 uses CP_INT_CNTL_RING1, ring 1 uses CP_INT_CNTL_RING2 */
+       u32 reg = (ring == 0) ? CP_INT_CNTL_RING1 : CP_INT_CNTL_RING2;
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(reg);
+               cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(reg, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(reg);
+               cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(reg, cp_int_cntl);
+               break;
+       default:
+               BUG();
+               break;
+       }
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *src,
+                                             unsigned type,
+                                             enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CP_IRQ_GFX_EOP:
+               gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+               gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+               gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+                           struct amdgpu_irq_src *source,
+                           struct amdgpu_iv_entry *entry)
+{
+       switch (entry->ring_id) {
+       case 0:
+               amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+               break;
+       case 1:
+       case 2:
+               amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+                                struct amdgpu_irq_src *source,
+                                struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal register access in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool gate = (state == AMD_CG_STATE_GATE);
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       if (gate) {
+               gfx_v6_0_enable_mgcg(adev, true);
+               gfx_v6_0_enable_cgcg(adev, true);
+       } else {
+               gfx_v6_0_enable_cgcg(adev, false);
+               gfx_v6_0_enable_mgcg(adev, false);
+       }
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool gate = (state == AMD_PG_STATE_GATE);
+
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_update_gfx_pg(adev, gate);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_enable_cp_pg(adev, gate);
+                       gfx_v6_0_enable_gds_pg(adev, gate);
+               }
+       }
+
+       return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+       .name = "gfx_v6_0",
+       .early_init = gfx_v6_0_early_init,
+       .late_init = NULL,
+       .sw_init = gfx_v6_0_sw_init,
+       .sw_fini = gfx_v6_0_sw_fini,
+       .hw_init = gfx_v6_0_hw_init,
+       .hw_fini = gfx_v6_0_hw_fini,
+       .suspend = gfx_v6_0_suspend,
+       .resume = gfx_v6_0_resume,
+       .is_idle = gfx_v6_0_is_idle,
+       .wait_for_idle = gfx_v6_0_wait_for_idle,
+       .soft_reset = gfx_v6_0_soft_reset,
+       .set_clockgating_state = gfx_v6_0_set_clockgating_state,
+       .set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+       .get_rptr = gfx_v6_0_ring_get_rptr,
+       .get_wptr = gfx_v6_0_ring_get_wptr,
+       .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v6_0_ring_emit_ib,
+       .emit_fence = gfx_v6_0_ring_emit_fence,
+       .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+       .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = gfx_v6_0_ring_test_ring,
+       .test_ib = gfx_v6_0_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+       .get_rptr = gfx_v6_0_ring_get_rptr,
+       .get_wptr = gfx_v6_0_ring_get_wptr,
+       .set_wptr = gfx_v6_0_ring_set_wptr_compute,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v6_0_ring_emit_ib,
+       .emit_fence = gfx_v6_0_ring_emit_fence,
+       .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+       .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = gfx_v6_0_ring_test_ring,
+       .test_ib = gfx_v6_0_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+       .set = gfx_v6_0_set_eop_interrupt_state,
+       .process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+       .set = gfx_v6_0_set_priv_reg_fault_state,
+       .process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+       .set = gfx_v6_0_set_priv_inst_fault_state,
+       .process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+       adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+       adev->gfx.priv_reg_irq.num_types = 1;
+       adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+       adev->gfx.priv_inst_irq.num_types = 1;
+       adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+       int i, j, k, counter, active_cu_number = 0;
+       u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+       struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+       memset(cu_info, 0, sizeof(*cu_info));
+
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       mask = 1;
+                       ao_bitmap = 0;
+                       counter = 0;
+                       bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+                       cu_info->bitmap[i][j] = bitmap;
+
+                       for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+                               if (bitmap & mask) {
+                                       if (counter < 2)
+                                               ao_bitmap |= mask;
+                                       counter++;
+                               }
+                               mask <<= 1;
+                       }
+                       active_cu_number += counter;
+                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+               }
+       }
+
+       cu_info->number = active_cu_number;
+       cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
new file mode 100644 (file)
index 0000000..b9657e7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
+
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+
+#endif
index d869d058ef24f1324ff861db8318e5a3c12f8081..71116da9e782d52597837b2dfa95a17b2007998d 100644 (file)
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
        return (~data) & mask;
 }
 
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+       switch (adev->asic_type) {
+       case CHIP_BONAIRE:
+               *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+                         SE_XSEL(1) | SE_YSEL(1);
+               *rconf1 |= 0x0;
+               break;
+       case CHIP_HAWAII:
+               *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+                         RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+                         PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+                         SE_YSEL(3);
+               *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+                          SE_PAIR_YSEL(2);
+               break;
+       case CHIP_KAVERI:
+               *rconf |= RB_MAP_PKR0(2);
+               *rconf1 |= 0x0;
+               break;
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+               *rconf |= 0x0;
+               *rconf1 |= 0x0;
+               break;
+       default:
+               DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+               break;
+       }
+}
+
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+                                       u32 raster_config, u32 raster_config_1,
+                                       unsigned rb_mask, unsigned num_rb)
+{
+       unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+       unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+       unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+       unsigned rb_per_se = num_rb / num_se;
+       unsigned se_mask[4];
+       unsigned se;
+
+       se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+       se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+       se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+       se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+       WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+       WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+       WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+       if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+                            (!se_mask[2] && !se_mask[3]))) {
+               raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+               if (!se_mask[0] && !se_mask[1]) {
+                       raster_config_1 |=
+                               SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+               } else {
+                       raster_config_1 |=
+                               SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+               }
+       }
+
+       for (se = 0; se < num_se; se++) {
+               unsigned raster_config_se = raster_config;
+               unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+               unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+               int idx = (se / 2) * 2;
+
+               if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+                       raster_config_se &= ~SE_MAP_MASK;
+
+                       if (!se_mask[idx]) {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+                       } else {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+                       }
+               }
+
+               pkr0_mask &= rb_mask;
+               pkr1_mask &= rb_mask;
+               if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+                       raster_config_se &= ~PKR_MAP_MASK;
+
+                       if (!pkr0_mask) {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+                       } else {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+                       }
+               }
+
+               if (rb_per_se >= 2) {
+                       unsigned rb0_mask = 1 << (se * rb_per_se);
+                       unsigned rb1_mask = rb0_mask << 1;
+
+                       rb0_mask &= rb_mask;
+                       rb1_mask &= rb_mask;
+                       if (!rb0_mask || !rb1_mask) {
+                               raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+                               if (!rb0_mask) {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+                               } else {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+                               }
+                       }
+
+                       if (rb_per_se > 2) {
+                               rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+                               rb1_mask = rb0_mask << 1;
+                               rb0_mask &= rb_mask;
+                               rb1_mask &= rb_mask;
+                               if (!rb0_mask || !rb1_mask) {
+                                       raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+                                       if (!rb0_mask) {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+                                       } else {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+                                       }
+                               }
+                       }
+               }
+
+               /* GRBM_GFX_INDEX has a different offset on CI+ */
+               gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+               WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+               WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+       }
+
+       /* GRBM_GFX_INDEX has a different offset on CI+ */
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
 /**
  * gfx_v7_0_setup_rb - setup the RBs on the asic
  *
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 {
        int i, j;
        u32 data;
+       u32 raster_config = 0, raster_config_1 = 0;
        u32 active_rbs = 0;
        u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
                                        adev->gfx.config.max_sh_per_se;
+       unsigned num_rb_pipes;
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
                }
        }
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
        adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+       num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+                            adev->gfx.config.max_shader_engines, 16);
+
+       gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
+
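+       /* if no RBs are harvested, program the default raster config; otherwise recompute it per SE */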
+       if (!adev->gfx.config.backend_enable_mask ||
+                       adev->gfx.config.num_rbs >= num_rb_pipes) {
+               WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+               WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+       } else {
+               gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+                                                       adev->gfx.config.backend_enable_mask,
+                                                       num_rb_pipes);
+       }
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 /**
@@ -2096,6 +2254,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+       uint32_t dw2 = 0;
+
+       dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
+       if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+               /* set load_global_config & load_global_uconfig */
+               dw2 |= 0x8001;
+               /* set load_cs_sh_regs */
+               dw2 |= 0x01000000;
+               /* set load_per_context_state & load_gfx_sh_regs */
+               dw2 |= 0x10002;
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, dw2);
+       amdgpu_ring_write(ring, 0);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -2443,7 +2620,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
        return 0;
 }
 
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        return ring->adev->wb.wb[ring->rptr_offs];
 }
@@ -2463,11 +2640,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
        (void)RREG32(mmCP_RB0_WPTR);
 }
 
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-       return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
        /* XXX check if swapping is necessary on BE */
@@ -2755,8 +2927,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
        u64 wb_gpu_addr;
        u32 *buf;
        struct bonaire_mqd *mqd;
-
-       gfx_v7_0_cp_compute_enable(adev, true);
+       struct amdgpu_ring *ring;
 
        /* fix up chicken bits */
        tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2962,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
        /* init the queues.  Just two for now. */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+               ring = &adev->gfx.compute_ring[i];
 
                if (ring->mqd_obj == NULL) {
                        r = amdgpu_bo_create(adev,
@@ -2970,6 +3141,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
                amdgpu_bo_unreserve(ring->mqd_obj);
 
                ring->ready = true;
+       }
+
+       gfx_v7_0_cp_compute_enable(adev, true);
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               ring = &adev->gfx.compute_ring[i];
+
                r = amdgpu_ring_test_ring(ring);
                if (r)
                        ring->ready = false;
@@ -4176,6 +4354,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v7_0_ring_emit_gds_switch */
+               7 + /* gfx_v7_0_ring_emit_hdp_flush */
+               5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+               12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+               7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+               17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+               3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v7_0_ring_emit_gds_switch */
+               7 + /* gfx_v7_0_ring_emit_hdp_flush */
+               5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v7_0_ring_emit_vm_flush */
+               7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4465,24 +4678,21 @@ static int gfx_v7_0_sw_init(void *handle)
        }
 
        /* reserve GDS, GWS and OA resource for gfx */
-       r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-                       PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_GDS, 0,
-                       NULL, NULL, &adev->gds.gds_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+                                   &adev->gds.gds_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
-       r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-               PAGE_SIZE, true,
-               AMDGPU_GEM_DOMAIN_GWS, 0,
-               NULL, NULL, &adev->gds.gws_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+                                   &adev->gds.gws_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
-       r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-                       PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_OA, 0,
-                       NULL, NULL, &adev->gds.oa_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+                                   &adev->gds.oa_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
@@ -4498,9 +4708,9 @@ static int gfx_v7_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+       amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4931,7 +5141,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
-       .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+       .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
@@ -4946,10 +5156,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+       .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
-       .get_rptr = gfx_v7_0_ring_get_rptr_compute,
+       .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .parse_cs = NULL,
@@ -4964,6 +5177,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+       .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
index bff8668e9e6d466e059d70f719c0a818235040dc..6c6ff57b1c95f2824537933c8c8ecd6525bda3ab 100644 (file)
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
 
 static const u32 golden_settings_polaris11_a11[] =
 {
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
        mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
        mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
-       mmSQ_CONFIG, 0x07f80000, 0x07180000,
+       mmSQ_CONFIG, 0x07f80000, 0x01180000,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
-       mmCB_HW_CONTROL_2, 0, 0x0f000000,
+       mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
        mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
        mmGB_GPU_ID, 0x0000000f, 0x00000000,
        mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+       mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
        mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
@@ -699,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 polaris10_golden_common_all,
                                                 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
                WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
-               if (adev->pdev->revision == 0xc7) {
+               if (adev->pdev->revision == 0xc7 &&
+                   ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+                    (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+                    (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
                        amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
                        amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
                }
@@ -1229,10 +1236,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
        if (adev->gfx.rlc.clear_state_obj) {
                r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
                if (unlikely(r != 0))
-                       dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+                       dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
                amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
                adev->gfx.rlc.clear_state_obj = NULL;
        }
@@ -1244,7 +1250,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
                        dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
                amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
                adev->gfx.rlc.cp_table_obj = NULL;
        }
@@ -1286,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
                                  &adev->gfx.rlc.clear_state_gpu_addr);
                if (r) {
                        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-                       dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+                       dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
                        gfx_v8_0_rlc_fini(adev);
                        return r;
                }
 
                r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
-                       dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+                       dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
                        gfx_v8_0_rlc_fini(adev);
                        return r;
                }
@@ -1328,7 +1333,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
                                  &adev->gfx.rlc.cp_table_gpu_addr);
                if (r) {
                        amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-                       dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+                       dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
                        return r;
                }
                r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1341,7 +1346,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 
                amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
        }
 
        return 0;
@@ -1357,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
                        dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
                amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
                amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
                adev->gfx.mec.hpd_eop_obj = NULL;
        }
@@ -2078,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle)
        }
 
        /* reserve GDS, GWS and OA resource for gfx */
-       r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-                       PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
-                       NULL, &adev->gds.gds_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+                                   &adev->gds.gds_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
-       r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-               PAGE_SIZE, true,
-               AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
-               NULL, &adev->gds.gws_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+                                   &adev->gds.gws_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
-       r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-                       PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_OA, 0, NULL,
-                       NULL, &adev->gds.oa_gfx_bo);
+       r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+                                   PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+                                   &adev->gds.oa_gfx_bo, NULL, NULL);
        if (r)
                return r;
 
@@ -2113,9 +2113,9 @@ static int gfx_v8_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+       amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2123,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
        gfx_v8_0_mec_fini(adev);
-
        gfx_v8_0_rlc_fini(adev);
-
        gfx_v8_0_free_microcode(adev);
 
        return 0;
@@ -3461,19 +3459,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
        else
                data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
-       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
-               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+       if (se_num == 0xffffffff)
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-       } else if (se_num == 0xffffffff) {
-               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-       } else if (sh_num == 0xffffffff) {
-               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+       else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-       } else {
+
+       if (sh_num == 0xffffffff)
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+       else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-       }
+
        WREG32(mmGRBM_GFX_INDEX, data);
 }
 
@@ -3486,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
 
-       data = RREG32(mmCC_RB_BACKEND_DISABLE);
-       data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+       data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
+               RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
-       data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-       data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+       data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
        mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
                                       adev->gfx.config.max_sh_per_se);
@@ -3498,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
        return (~data) & mask;
 }
 
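Two patterns recur throughout this patch. First, CC_RB_BACKEND_DISABLE and GC_USER_RB_BACKEND_DISABLE share the same field layout, so the two reads can be OR-ed together and decoded with either register's field macros. Second, REG_GET_FIELD/REG_SET_FIELD are token-pasting helpers; their shape is roughly the following (a sketch, not the verbatim amdgpu.h definitions):

	#define REG_GET_FIELD(value, reg, field)			\
		(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

	#define REG_SET_FIELD(orig_val, reg, field, field_val)		\
		(((orig_val) & ~reg##__##field##_MASK) |		\
		 ((field_val) << reg##__##field##__SHIFT))
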
+static void
+gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+       switch (adev->asic_type) {
+       case CHIP_FIJI:
+               *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+                         RB_XSEL2(1) | PKR_MAP(2) |
+                         PKR_XSEL(1) | PKR_YSEL(1) |
+                         SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
+               *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+                          SE_PAIR_YSEL(2);
+               break;
+       case CHIP_TONGA:
+       case CHIP_POLARIS10:
+               *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+                         SE_XSEL(1) | SE_YSEL(1);
+               *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
+                          SE_PAIR_YSEL(2);
+               break;
+       case CHIP_TOPAZ:
+       case CHIP_CARRIZO:
+               *rconf |= RB_MAP_PKR0(2);
+               *rconf1 |= 0x0;
+               break;
+       case CHIP_POLARIS11:
+               *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+                         SE_XSEL(1) | SE_YSEL(1);
+               *rconf1 |= 0x0;
+               break;
+       case CHIP_STONEY:
+               *rconf |= 0x0;
+               *rconf1 |= 0x0;
+               break;
+       default:
+               DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+               break;
+       }
+}
+
+static void
+gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+                                       u32 raster_config, u32 raster_config_1,
+                                       unsigned rb_mask, unsigned num_rb)
+{
+       unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+       unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+       unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+       unsigned rb_per_se = num_rb / num_se;
+       unsigned se_mask[4];
+       unsigned se;
+
+       se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+       se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+       se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+       se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+       WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+       WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+       WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+       if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+                            (!se_mask[2] && !se_mask[3]))) {
+               raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+               if (!se_mask[0] && !se_mask[1]) {
+                       raster_config_1 |=
+                               SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+               } else {
+                       raster_config_1 |=
+                               SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+               }
+       }
+
+       for (se = 0; se < num_se; se++) {
+               unsigned raster_config_se = raster_config;
+               unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+               unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+               int idx = (se / 2) * 2;
+
+               if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+                       raster_config_se &= ~SE_MAP_MASK;
+
+                       if (!se_mask[idx]) {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+                       } else {
+                               raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+                       }
+               }
+
+               pkr0_mask &= rb_mask;
+               pkr1_mask &= rb_mask;
+               if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+                       raster_config_se &= ~PKR_MAP_MASK;
+
+                       if (!pkr0_mask) {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+                       } else {
+                               raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+                       }
+               }
+
+               if (rb_per_se >= 2) {
+                       unsigned rb0_mask = 1 << (se * rb_per_se);
+                       unsigned rb1_mask = rb0_mask << 1;
+
+                       rb0_mask &= rb_mask;
+                       rb1_mask &= rb_mask;
+                       if (!rb0_mask || !rb1_mask) {
+                               raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+                               if (!rb0_mask) {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+                               } else {
+                                       raster_config_se |=
+                                               RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+                               }
+                       }
+
+                       if (rb_per_se > 2) {
+                               rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+                               rb1_mask = rb0_mask << 1;
+                               rb0_mask &= rb_mask;
+                               rb1_mask &= rb_mask;
+                               if (!rb0_mask || !rb1_mask) {
+                                       raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+                                       if (!rb0_mask) {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+                                       } else {
+                                               raster_config_se |=
+                                                       RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+                                       }
+                               }
+                       }
+               }
+
+               /* GRBM_GFX_INDEX has a different offset on VI */
+               gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+               WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+               WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+       }
+
+       /* GRBM_GFX_INDEX has a different offset on VI */
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
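The se_mask[] values above are simply rb_per_se-wide windows sliced out of the global rb_mask; with e.g. num_rb = 8 across num_se = 4 engines (an illustrative assumption) the slices line up as:

	rb_per_se  = 8 / 4;             /* 2 RBs per engine */
	se_mask[0] = 0x03 & rb_mask;    /* RBs 0-1 -> SE0   */
	se_mask[1] = 0x0c & rb_mask;    /* RBs 2-3 -> SE1   */
	se_mask[2] = 0x30 & rb_mask;    /* RBs 4-5 -> SE2   */
	se_mask[3] = 0xc0 & rb_mask;    /* RBs 6-7 -> SE3   */

A zero slice means every RB in that engine was harvested, which is the case the SE_MAP/PKR_MAP/RB_MAP fixups in the loop above remap around.
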
 static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
        int i, j;
        u32 data;
+       u32 raster_config = 0, raster_config_1 = 0;
        u32 active_rbs = 0;
        u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
                                        adev->gfx.config.max_sh_per_se;
+       unsigned num_rb_pipes;
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3516,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
                }
        }
        gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
        adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+       num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+                            adev->gfx.config.max_shader_engines, 16);
+
+       gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
+
+       if (!adev->gfx.config.backend_enable_mask ||
+                       adev->gfx.config.num_rbs >= num_rb_pipes) {
+               WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+               WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+       } else {
+               gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+                                                       adev->gfx.config.backend_enable_mask,
+                                                       num_rb_pipes);
+       }
+
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 /**
@@ -3572,16 +3732,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
        u32 tmp;
        int i;
 
-       tmp = RREG32(mmGRBM_CNTL);
-       tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
-       WREG32(mmGRBM_CNTL, tmp);
-
+       WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
        WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
 
        gfx_v8_0_tiling_mode_table_init(adev);
-
        gfx_v8_0_setup_rb(adev);
        gfx_v8_0_get_cu_info(adev);
 
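WREG32_FIELD, used heavily from here on, is what lets the repeated read/modify/write triples collapse to one line; it presumably expands along these lines (a sketch, the real macro lives in amdgpu.h):

	#define WREG32_FIELD(reg, field, val)				\
		WREG32(mm##reg,						\
		       (RREG32(mm##reg) & ~reg##__##field##_MASK) |	\
		       ((val) << reg##__##field##__SHIFT))

So WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF) above is the one-line equivalent of the removed RREG32/REG_SET_FIELD/WREG32 sequence.
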
@@ -3765,9 +3921,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
                                sizeof(indirect_start_offsets)/sizeof(int));
 
        /* save and restore list */
-       temp = RREG32(mmRLC_SRM_CNTL);
-       temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
-       WREG32(mmRLC_SRM_CNTL, temp);
+       WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
 
        WREG32(mmRLC_SRM_ARAM_ADDR, 0);
        for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3804,11 +3958,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
 
 static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
 {
-       uint32_t data;
-
-       data = RREG32(mmRLC_SRM_CNTL);
-       data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-       WREG32(mmRLC_SRM_CNTL, data);
+       WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
 }
 
 static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
@@ -3818,75 +3968,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
                              AMD_PG_SUPPORT_GFX_SMG |
                              AMD_PG_SUPPORT_GFX_DMG)) {
-               data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
-               data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
-               data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
-               WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
-
-               data = 0;
-               data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
-               data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
-               data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
-               data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
-               WREG32(mmRLC_PG_DELAY, data);
+               WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-               data = RREG32(mmRLC_PG_DELAY_2);
-               data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
-               data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
-               WREG32(mmRLC_PG_DELAY_2, data);
+               data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+               data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+               data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+               data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+               WREG32(mmRLC_PG_DELAY, data);
 
-               data = RREG32(mmRLC_AUTO_PG_CTRL);
-               data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
-               data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
-               WREG32(mmRLC_AUTO_PG_CTRL, data);
+               WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+               WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
        }
 }
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
                                                bool enable)
 {
-       u32 data, orig;
-
-       orig = data = RREG32(mmRLC_PG_CNTL);
-
-       if (enable)
-               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-
-       if (orig != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
                                                  bool enable)
 {
-       u32 data, orig;
-
-       orig = data = RREG32(mmRLC_PG_CNTL);
-
-       if (enable)
-               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-
-       if (orig != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-       u32 data, orig;
-
-       orig = data = RREG32(mmRLC_PG_CNTL);
-
-       if (enable)
-               data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-       else
-               data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-
-       if (orig != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
@@ -3923,36 +4032,26 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
-       u32 tmp = RREG32(mmRLC_CNTL);
-
-       tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
-       WREG32(mmRLC_CNTL, tmp);
+       WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
 
        gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
        gfx_v8_0_wait_for_rlc_serdes(adev);
 }
 
 static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
 {
-       u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
-       tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
-       WREG32(mmGRBM_SOFT_RESET, tmp);
+       WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
        udelay(50);
-       tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
-       WREG32(mmGRBM_SOFT_RESET, tmp);
+
+       WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
        udelay(50);
 }
 
 static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 {
-       u32 tmp = RREG32(mmRLC_CNTL);
-
-       tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
-       WREG32(mmRLC_CNTL, tmp);
+       WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
 
        /* Carrizo only enables the CP interrupt after the CP has been initialized */
        if (!(adev->flags & AMD_IS_APU))
@@ -3994,14 +4093,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
        if (adev->asic_type == CHIP_POLARIS11 ||
-               adev->asic_type == CHIP_POLARIS10)
+           adev->asic_type == CHIP_POLARIS10)
                WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
 
        /* disable PG */
        WREG32(mmRLC_PG_CNTL, 0);
 
        gfx_v8_0_rlc_reset(adev);
-
        gfx_v8_0_init_pg(adev);
 
        if (!adev->pp_enabled) {
@@ -4296,12 +4394,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
        gfx_v8_0_cp_gfx_start(adev);
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
-       if (r) {
+       if (r)
                ring->ready = false;
-               return r;
-       }
 
-       return 0;
+       return r;
 }
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4976,7 +5072,6 @@ static int gfx_v8_0_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gfx_v8_0_init_golden_registers(adev);
-
        gfx_v8_0_gpu_init(adev);
 
        r = gfx_v8_0_rlc_resume(adev);
@@ -4984,8 +5079,6 @@ static int gfx_v8_0_hw_init(void *handle)
                return r;
 
        r = gfx_v8_0_cp_resume(adev);
-       if (r)
-               return r;
 
        return r;
 }
@@ -5033,25 +5126,22 @@ static bool gfx_v8_0_is_idle(void *handle)
 static int gfx_v8_0_wait_for_idle(void *handle)
 {
        unsigned i;
-       u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
-               if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+               if (gfx_v8_0_is_idle(handle))
                        return 0;
+
                udelay(1);
        }
        return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_check_soft_reset(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* GRBM_STATUS */
        tmp = RREG32(mmGRBM_STATUS);
@@ -5060,16 +5150,12 @@ static int gfx_v8_0_soft_reset(void *handle)
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
-                  GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+                  GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+                  GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
-       }
-
-       if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
-               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-                                               GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
        }
@@ -5080,73 +5166,199 @@ static int gfx_v8_0_soft_reset(void *handle)
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
 
+       if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+           REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+           REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                               SOFT_RESET_CPF, 1);
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                               SOFT_RESET_CPC, 1);
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                               SOFT_RESET_CPG, 1);
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+                                               SOFT_RESET_GRBM, 1);
+       }
+
        /* SRBM_STATUS */
        tmp = RREG32(mmSRBM_STATUS);
        if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
        if (grbm_soft_reset || srbm_soft_reset) {
-               /* stop the rlc */
-               gfx_v8_0_rlc_stop(adev);
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
+               adev->gfx.grbm_soft_reset = grbm_soft_reset;
+               adev->gfx.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
+               adev->gfx.grbm_soft_reset = 0;
+               adev->gfx.srbm_soft_reset = 0;
+       }
+
+       return 0;
+}
+
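gfx_v8_0_check_soft_reset() only diagnoses: it latches the hang flag plus the required GRBM/SRBM reset bits into adev->gfx for the later stages to consume. The amdgpu core presumably drives the new callbacks in this order (a sketch of the flow, not the exact common code):

	funcs->check_soft_reset(handle);
	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) {
		funcs->pre_soft_reset(handle);	/* quiesce RLC/CP, drain HQDs   */
		funcs->soft_reset(handle);	/* pulse GRBM/SRBM reset bits   */
		funcs->post_soft_reset(handle);	/* re-init HQDs, restart CP/RLC */
	}
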
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring)
+{
+       int i;
+
+       vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+       if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+               u32 tmp;
+               tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+                                   DEQUEUE_REQ, 2);
+               WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+                               break;
+                       udelay(1);
+               }
+       }
+}
 
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+               return 0;
+
+       grbm_soft_reset = adev->gfx.grbm_soft_reset;
+       srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+       /* stop the rlc */
+       gfx_v8_0_rlc_stop(adev);
+
+       if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
                /* Disable GFX parsing/prefetching */
                gfx_v8_0_cp_gfx_enable(adev, false);
 
+       if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+               int i;
+
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+                       gfx_v8_0_inactive_hqd(adev, ring);
+               }
                /* Disable MEC parsing/prefetching */
                gfx_v8_0_cp_compute_enable(adev, false);
+       }
 
-               if (grbm_soft_reset || srbm_soft_reset) {
-                       tmp = RREG32(mmGMCON_DEBUG);
-                       tmp = REG_SET_FIELD(tmp,
-                                           GMCON_DEBUG, GFX_STALL, 1);
-                       tmp = REG_SET_FIELD(tmp,
-                                           GMCON_DEBUG, GFX_CLEAR, 1);
-                       WREG32(mmGMCON_DEBUG, tmp);
+       return 0;
+}
 
-                       udelay(50);
-               }
+static int gfx_v8_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+       u32 tmp;
 
-               if (grbm_soft_reset) {
-                       tmp = RREG32(mmGRBM_SOFT_RESET);
-                       tmp |= grbm_soft_reset;
-                       dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-                       WREG32(mmGRBM_SOFT_RESET, tmp);
-                       tmp = RREG32(mmGRBM_SOFT_RESET);
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+               return 0;
 
-                       udelay(50);
+       grbm_soft_reset = adev->gfx.grbm_soft_reset;
+       srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
-                       tmp &= ~grbm_soft_reset;
-                       WREG32(mmGRBM_SOFT_RESET, tmp);
-                       tmp = RREG32(mmGRBM_SOFT_RESET);
-               }
+       if (grbm_soft_reset || srbm_soft_reset) {
+               tmp = RREG32(mmGMCON_DEBUG);
+               tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+               tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+               WREG32(mmGMCON_DEBUG, tmp);
+               udelay(50);
+       }
 
-               if (srbm_soft_reset) {
-                       tmp = RREG32(mmSRBM_SOFT_RESET);
-                       tmp |= srbm_soft_reset;
-                       dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-                       WREG32(mmSRBM_SOFT_RESET, tmp);
-                       tmp = RREG32(mmSRBM_SOFT_RESET);
+       if (grbm_soft_reset) {
+               tmp = RREG32(mmGRBM_SOFT_RESET);
+               tmp |= grbm_soft_reset;
+               dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmGRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmGRBM_SOFT_RESET);
 
-                       udelay(50);
+               udelay(50);
 
-                       tmp &= ~srbm_soft_reset;
-                       WREG32(mmSRBM_SOFT_RESET, tmp);
-                       tmp = RREG32(mmSRBM_SOFT_RESET);
-               }
+               tmp &= ~grbm_soft_reset;
+               WREG32(mmGRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmGRBM_SOFT_RESET);
+       }
 
-               if (grbm_soft_reset || srbm_soft_reset) {
-                       tmp = RREG32(mmGMCON_DEBUG);
-                       tmp = REG_SET_FIELD(tmp,
-                                           GMCON_DEBUG, GFX_STALL, 0);
-                       tmp = REG_SET_FIELD(tmp,
-                                           GMCON_DEBUG, GFX_CLEAR, 0);
-                       WREG32(mmGMCON_DEBUG, tmp);
-               }
+       if (srbm_soft_reset) {
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
 
-               /* Wait a little for things to settle down */
                udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
        }
+
+       if (grbm_soft_reset || srbm_soft_reset) {
+               tmp = RREG32(mmGMCON_DEBUG);
+               tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+               tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+               WREG32(mmGMCON_DEBUG, tmp);
+       }
+
+       /* Wait a little for things to settle down */
+       udelay(50);
+
+       return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+                             struct amdgpu_ring *ring)
+{
+       vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+       WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+       WREG32(mmCP_HQD_PQ_RPTR, 0);
+       WREG32(mmCP_HQD_PQ_WPTR, 0);
+       vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+               return 0;
+
+       grbm_soft_reset = adev->gfx.grbm_soft_reset;
+       srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+       if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+               gfx_v8_0_cp_gfx_resume(adev);
+
+       if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+           REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+               int i;
+
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+                       gfx_v8_0_init_hqd(adev, ring);
+               }
+               gfx_v8_0_cp_compute_resume(adev);
+       }
+       gfx_v8_0_rlc_start(adev);
+
        return 0;
 }
 
@@ -5265,8 +5477,6 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
 {
-       uint32_t data, temp;
-
        if (adev->asic_type == CHIP_POLARIS11)
                /* Send msg to SMU via Powerplay */
                amdgpu_set_powergating_state(adev,
@@ -5274,83 +5484,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
                                             enable ?
                                             AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
-       temp = data = RREG32(mmRLC_PG_CNTL);
-       /* Enable static MGPG */
-       if (enable)
-               data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
-       if (temp != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
                                                        bool enable)
 {
-       uint32_t data, temp;
-
-       temp = data = RREG32(mmRLC_PG_CNTL);
-       /* Enable dynamic MGPG */
-       if (enable)
-               data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
-       if (temp != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
                bool enable)
 {
-       uint32_t data, temp;
-
-       temp = data = RREG32(mmRLC_PG_CNTL);
-       /* Enable quick PG */
-       if (enable)
-               data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-
-       if (temp != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
                                          bool enable)
 {
-       u32 data, orig;
-
-       orig = data = RREG32(mmRLC_PG_CNTL);
-
-       if (enable)
-               data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-
-       if (orig != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
                                                bool enable)
 {
-       u32 data, orig;
-
-       orig = data = RREG32(mmRLC_PG_CNTL);
-
-       if (enable)
-               data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-       else
-               data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-
-       if (orig != data)
-               WREG32(mmRLC_PG_CNTL, data);
+       WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
 
        /* Read any GFX register to wake up GFX. */
        if (!enable)
-               data = RREG32(mmDB_RENDER_CONTROL);
+               RREG32(mmDB_RENDER_CONTROL);
 }
 
 static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -5426,15 +5588,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 
        data = RREG32(mmRLC_SERDES_WR_CTRL);
        if (adev->asic_type == CHIP_STONEY)
-                       data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
-                       RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
-                       RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
-                       RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
-                       RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
-                       RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
-                       RLC_SERDES_WR_CTRL__POWER_UP_MASK |
-                       RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
-                       RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+               data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+                         RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+                         RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+                         RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+                         RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+                         RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+                         RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+                         RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+                         RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
        else
                data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
                          RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5457,10 +5619,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 
 #define MSG_ENTER_RLC_SAFE_MODE     1
 #define MSG_EXIT_RLC_SAFE_MODE      0
-
-#define RLC_GPR_REG2__REQ_MASK           0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT     0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK       0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
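The definitions above had to grow an explicit RLC_GPR_REG2__REQ__SHIFT because REG_GET_FIELD token-pastes both a _MASK and a __SHIFT name; the conversions below, e.g.

	REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)

expand (roughly) to

	(RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) >> RLC_GPR_REG2__REQ__SHIFT

and would not compile without it.
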
 static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
 {
@@ -5490,7 +5652,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
                }
 
                for (i = 0; i < adev->usec_timeout; i++) {
-                       if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+                       if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
                                break;
                        udelay(1);
                }
@@ -5518,7 +5680,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
        }
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+               if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
                        break;
                udelay(1);
        }
@@ -5550,7 +5712,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
                }
 
                for (i = 0; i < adev->usec_timeout; i++) {
-                       if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+                       if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
                                break;
                        udelay(1);
                }
@@ -5577,7 +5739,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
        }
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+               if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
                        break;
                udelay(1);
        }
@@ -5618,21 +5780,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
        /* It is disabled by HW by default */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
-                       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+                       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
                                /* 1 - RLC memory Light sleep */
-                               temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
-                               data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
-                               if (temp != data)
-                                       WREG32(mmRLC_MEM_SLP_CNTL, data);
-                       }
+                               WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
 
-                       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
-                               /* 2 - CP memory Light sleep */
-                               temp = data = RREG32(mmCP_MEM_SLP_CNTL);
-                               data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-                               if (temp != data)
-                                       WREG32(mmCP_MEM_SLP_CNTL, data);
-                       }
+                       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+                               WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
                }
 
                /* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5830,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
        return 0;
 }
 
+static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
+                                         enum amd_clockgating_state state)
+{
+       uint32_t msg_id, pp_state;
+       void *pp_handle = adev->powerplay.pp_handle;
+
+       if (state == AMD_CG_STATE_UNGATE)
+               pp_state = 0;
+       else
+               pp_state = PP_STATE_CG | PP_STATE_LS;
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_CG,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_MG,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       return 0;
+}
+
+static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
+                                         enum amd_clockgating_state state)
+{
+       uint32_t msg_id, pp_state;
+       void *pp_handle = adev->powerplay.pp_handle;
+
+       if (state == AMD_CG_STATE_UNGATE)
+               pp_state = 0;
+       else
+               pp_state = PP_STATE_CG | PP_STATE_LS;
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_CG,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_3D,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_MG,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_RLC,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                       PP_BLOCK_GFX_CP,
+                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       return 0;
+}
+
 static int gfx_v8_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
@@ -5842,33 +6065,33 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
                gfx_v8_0_update_gfx_clock_gating(adev,
                                                 state == AMD_CG_STATE_GATE ? true : false);
                break;
+       case CHIP_TONGA:
+               gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
+               break;
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+               gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+               break;
        default:
                break;
        }
        return 0;
 }
 
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-       u32 rptr;
-
-       rptr = ring->adev->wb.wb[ring->rptr_offs];
-
-       return rptr;
+       return ring->adev->wb.wb[ring->rptr_offs];
 }
 
 static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 wptr;
 
        if (ring->use_doorbell)
                /* XXX check if swapping is necessary on BE */
-               wptr = ring->adev->wb.wb[ring->wptr_offs];
+               return ring->adev->wb.wb[ring->wptr_offs];
        else
-               wptr = RREG32(mmCP_RB0_WPTR);
-
-       return wptr;
+               return RREG32(mmCP_RB0_WPTR);
 }
 
 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5935,12 +6158,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 {
        u32 header, control = 0;
 
-       /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (ctx_switch) {
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-       }
-
        if (ib->flags & AMDGPU_IB_FLAG_CE)
                header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
        else
@@ -5967,9 +6184,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-                                         (2 << 0) |
+                               (2 << 0) |
 #endif
-                                         (ib->gpu_addr & 0xFFFFFFFC));
+                               (ib->gpu_addr & 0xFFFFFFFC));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
        amdgpu_ring_write(ring, control);
 }
@@ -6010,14 +6227,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, 4); /* poll interval */
-
-       if (usepfp) {
-               /* synce CE with ME to prevent CE fetch CEIB before context switch done */
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-       }
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6025,6 +6234,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
 
+       /* GFX8 emits a 128 dw nop to keep the DE from issuing the vm_flush before the CE has finished the CEIB */
+       if (usepfp)
+               amdgpu_ring_insert_nop(ring, 128);
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)) |
@@ -6064,18 +6277,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                /* sync PFP to ME, otherwise we might get invalid PFP reads */
                amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                amdgpu_ring_write(ring, 0x0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
+               /* GFX8 emits a 128 dw nop to keep the CE from accessing the VM before the vm_flush finishes */
+               amdgpu_ring_insert_nop(ring, 128);
        }
 }
 
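The SWITCH_BUFFER packets that used to fence the CE against the DE are replaced by plain NOP padding long enough for the constant engine to drain. amdgpu_ring_insert_nop() is the generic helper already wired up as .insert_nop in the ring funcs below; its body is roughly (a sketch):

	void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		int i;

		for (i = 0; i < count; i++)
			amdgpu_ring_write(ring, ring->nop);
	}
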
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-       return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
        return ring->adev->wb.wb[ring->wptr_offs];
@@ -6111,36 +6317,88 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
-static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
-                                                enum amdgpu_interrupt_state state)
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
 {
-       u32 cp_int_cntl;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+       amdgpu_ring_write(ring, 0);
+}
 
-       switch (state) {
-       case AMDGPU_IRQ_STATE_DISABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           TIME_STAMP_INT_ENABLE, 0);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       case AMDGPU_IRQ_STATE_ENABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl =
-                       REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                     TIME_STAMP_INT_ENABLE, 1);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       default:
-               break;
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+       uint32_t dw2 = 0;
+
+       dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
+       if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+               /* set load_global_config & load_global_uconfig */
+               dw2 |= 0x8001;
+               /* set load_cs_sh_regs */
+               dw2 |= 0x01000000;
+               /* set load_per_context_state & load_gfx_sh_regs for GFX */
+               dw2 |= 0x10002;
+
+               /* set load_ce_ram if a preamble is presented */
+               if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
+                       dw2 |= 0x10000000;
+       } else {
+               /* still load_ce_ram if this is the first time the preamble is
+                * presented, even though no context switch happens.
+                */
+               if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
+                       dw2 |= 0x10000000;
        }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, dw2);
+       amdgpu_ring_write(ring, 0);
+}
+
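Collecting the magic dw2 constants above into named bits, following the in-line comments (hypothetical names; the driver itself keeps them as raw hex):

	#define CTXCNTL_LOAD_ENABLE             0x80000000
	#define CTXCNTL_LOAD_GLOBAL_CONFIG      0x00000001
	#define CTXCNTL_LOAD_PER_CONTEXT_STATE  0x00000002
	#define CTXCNTL_LOAD_GLOBAL_UCONFIG     0x00008000
	#define CTXCNTL_LOAD_GFX_SH_REGS        0x00010000
	#define CTXCNTL_LOAD_CS_SH_REGS         0x01000000
	#define CTXCNTL_LOAD_CE_RAM             0x10000000
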
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v8_0_ring_emit_gds_switch */
+               7 + /* gfx_v8_0_ring_emit_hdp_flush */
+               5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+               6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+               7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+               256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+               2 + /* gfx_v8_ring_emit_sb */
+               3; /* gfx_v8_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v8_0_ring_emit_gds_switch */
+               7 + /* gfx_v8_0_ring_emit_hdp_flush */
+               5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v8_0_ring_emit_vm_flush */
+               7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
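These per-ring dword budgets let common code reserve worst-case ring space for a whole submission up front instead of hard-coding per-ASIC counts; they are presumably consumed along these lines (a sketch with a hypothetical caller):

	unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
		       num_ibs * ring->funcs->get_emit_ib_size(ring);

	r = amdgpu_ring_alloc(ring, ndw);	/* fails if the frame cannot fit */
	if (r)
		return r;

Note the gfx budget charges 256 dw to gfx_v8_0_ring_emit_vm_flush, covering the two 128-dw NOP paddings added earlier in this patch.
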
+static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+                                                enum amdgpu_interrupt_state state)
+{
+       WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+                    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
 {
-       u32 mec_int_cntl, mec_int_cntl_reg;
-
        /*
         * amdgpu controls only pipe 0 of MEC1. That's why this function only
         * handles the setting of interrupts for this specific pipe. All other
@@ -6150,7 +6408,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
        if (me == 1) {
                switch (pipe) {
                case 0:
-                       mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
                        break;
                default:
                        DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -6161,22 +6418,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                return;
        }
 
-       switch (state) {
-       case AMDGPU_IRQ_STATE_DISABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
-               mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-                                            TIME_STAMP_INT_ENABLE, 0);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
-               break;
-       case AMDGPU_IRQ_STATE_ENABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
-               mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-                                            TIME_STAMP_INT_ENABLE, 1);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
-               break;
-       default:
-               break;
-       }
+       WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+                    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -6184,24 +6427,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
 {
-       u32 cp_int_cntl;
-
-       switch (state) {
-       case AMDGPU_IRQ_STATE_DISABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           PRIV_REG_INT_ENABLE, 0);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       case AMDGPU_IRQ_STATE_ENABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           PRIV_REG_INT_ENABLE, 1);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       default:
-               break;
-       }
+       WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+                    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        return 0;
 }
@@ -6211,24 +6438,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
 {
-       u32 cp_int_cntl;
-
-       switch (state) {
-       case AMDGPU_IRQ_STATE_DISABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           PRIV_INSTR_INT_ENABLE, 0);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       case AMDGPU_IRQ_STATE_ENABLE:
-               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           PRIV_INSTR_INT_ENABLE, 1);
-               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-               break;
-       default:
-               break;
-       }
+       WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+                    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        return 0;
 }
@@ -6334,13 +6545,16 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
        .resume = gfx_v8_0_resume,
        .is_idle = gfx_v8_0_is_idle,
        .wait_for_idle = gfx_v8_0_wait_for_idle,
+       .check_soft_reset = gfx_v8_0_check_soft_reset,
+       .pre_soft_reset = gfx_v8_0_pre_soft_reset,
        .soft_reset = gfx_v8_0_soft_reset,
+       .post_soft_reset = gfx_v8_0_post_soft_reset,
        .set_clockgating_state = gfx_v8_0_set_clockgating_state,
        .set_powergating_state = gfx_v8_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
-       .get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+       .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
@@ -6355,10 +6569,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .emit_switch_buffer = gfx_v8_ring_emit_sb,
+       .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+       .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
-       .get_rptr = gfx_v8_0_ring_get_rptr_compute,
+       .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .parse_cs = NULL,
@@ -6373,6 +6591,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+       .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6475,15 +6695,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
 
-       data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-       data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
-       data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-       data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+       data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+               RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
        mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
-       return (~data) & mask;
+       return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 
 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
index bc82c794312ca88c89bdd89cfe665c85aec74214..ebed1f8292979de0055d4c0d943375fdeda77e9d 100644 (file)
@@ -26,6 +26,4 @@
 
 extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
 
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644 (file)
index 0000000..b13c8aa
--- /dev/null
@@ -0,0 +1,1071 @@
+
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
+{
+       u32 blackout;
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_stop_mc_access(adev, save);
+
+       gmc_v6_0_wait_for_idle((void *)adev);
+
+       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+               /* Block CPU access */
+               WREG32(BIF_FB_EN, 0);
+               /* blackout the MC */
+               blackout = REG_SET_FIELD(blackout,
+                                        mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+               WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+       }
+       /* wait for the MC to settle */
+       udelay(100);
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp;
+
+       /* unblackout the MC */
+       tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+       WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+       /* allow CPU access */
+       tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+       tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+       WREG32(BIF_FB_EN, tmp);
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_resume_mc_access(adev, save);
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               chip_name = "hainan";
+               break;
+       default: BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+       if (err) {
+               dev_err(adev->dev,
+                      "si_mc: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(adev->mc.fw);
+               adev->mc.fw = NULL;
+       }
+       return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+       const __le32 *new_fw_data = NULL;
+       u32 running;
+       const __le32 *new_io_mc_regs = NULL;
+       int i, regs_size, ucode_size;
+       const struct mc_firmware_header_v1_0 *hdr;
+
+       if (!adev->mc.fw)
+               return -EINVAL;
+
+       hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+       amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+       adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
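+       /* the io_debug payload is a list of (index, data) register pairs, two dwords each */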
+       regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+       new_io_mc_regs = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+       new_fw_data = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+       if (running == 0) {
+               /* reset the engine and set to writable */
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+               /* load mc io regs */
+               for (i = 0; i < regs_size; i++) {
+                       WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+                       WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+               }
+               /* load the MC ucode */
+               for (i = 0; i < ucode_size; i++) {
+                       WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+               }
+
+               /* put the engine back into the active state */
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+               /* wait for training to complete */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+                               break;
+                       udelay(1);
+               }
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+                               break;
+                       udelay(1);
+               }
+       }
+
+       return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+                                      struct amdgpu_mc *mc)
+{
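+       /* the SI MC address space tops out just below 1 TB; clamp VRAM to fit */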
+       if (mc->mc_vram_size > 0xFFC0000000ULL) {
+               dev_warn(adev->dev, "limiting VRAM\n");
+               mc->real_vram_size = 0xFFC0000000ULL;
+               mc->mc_vram_size = 0xFFC0000000ULL;
+       }
+       amdgpu_vram_location(adev, &adev->mc, 0);
+       adev->mc.gtt_base_align = 0;
+       amdgpu_gtt_location(adev, mc);
+}
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 tmp;
+       int i, j;
+
+       /* Initialize HDP */
+       for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+               WREG32((0xb05 + j), 0x00000000);
+               WREG32((0xb06 + j), 0x00000000);
+               WREG32((0xb07 + j), 0x00000000);
+               WREG32((0xb08 + j), 0x00000000);
+               WREG32((0xb09 + j), 0x00000000);
+       }
+       WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+       gmc_v6_0_mc_stop(adev, &save);
+
+       if (gmc_v6_0_wait_for_idle((void *)adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+       WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+       /* Update configuration */
+       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+              adev->mc.vram_start >> 12);
+       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+              adev->mc.vram_end >> 12);
+       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+              adev->vram_scratch.gpu_addr >> 12);
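+       /* pack the VRAM window into MC_VM_FB_LOCATION:
+        * top (end >> 24) in bits 31:16, base (start >> 24) in bits 15:0
+        */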
+       tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+       tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+       WREG32(MC_VM_FB_LOCATION, tmp);
+       /* XXX double check these! */
+       WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+       WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+       WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+       WREG32(MC_VM_AGP_BASE, 0);
+       WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+       WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+       if (gmc_v6_0_wait_for_idle((void *)adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+       gmc_v6_0_mc_resume(adev, &save);
+       amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       int chansize, numchan;
+
+       tmp = RREG32(MC_ARB_RAMCFG);
+       if (tmp & CHANSIZE_OVERRIDE) {
+               chansize = 16;
+       } else if (tmp & CHANSIZE_MASK) {
+               chansize = 64;
+       } else {
+               chansize = 32;
+       }
+       tmp = RREG32(MC_SHARED_CHMAP);
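+       /* NOOFCHAN is a non-linear encoding of the DRAM channel count */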
+       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+       case 0:
+       default:
+               numchan = 1;
+               break;
+       case 1:
+               numchan = 2;
+               break;
+       case 2:
+               numchan = 4;
+               break;
+       case 3:
+               numchan = 8;
+               break;
+       case 4:
+               numchan = 3;
+               break;
+       case 5:
+               numchan = 6;
+               break;
+       case 6:
+               numchan = 10;
+               break;
+       case 7:
+               numchan = 12;
+               break;
+       case 8:
+               numchan = 16;
+               break;
+       }
+       adev->mc.vram_width = numchan * chansize;
+       /* Could the aperture size report 0? */
+       adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+       /* size in MB on si */
+       adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.visible_vram_size = adev->mc.aper_size;
+
+       /* unless the user has overridden it, set the GART size to
+        * 1024 MB or the VRAM size, whichever is larger.
+        */
+       if (amdgpu_gart_size == -1)
+               adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+       else
+               adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+       gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+       return 0;
+}
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+                                       uint32_t vmid)
+{
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+                                    void *cpu_pt_addr,
+                                    uint32_t gpu_page_idx,
+                                    uint64_t addr,
+                                    uint32_t flags)
+{
+       void __iomem *ptr = (void *)cpu_pt_addr;
+       uint64_t value;
+
+       value = addr & 0xFFFFFFFFFFFFF000ULL;
+       value |= flags;
+       writeq(value, ptr + (gpu_page_idx * 8));
+
+       return 0;
+}
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(VM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+       int r, i;
+
+       if (adev->gart.robj == NULL) {
+               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+               return -EINVAL;
+       }
+       r = amdgpu_gart_table_vram_pin(adev);
+       if (r)
+               return r;
+       /* Setup TLB control */
+       WREG32(MC_VM_MX_L1_TLB_CNTL,
+              (0xA << 7) |
+              ENABLE_L1_TLB |
+              ENABLE_L1_FRAGMENT_PROCESSING |
+              SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              ENABLE_ADVANCED_DRIVER_MODEL |
+              SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+              ENABLE_L2_FRAGMENT_PROCESSING |
+              ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+              ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+              EFFECTIVE_L2_QUEUE_SIZE(7) |
+              CONTEXT1_IDENTITY_ACCESS_MODE(1));
+       WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+       WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+              BANK_SELECT(4) |
+              L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+       /* setup context0 */
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+                       (u32)(adev->dummy_page.addr >> 12));
+       WREG32(VM_CONTEXT0_CNTL2, 0);
+       WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+                                 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
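+       /* clear three registers by raw offset, mirroring the radeon SI GART setup */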
+       WREG32(0x575, 0);
+       WREG32(0x576, 0);
+       WREG32(0x577, 0);
+
+       /* empty context1-15 */
+       /* set vm size, must be a multiple of 4 */
+       WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+       /* Assign the pt base to something valid for now; the PTs used by
+        * the VMs are determined by the application and are set up and
+        * assigned on the fly in the VM part of radeon_gart.c.
+        */
+       for (i = 1; i < 16; i++) {
+               if (i < 8)
+                       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+                              adev->gart.table_addr >> 12);
+               else
+                       WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+                              adev->gart.table_addr >> 12);
+       }
+
+       /* enable context1-15 */
+       WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+              (u32)(adev->dummy_page.addr >> 12));
+       WREG32(VM_CONTEXT1_CNTL2, 4);
+       WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+                               PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+       gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+       dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+                (unsigned)(adev->mc.gtt_size >> 20),
+                (unsigned long long)adev->gart.table_addr);
+       adev->gart.ready = true;
+       return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gart.robj) {
+               dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+               return 0;
+       }
+       r = amdgpu_gart_init(adev);
+       if (r)
+               return r;
+       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+       return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+       /*unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+               adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }*/
+
+       /* Disable all tables */
+       WREG32(VM_CONTEXT0_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, 0);
+       /* Setup TLB control */
+       WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+              ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+              EFFECTIVE_L2_QUEUE_SIZE(7) |
+              CONTEXT1_IDENTITY_ACCESS_MODE(1));
+       WREG32(VM_L2_CNTL2, 0);
+       WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+              L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+       amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+       amdgpu_gart_table_vram_free(adev);
+       amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+       /*
+        * number of VMs
+        * VMID 0 is reserved for System
+        * amdgpu graphics/compute will use VMIDs 1-7
+        * amdkfd will use VMIDs 8-15
+        */
+       adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+       amdgpu_vm_manager_init(adev);
+
+       /* base offset of vram pages */
+       if (adev->flags & AMD_IS_APU) {
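+               /* MC_VM_FB_OFFSET reports the carve-out base in 4 MB units, hence the shift by 22 */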
+               u64 tmp = RREG32(MC_VM_FB_OFFSET);
+               tmp <<= 22;
+               adev->vm_manager.vram_base_offset = tmp;
+       } else {
+               adev->vm_manager.vram_base_offset = 0;
+       }
+
+       return 0;
+}
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+                                    u32 status, u32 addr, u32 mc_client)
+{
+       u32 mc_id;
+       u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+       u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                       xxPROTECTIONS);
+       char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+               (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
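+       /* block holds mc_client decoded as a four-character ASCII tag */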
+
+       mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                             xxMEMORY_CLIENT_ID);
+
+       dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+              protections, vmid, addr,
+              REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                            xxMEMORY_CLIENT_RW) ?
+              "write" : "read", block, mc_client, mc_id);
+}
+
+/*
+static const u32 mc_cg_registers[] = {
+       MC_HUB_MISC_HUB_CG,
+       MC_HUB_MISC_SIP_CG,
+       MC_HUB_MISC_VM_CG,
+       MC_XPB_CLK_GAT,
+       ATC_MISC_CG,
+       MC_CITF_MISC_WR_CG,
+       MC_CITF_MISC_RD_CG,
+       MC_CITF_MISC_VM_CG,
+       VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+       MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+       MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+       MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+       MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+       ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+       VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+       MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+       MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+       MC_HUB_MISC_VM_CG__ENABLE_MASK,
+       MC_XPB_CLK_GAT__ENABLE_MASK,
+       ATC_MISC_CG__ENABLE_MASK,
+       MC_CITF_MISC_WR_CG__ENABLE_MASK,
+       MC_CITF_MISC_RD_CG__ENABLE_MASK,
+       MC_CITF_MISC_VM_CG__ENABLE_MASK,
+       VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+                                 bool enable)
+{
+       int i;
+       u32 orig, data;
+
+       for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+               orig = data = RREG32(mc_cg_registers[i]);
+               if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+                       data |= mc_cg_ls_en[i];
+               else
+                       data &= ~mc_cg_ls_en[i];
+               if (data != orig)
+                       WREG32(mc_cg_registers[i], data);
+       }
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+                                   bool enable)
+{
+       int i;
+       u32 orig, data;
+
+       for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+               orig = data = RREG32(mc_cg_registers[i]);
+               if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+                       data |= mc_cg_en[i];
+               else
+                       data &= ~mc_cg_en[i];
+               if (data != orig)
+                       WREG32(mc_cg_registers[i], data);
+       }
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+       } else {
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+       }
+
+       if (orig != data)
+               WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+               data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+       else
+               data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+       if (orig != data)
+               WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+                                  bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32(HDP_MEM_POWER_LS);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+               data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+       else
+               data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+       if (orig != data)
+               WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+       switch (mc_seq_vram_type) {
+       case MC_SEQ_MISC0__MT__GDDR1:
+               return AMDGPU_VRAM_TYPE_GDDR1;
+       case MC_SEQ_MISC0__MT__DDR2:
+               return AMDGPU_VRAM_TYPE_DDR2;
+       case MC_SEQ_MISC0__MT__GDDR3:
+               return AMDGPU_VRAM_TYPE_GDDR3;
+       case MC_SEQ_MISC0__MT__GDDR4:
+               return AMDGPU_VRAM_TYPE_GDDR4;
+       case MC_SEQ_MISC0__MT__GDDR5:
+               return AMDGPU_VRAM_TYPE_GDDR5;
+       case MC_SEQ_MISC0__MT__DDR3:
+               return AMDGPU_VRAM_TYPE_DDR3;
+       default:
+               return AMDGPU_VRAM_TYPE_UNKNOWN;
+       }
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gmc_v6_0_set_gart_funcs(adev);
+       gmc_v6_0_set_irq_funcs(adev);
+
+       if (adev->flags & AMD_IS_APU) {
+               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+       } else {
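+               /* the MT field of MC_SEQ_MISC0 encodes the board's vram type */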
+               u32 tmp = RREG32(MC_SEQ_MISC0);
+               tmp &= MC_SEQ_MISC0__MT__MASK;
+               adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+       int r;
+       int dma_bits;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
+       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
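+       /* amdgpu_vm_size is in GB; each GB spans 1 << 18 pages of 4 KB */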
+
+       adev->mc.mc_mask = 0xffffffffffULL;
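+       /* the SI memory controller uses 40-bit physical addresses */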
+
+       adev->need_dma32 = false;
+       dma_bits = adev->need_dma32 ? 32 : 40;
+       r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               adev->need_dma32 = true;
+               dma_bits = 32;
+               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+       }
+       r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+               dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+       }
+
+       r = gmc_v6_0_init_microcode(adev);
+       if (r) {
+               dev_err(adev->dev, "Failed to load mc firmware!\n");
+               return r;
+       }
+
+       r = amdgpu_ttm_global_init(adev);
+       if (r)
+               return r;
+
+       r = gmc_v6_0_mc_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_bo_init(adev);
+       if (r)
+               return r;
+
+       r = gmc_v6_0_gart_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v6_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->vm_manager.enabled) {
+               gmc_v6_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v6_0_gart_fini(adev);
+       amdgpu_gem_force_release(adev);
+       amdgpu_bo_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gmc_v6_0_mc_program(adev);
+
+       if (!(adev->flags & AMD_IS_APU)) {
+               r = gmc_v6_0_mc_load_microcode(adev);
+               if (r) {
+                       dev_err(adev->dev, "Failed to load MC firmware!\n");
+                       return r;
+               }
+       }
+
+       r = gmc_v6_0_gart_enable(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+       gmc_v6_0_gart_disable(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->vm_manager.enabled) {
+               gmc_v6_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v6_0_hw_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = gmc_v6_0_hw_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v6_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+               return false;
+
+       return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+       unsigned i;
+       u32 tmp;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                                              SRBM_STATUS__MCC_BUSY_MASK |
+                                              SRBM_STATUS__MCD_BUSY_MASK |
+                                              SRBM_STATUS__VMC_BUSY_MASK);
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_mode_mc_save save;
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+               if (!(adev->flags & AMD_IS_APU))
+                       srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                                       mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+       }
+
+       if (srbm_soft_reset) {
+               gmc_v6_0_mc_stop(adev, &save);
+               if (gmc_v6_0_wait_for_idle(adev))
+                       dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+               tmp = RREG32(SRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               gmc_v6_0_mc_resume(adev, &save);
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 tmp;
+       u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               tmp = RREG32(VM_CONTEXT0_CNTL);
+               tmp &= ~bits;
+               WREG32(VM_CONTEXT0_CNTL, tmp);
+               tmp = RREG32(VM_CONTEXT1_CNTL);
+               tmp &= ~bits;
+               WREG32(VM_CONTEXT1_CNTL, tmp);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               tmp = RREG32(VM_CONTEXT0_CNTL);
+               tmp |= bits;
+               WREG32(VM_CONTEXT0_CNTL, tmp);
+               tmp = RREG32(VM_CONTEXT1_CNTL);
+               tmp |= bits;
+               WREG32(VM_CONTEXT1_CNTL, tmp);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       u32 addr, status;
+
+       addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+       status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
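+       /* writing bit 0 of VM_CONTEXT1_CNTL2 clears the latched fault status */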
+       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+
+       dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+               entry->src_id, entry->src_data);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+               addr);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+               status);
+       gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+       return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+       .name = "gmc_v6_0",
+       .early_init = gmc_v6_0_early_init,
+       .late_init = gmc_v6_0_late_init,
+       .sw_init = gmc_v6_0_sw_init,
+       .sw_fini = gmc_v6_0_sw_fini,
+       .hw_init = gmc_v6_0_hw_init,
+       .hw_fini = gmc_v6_0_hw_fini,
+       .suspend = gmc_v6_0_suspend,
+       .resume = gmc_v6_0_resume,
+       .is_idle = gmc_v6_0_is_idle,
+       .wait_for_idle = gmc_v6_0_wait_for_idle,
+       .soft_reset = gmc_v6_0_soft_reset,
+       .set_clockgating_state = gmc_v6_0_set_clockgating_state,
+       .set_powergating_state = gmc_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+       .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+       .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+       .set = gmc_v6_0_vm_fault_interrupt_state,
+       .process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+       if (adev->gart.gart_funcs == NULL)
+               adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->mc.vm_fault.num_types = 1;
+       adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
new file mode 100644 (file)
index 0000000..42c4fc6
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
+
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+
+#endif
index d24a82bd0c7a119d7b450ee801f3b5ffa1dc51e3..aa0c4b964621cd9726366fba08679394095b75f0 100644 (file)
@@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                return 0;
        default: BUG();
        }
@@ -182,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
        const struct mc_firmware_header_v1_0 *hdr;
        const __le32 *fw_data = NULL;
        const __le32 *io_mc_regs = NULL;
-       u32 running, blackout = 0;
+       u32 running;
        int i, ucode_size, regs_size;
 
        if (!adev->mc.fw)
@@ -202,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
        running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
        if (running == 0) {
-               if (running) {
-                       blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-               }
-
                /* reset the engine and set to writable */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -238,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
                                break;
                        udelay(1);
                }
-
-               if (running)
-                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
        }
 
        return 0;
@@ -392,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
         * size equal to the 1024 or vram, whichever is larger.
         */
        if (amdgpu_gart_size == -1)
-               adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+               adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
        else
                adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -952,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle)
                return r;
        }
 
+       r = amdgpu_ttm_global_init(adev);
+       if (r)
+               return r;
+
        r = gmc_v7_0_mc_init(adev);
        if (r)
                return r;
index 717359d3ba8c506b77b09d46663b7afc598e9df9..1b319f5bc6962d5d6250db12fcb18302db789ade 100644 (file)
@@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_stoney_common[] =
+{
+       mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+       mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
 
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
                amdgpu_program_register_sequence(adev,
                                                 stoney_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_stoney_common,
+                                                (const u32)ARRAY_SIZE(golden_settings_stoney_common));
                break;
        default:
                break;
@@ -253,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
        const struct mc_firmware_header_v1_0 *hdr;
        const __le32 *fw_data = NULL;
        const __le32 *io_mc_regs = NULL;
-       u32 running, blackout = 0;
+       u32 running;
        int i, ucode_size, regs_size;
 
        if (!adev->mc.fw)
@@ -261,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
 
        /* Skip MC ucode loading on SR-IOV capable boards.
         * vbios does this for us in asic_init in that case.
+        * Skip MC ucode loading on a VF, because the hypervisor will do
+        * that for this adapter.
         */
-       if (adev->virtualization.supports_sr_iov)
+       if (amdgpu_sriov_bios(adev))
                return 0;
 
        hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
@@ -279,11 +289,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
        running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
        if (running == 0) {
-               if (running) {
-                       blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-               }
-
                /* reset the engine and set to writable */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -315,9 +320,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
                                break;
                        udelay(1);
                }
-
-               if (running)
-                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
        }
 
        return 0;
@@ -469,7 +471,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
         * size equal to the 1024 or vram, whichever is larger.
         */
        if (amdgpu_gart_size == -1)
-               adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+               adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
        else
                adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -949,6 +951,11 @@ static int gmc_v8_0_sw_init(void *handle)
                return r;
        }
 
+       r = amdgpu_ttm_global_init(adev);
+       if (r)
+               return r;
+
        r = gmc_v8_0_mc_init(adev);
        if (r)
                return r;
@@ -1092,9 +1099,8 @@ static int gmc_v8_0_wait_for_idle(void *handle)
 
 }
 
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_check_soft_reset(void *handle)
 {
-       struct amdgpu_mode_mc_save save;
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1109,13 +1115,42 @@ static int gmc_v8_0_soft_reset(void *handle)
                        srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }
-
        if (srbm_soft_reset) {
-               gmc_v8_0_mc_stop(adev, &save);
-               if (gmc_v8_0_wait_for_idle((void *)adev)) {
-                       dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-               }
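+               /* record the pending reset bits; the pre/soft/post reset hooks consume them */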
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
+               adev->mc.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
+               adev->mc.srbm_soft_reset = 0;
+       }
+       return 0;
+}
+
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+               return 0;
 
+       gmc_v8_0_mc_stop(adev, &adev->mc.save);
+       if (gmc_v8_0_wait_for_idle(adev))
+               dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+       return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+               return 0;
+       srbm_soft_reset = adev->mc.srbm_soft_reset;
+
+       if (srbm_soft_reset) {
+               u32 tmp;
 
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
@@ -1131,14 +1166,22 @@ static int gmc_v8_0_soft_reset(void *handle)
 
                /* Wait a little for things to settle down */
                udelay(50);
-
-               gmc_v8_0_mc_resume(adev, &save);
-               udelay(50);
        }
 
        return 0;
 }
 
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+               return 0;
+
+       gmc_v8_0_mc_resume(adev, &adev->mc.save);
+       return 0;
+}
+
 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
@@ -1406,7 +1449,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
        .resume = gmc_v8_0_resume,
        .is_idle = gmc_v8_0_is_idle,
        .wait_for_idle = gmc_v8_0_wait_for_idle,
+       .check_soft_reset = gmc_v8_0_check_soft_reset,
+       .pre_soft_reset = gmc_v8_0_pre_soft_reset,
        .soft_reset = gmc_v8_0_soft_reset,
+       .post_soft_reset = gmc_v8_0_post_soft_reset,
        .set_clockgating_state = gmc_v8_0_set_clockgating_state,
        .set_powergating_state = gmc_v8_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644 (file)
index 2f078ad..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "iceland_smum.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int iceland_dpm_early_init(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       iceland_dpm_set_funcs(adev);
-
-       return 0;
-}
-
-static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
-{
-       char fw_name[30] = "amdgpu/topaz_smc.bin";
-       int err;
-
-       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-       err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-       if (err) {
-               DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-               release_firmware(adev->pm.fw);
-               adev->pm.fw = NULL;
-       }
-       return err;
-}
-
-static int iceland_dpm_sw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ret = iceland_dpm_init_microcode(adev);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int iceland_dpm_sw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       release_firmware(adev->pm.fw);
-       adev->pm.fw = NULL;
-
-       return 0;
-}
-
-static int iceland_dpm_hw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       /* smu init only needs to be called at startup, not resume.
-        * It should be in sw_init, but requires the fw info gathered
-        * in sw_init from other IP modules.
-        */
-       ret = iceland_smu_init(adev);
-       if (ret) {
-               DRM_ERROR("SMU initialization failed\n");
-               goto fail;
-       }
-
-       ret = iceland_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-
-fail:
-       adev->firmware.smu_load = false;
-       mutex_unlock(&adev->pm.mutex);
-       return -EINVAL;
-}
-
-static int iceland_dpm_hw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-       /* smu fini only needs to be called at teardown, not suspend.
-        * It should be in sw_fini, but we put it here for symmetry
-        * with smu init.
-        */
-       iceland_smu_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-}
-
-static int iceland_dpm_suspend(void *handle)
-{
-       return 0;
-}
-
-static int iceland_dpm_resume(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       ret = iceland_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-fail:
-       mutex_unlock(&adev->pm.mutex);
-       return ret;
-}
-
-static int iceland_dpm_set_clockgating_state(void *handle,
-                       enum amd_clockgating_state state)
-{
-       return 0;
-}
-
-static int iceland_dpm_set_powergating_state(void *handle,
-                       enum amd_powergating_state state)
-{
-       return 0;
-}
-
-const struct amd_ip_funcs iceland_dpm_ip_funcs = {
-       .name = "iceland_dpm",
-       .early_init = iceland_dpm_early_init,
-       .late_init = NULL,
-       .sw_init = iceland_dpm_sw_init,
-       .sw_fini = iceland_dpm_sw_fini,
-       .hw_init = iceland_dpm_hw_init,
-       .hw_fini = iceland_dpm_hw_fini,
-       .suspend = iceland_dpm_suspend,
-       .resume = iceland_dpm_resume,
-       .is_idle = NULL,
-       .wait_for_idle = NULL,
-       .soft_reset = NULL,
-       .set_clockgating_state = iceland_dpm_set_clockgating_state,
-       .set_powergating_state = iceland_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
-       .get_temperature = NULL,
-       .pre_set_power_state = NULL,
-       .set_power_state = NULL,
-       .post_set_power_state = NULL,
-       .display_configuration_changed = NULL,
-       .get_sclk = NULL,
-       .get_mclk = NULL,
-       .print_power_state = NULL,
-       .debugfs_print_current_performance_level = NULL,
-       .force_performance_level = NULL,
-       .vblank_too_short = NULL,
-       .powergate_uvd = NULL,
-};
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
-{
-       if (NULL == adev->pm.funcs)
-               adev->pm.funcs = &iceland_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644 (file)
index 2118399..0000000
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "ppsmc.h"
-#include "iceland_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#define ICELAND_SMC_SIZE 0x20000
-
-static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
-                                       uint32_t smc_address, uint32_t limit)
-{
-       uint32_t val;
-
-       if (smc_address & 3)
-               return -EINVAL;
-
-       if ((smc_address + 3) > limit)
-               return -EINVAL;
-
-       WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       return 0;
-}
-
-static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
-                                    uint32_t smc_start_address,
-                                    const uint8_t *src,
-                                    uint32_t byte_count, uint32_t limit)
-{
-       uint32_t addr;
-       uint32_t data, orig_data;
-       int result = 0;
-       uint32_t extra_shift;
-       unsigned long flags;
-
-       if (smc_start_address & 3)
-               return -EINVAL;
-
-       if ((smc_start_address + byte_count) > limit)
-               return -EINVAL;
-
-       addr = smc_start_address;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       while (byte_count >= 4) {
-               /* Bytes are written into the SMC addres space with the MSB first */
-               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-               result = iceland_set_smc_sram_address(adev, addr, limit);
-
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (0 != byte_count) {
-               /* Now write odd bytes left, do a read modify write cycle */
-               data = 0;
-
-               result = iceland_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               orig_data = RREG32(mmSMC_IND_DATA_0);
-               extra_shift = 8 * (4 - byte_count);
-
-               while (byte_count > 0) {
-                       data = (data << 8) + *src++;
-                       byte_count--;
-               }
-
-               data <<= extra_shift;
-               data |= (orig_data & ~((~0UL) << extra_shift));
-
-               result = iceland_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-       }
-
-out:
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-void iceland_start_smc(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-void iceland_reset_smc(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static int iceland_program_jump_on_start(struct amdgpu_device *adev)
-{
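-       /* patch a jump at SMC address 0 so the SMC starts execution at
-        * the firmware entry point when it is released from reset
-        */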
-       static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-       iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-       return 0;
-}
-
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-void iceland_start_smc_clock(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
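-       /* the SMC is running once its clock is enabled and its program
-        * counter has advanced to at least 0x20100
-        */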
-       return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
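-       /* the SMC posts a non-zero value in SMC_RESP_0 once it has
-        * processed the message written to SMC_MESSAGE_0
-        */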
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32(mmSMC_RESP_0);
-               if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-       if (!iceland_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send message\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-                                                  PPSMC_Msg msg)
-{
-       if (!iceland_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-                                                 PPSMC_Msg msg,
-                                                 uint32_t parameter)
-{
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return iceland_send_msg_to_smc(adev, msg);
-}
-
-static int iceland_send_msg_to_smc_with_parameter_without_waiting(
-                                       struct amdgpu_device *adev,
-                                       PPSMC_Msg msg, uint32_t parameter)
-{
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return iceland_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       if (!iceland_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-               if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
-static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-       const struct smc_firmware_header_v1_0 *hdr;
-       uint32_t ucode_size;
-       uint32_t ucode_start_address;
-       const uint8_t *src;
-       uint32_t val;
-       uint32_t byte_count;
-       uint32_t data;
-       unsigned long flags;
-       int i;
-
-       if (!adev->pm.fw)
-               return -EINVAL;
-
-       /* Skip SMC ucode loading on SR-IOV capable boards.
-        * vbios does this for us in asic_init in that case.
-        */
-       if (adev->virtualization.supports_sr_iov)
-               return 0;
-
-       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-       amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-       src = (const uint8_t *)
-               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-       if (ucode_size & 3) {
-               DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-               return -EINVAL;
-       }
-
-       if (ucode_size > ICELAND_SMC_SIZE) {
-               DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-               return -EINVAL;
-       }
-
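-       /* wait for the SMC boot sequence flag to clear before touching
-        * the SMC clock and reset controls
-        */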
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixRCU_UC_EVENTS);
-               if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
-                       break;
-               udelay(1);
-       }
-       val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
-       WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
-
-       iceland_stop_smc_clock(adev);
-       iceland_reset_smc(adev);
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       byte_count = ucode_size;
-       while (byte_count >= 4) {
-               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-               WREG32(mmSMC_IND_DATA_0, data);
-               src += 4;
-               byte_count -= 4;
-       }
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-       return 0;
-}
-
-#if 0 /* not used yet */
-static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
-                                      uint32_t smc_address,
-                                      uint32_t *value,
-                                      uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = iceland_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               *value = RREG32(mmSMC_IND_DATA_0);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
-                                       uint32_t smc_address,
-                                       uint32_t value,
-                                       uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = iceland_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               WREG32(mmSMC_IND_DATA_0, value);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int iceland_smu_stop_smc(struct amdgpu_device *adev)
-{
-       iceland_reset_smc(adev);
-       iceland_stop_smc_clock(adev);
-
-       return 0;
-}
-#endif
-
-static int iceland_smu_start_smc(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       iceland_program_jump_on_start(adev);
-       iceland_start_smc_clock(adev);
-       iceland_start_smc(adev);
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixFIRMWARE_FLAGS);
-               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
-                       break;
-               udelay(1);
-       }
-       return 0;
-}
-
-static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case UCODE_ID_SDMA0:
-                       return AMDGPU_UCODE_ID_SDMA0;
-               case UCODE_ID_SDMA1:
-                       return AMDGPU_UCODE_ID_SDMA1;
-               case UCODE_ID_CP_CE:
-                       return AMDGPU_UCODE_ID_CP_CE;
-               case UCODE_ID_CP_PFP:
-                       return AMDGPU_UCODE_ID_CP_PFP;
-               case UCODE_ID_CP_ME:
-                       return AMDGPU_UCODE_ID_CP_ME;
-               case UCODE_ID_CP_MEC:
-               case UCODE_ID_CP_MEC_JT1:
-                       return AMDGPU_UCODE_ID_CP_MEC1;
-               case UCODE_ID_CP_MEC_JT2:
-                       return AMDGPU_UCODE_ID_CP_MEC2;
-               case UCODE_ID_RLC_G:
-                       return AMDGPU_UCODE_ID_RLC_G;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return AMDGPU_UCODE_ID_MAXIMUM;
-       }
-}
-
-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case AMDGPU_UCODE_ID_SDMA0:
-                       return UCODE_ID_SDMA0_MASK;
-               case AMDGPU_UCODE_ID_SDMA1:
-                       return UCODE_ID_SDMA1_MASK;
-               case AMDGPU_UCODE_ID_CP_CE:
-                       return UCODE_ID_CP_CE_MASK;
-               case AMDGPU_UCODE_ID_CP_PFP:
-                       return UCODE_ID_CP_PFP_MASK;
-               case AMDGPU_UCODE_ID_CP_ME:
-                       return UCODE_ID_CP_ME_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC2:
-                       return UCODE_ID_CP_MEC_MASK;
-               case AMDGPU_UCODE_ID_RLC_G:
-                       return UCODE_ID_RLC_G_MASK;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return 0;
-       }
-}
-
-static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-                                                     uint32_t fw_type,
-                                                     struct SMU_Entry *entry)
-{
-       enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
-       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-       const struct gfx_firmware_header_v1_0 *header = NULL;
-       uint64_t gpu_addr;
-       uint32_t data_size;
-
-       if (ucode->fw == NULL)
-               return -EINVAL;
-
-       gpu_addr  = ucode->mc_addr;
-       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-       data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-       entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-       entry->id = (uint16_t)fw_type;
-       entry->image_addr_high = upper_32_bits(gpu_addr);
-       entry->image_addr_low = lower_32_bits(gpu_addr);
-       entry->meta_data_addr_high = 0;
-       entry->meta_data_addr_low = 0;
-       entry->data_size_byte = data_size;
-       entry->num_register_entries = 0;
-       entry->flags = 0;
-
-       return 0;
-}
-
-static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
-{
-       struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
-       struct SMU_DRAMData_TOC *toc;
-       uint32_t fw_to_load;
-
-       toc = (struct SMU_DRAMData_TOC *)private->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       if (!adev->firmware.smu_load)
-               return 0;
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for RLC\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for CE\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for PFP\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for ME\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-               return -EINVAL;
-       }
-
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-               return -EINVAL;
-       }
-
-       iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-       iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK |
-                       UCODE_ID_SDMA0_MASK |
-                       UCODE_ID_SDMA1_MASK |
-                       UCODE_ID_CP_CE_MASK |
-                       UCODE_ID_CP_ME_MASK |
-                       UCODE_ID_CP_PFP_MASK |
-                       UCODE_ID_CP_MEC_MASK |
-                       UCODE_ID_CP_MEC_JT1_MASK;
-
-
-       if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-               DRM_ERROR("Fail to request SMU load ucode\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
-                                           uint32_t fw_type)
-{
-       uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
-       int i;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("check firmware loading failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int iceland_smu_start(struct amdgpu_device *adev)
-{
-       int result;
-
-       result = iceland_smu_upload_firmware_image(adev);
-       if (result)
-               return result;
-       result = iceland_smu_start_smc(adev);
-       if (result)
-               return result;
-
-       return iceland_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
-       .check_fw_load_finish = iceland_smu_check_fw_load_finish,
-       .request_smu_load_fw = NULL,
-       .request_smu_specific_fw = NULL,
-};
-
-int iceland_smu_init(struct amdgpu_device *adev)
-{
-       struct iceland_smu_private_data *private;
-       uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-       uint64_t mc_addr;
-       void *toc_buf_ptr;
-       int ret;
-
-       private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
-       if (NULL == private)
-               return -ENOMEM;
-
-       /* allocate firmware buffers */
-       if (adev->firmware.smu_load)
-               amdgpu_ucode_init_bo(adev);
-
-       adev->smu.priv = private;
-       adev->smu.fw_flags = 0;
-
-       /* Allocate FW image data structure and header buffer */
-       ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM,
-                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                              NULL, NULL, toc_buf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Retrieve GPU address for header buffer and internal buffer */
-       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-       if (ret) {
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to reserve the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to pin the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to map the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       amdgpu_bo_unreserve(adev->smu.toc_buf);
-       private->header_addr_low = lower_32_bits(mc_addr);
-       private->header_addr_high = upper_32_bits(mc_addr);
-       private->header = toc_buf_ptr;
-
-       adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
-
-       return 0;
-}
-
-int iceland_smu_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_unref(&adev->smu.toc_buf);
-       kfree(adev->smu.priv);
-       adev->smu.priv = NULL;
-       if (adev->firmware.fw_buf)
-               amdgpu_ucode_fini_bo(adev);
-
-       return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
deleted file mode 100644 (file)
index 5983e31..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
-
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-       uint8_t *header;
-       uint8_t *mec_image;
-       uint32_t header_addr_high;
-       uint32_t header_addr_low;
-};
-
-#endif
index a845e883f5fa560a18340517edbaa2df8080cf30..f8618a3881a841a3160115eabd065a9b311f862b 100644 (file)
@@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
                pi->caps_tcp_ramping = true;
        }
 
-       pi->caps_sclk_ds = true;
+       if (amdgpu_sclk_deep_sleep_en)
+               pi->caps_sclk_ds = true;
+       else
+               pi->caps_sclk_ds = false;
+
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        if (amdgpu_bapm == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644 (file)
index 0000000..055321f
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT                                10000
+#define R600_BSP_DFLT                                0x41EB
+#define R600_BSU_DFLT                                0x2
+#define R600_AH_DFLT                                 5
+#define R600_RLP_DFLT                                25
+#define R600_RMP_DFLT                                65
+#define R600_LHP_DFLT                                40
+#define R600_LMP_DFLT                                15
+#define R600_TD_DFLT                                 0
+#define R600_UTC_DFLT_00                             0x24
+#define R600_UTC_DFLT_01                             0x22
+#define R600_UTC_DFLT_02                             0x22
+#define R600_UTC_DFLT_03                             0x22
+#define R600_UTC_DFLT_04                             0x22
+#define R600_UTC_DFLT_05                             0x22
+#define R600_UTC_DFLT_06                             0x22
+#define R600_UTC_DFLT_07                             0x22
+#define R600_UTC_DFLT_08                             0x22
+#define R600_UTC_DFLT_09                             0x22
+#define R600_UTC_DFLT_10                             0x22
+#define R600_UTC_DFLT_11                             0x22
+#define R600_UTC_DFLT_12                             0x22
+#define R600_UTC_DFLT_13                             0x22
+#define R600_UTC_DFLT_14                             0x22
+#define R600_DTC_DFLT_00                             0x24
+#define R600_DTC_DFLT_01                             0x22
+#define R600_DTC_DFLT_02                             0x22
+#define R600_DTC_DFLT_03                             0x22
+#define R600_DTC_DFLT_04                             0x22
+#define R600_DTC_DFLT_05                             0x22
+#define R600_DTC_DFLT_06                             0x22
+#define R600_DTC_DFLT_07                             0x22
+#define R600_DTC_DFLT_08                             0x22
+#define R600_DTC_DFLT_09                             0x22
+#define R600_DTC_DFLT_10                             0x22
+#define R600_DTC_DFLT_11                             0x22
+#define R600_DTC_DFLT_12                             0x22
+#define R600_DTC_DFLT_13                             0x22
+#define R600_DTC_DFLT_14                             0x22
+#define R600_VRC_DFLT                                0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT                1000
+#define R600_BACKBIASRESPONSETIME_DFLT               1000
+#define R600_VRU_DFLT                                0x3
+#define R600_SPLLSTEPTIME_DFLT                       0x1000
+#define R600_SPLLSTEPUNIT_DFLT                       0x3
+#define R600_TPU_DFLT                                0
+#define R600_TPC_DFLT                                0x200
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+#define R600_GICST_DFLT                              0x200
+#define R600_FCT_DFLT                                0x0400
+#define R600_FCTU_DFLT                               0
+#define R600_CTXCGTT3DRPHC_DFLT                      0x20
+#define R600_CTXCGTT3DRSDC_DFLT                      0x40
+#define R600_VDDC3DOORPHC_DFLT                       0x100
+#define R600_VDDC3DOORSDC_DFLT                       0x7
+#define R600_VDDC3DOORSU_DFLT                        0
+#define R600_MPLLLOCKTIME_DFLT                       100
+#define R600_MPLLRESETTIME_DFLT                      150
+#define R600_VCOSTEPPCT_DFLT                          20
+#define R600_ENDINGVCOSTEPPCT_DFLT                    5
+#define R600_REFERENCEDIVIDER_DFLT                    4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+       R600_POWER_LEVEL_LOW = 0,
+       R600_POWER_LEVEL_MEDIUM = 1,
+       R600_POWER_LEVEL_HIGH = 2,
+       R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+       R600_TD_AUTO,
+       R600_TD_UP,
+       R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+       R600_DISPLAY_WATERMARK_LOW = 0,
+       R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+    R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    R600_PM_DISPLAY_GAP_VBLANK       = 1,
+    R600_PM_DISPLAY_GAP_WATERMARK    = 2,
+    R600_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+#endif
index 1351c7e834a21653a358ad3578dad91f60de9da9..565dab3c72186704542b3763fbcdcecb9a6e9cf9 100644 (file)
@@ -190,12 +190,8 @@ out:
  */
 static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 {
-       u32 rptr;
-
        /* XXX check if swapping is necessary on BE */
-       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-       return rptr;
+       return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -714,7 +710,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
-       } else if (r) {
+       } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
@@ -749,24 +745,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
 {
-       while (count) {
-               unsigned bytes = count * 8;
-               if (bytes > 0x1FFFF8)
-                       bytes = 0x1FFFF8;
-
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = bytes;
-               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-               ib->ptr[ib->length_dw++] = lower_32_bits(src);
-               ib->ptr[ib->length_dw++] = upper_32_bits(src);
-               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-               pe += bytes;
-               src += bytes;
-               count -= bytes / 8;
-       }
+       unsigned bytes = count * 8;
+
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = bytes;
+       ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+       ib->ptr[ib->length_dw++] = lower_32_bits(src);
+       ib->ptr[ib->length_dw++] = upper_32_bits(src);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -774,39 +762,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-                                  const dma_addr_t *pages_addr, uint64_t pe,
-                                  uint64_t addr, unsigned count,
-                                  uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                  uint64_t value, unsigned count,
+                                  uint32_t incr)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
-
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = pe;
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }
 
@@ -822,40 +798,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
-                                    uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint32_t flags)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count;
-               if (ndw > 0x7FFFF)
-                       ndw = 0x7FFFF;
-
-               if (flags & AMDGPU_PTE_VALID)
-                       value = addr;
-               else
-                       value = 0;
-
-               /* for physically contiguous pages (vram) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-               ib->ptr[ib->length_dw++] = pe; /* dst addr */
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = flags; /* mask */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = value; /* value */
-               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               ib->ptr[ib->length_dw++] = incr; /* increment size */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-               pe += ndw * 8;
-               addr += ndw * incr;
-               count -= ndw;
-       }
+       /* for physically contiguous pages (vram) */
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = flags; /* mask */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = incr; /* increment size */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -945,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* sdma_v2_4_ring_emit_hdp_flush */
+               3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+               6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+               12 + /* sdma_v2_4_ring_emit_vm_flush */
+               10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v2_4_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1263,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .test_ib = sdma_v2_4_ring_test_ib,
        .insert_nop = sdma_v2_4_ring_insert_nop,
        .pad_ib = sdma_v2_4_ring_pad_ib,
+       .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+       .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
index 653ce5ed55aef6641a83649cd1447a06fcf57437..f325fd86430b9e3d565f28fd5b11ace2c2a76667 100644 (file)
@@ -335,12 +335,8 @@ out:
  */
 static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-       u32 rptr;
-
        /* XXX check if swapping is necessary on BE */
-       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-       return rptr;
+       return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -499,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
-       unsigned ret;
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
-       amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
-       amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
-       amdgpu_ring_write(ring, 1);
-       ret = ring->wptr;/* this is the offset we need patch later */
-       amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
-       return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
-       unsigned cur;
-       BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
-       cur = ring->wptr - 1;
-       if (likely(cur > offset))
-               ring->ring[offset] = cur - offset;
-       else
-               ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -976,24 +947,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
 {
-       while (count) {
-               unsigned bytes = count * 8;
-               if (bytes > 0x1FFFF8)
-                       bytes = 0x1FFFF8;
-
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = bytes;
-               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-               ib->ptr[ib->length_dw++] = lower_32_bits(src);
-               ib->ptr[ib->length_dw++] = upper_32_bits(src);
-               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-               pe += bytes;
-               src += bytes;
-               count -= bytes / 8;
-       }
+       unsigned bytes = count * 8;
+
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = bytes;
+       ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+       ib->ptr[ib->length_dw++] = lower_32_bits(src);
+       ib->ptr[ib->length_dw++] = upper_32_bits(src);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -1001,39 +964,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-                                  const dma_addr_t *pages_addr, uint64_t pe,
-                                  uint64_t addr, unsigned count,
-                                  uint32_t incr, uint32_t flags)
-{
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
-
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                  uint64_t value, unsigned count,
+                                  uint32_t incr)
+{
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }
 
@@ -1049,40 +1000,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
-                                    uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint32_t flags)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count;
-               if (ndw > 0x7FFFF)
-                       ndw = 0x7FFFF;
-
-               if (flags & AMDGPU_PTE_VALID)
-                       value = addr;
-               else
-                       value = 0;
-
-               /* for physically contiguous pages (vram) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-               ib->ptr[ib->length_dw++] = pe; /* dst addr */
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = flags; /* mask */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = value; /* value */
-               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               ib->ptr[ib->length_dw++] = incr; /* increment size */
-               ib->ptr[ib->length_dw++] = 0;
-               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-               pe += ndw * 8;
-               addr += ndw * incr;
-               count -= ndw;
-       }
+       /* for physically contiguous pages (vram) */
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = flags; /* mask */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = incr; /* increment size */
+       ib->ptr[ib->length_dw++] = 0;
+       ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -1172,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* sdma_v3_0_ring_emit_hdp_flush */
+               3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+               6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+               12 + /* sdma_v3_0_ring_emit_vm_flush */
+               10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1320,27 +1268,78 @@ static int sdma_v3_0_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_check_soft_reset(void *handle)
 {
-       u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS2);
 
-       if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-               /* sdma0 */
-               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-               WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+       if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+           (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-       }
-       if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-               /* sdma1 */
-               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-               WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }
 
+       if (srbm_soft_reset) {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
+               adev->sdma.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
+               adev->sdma.srbm_soft_reset = 0;
+       }
+
+       return 0;
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+               return 0;
+
+       srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+       if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+           REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+               sdma_v3_0_ctx_switch_enable(adev, false);
+               sdma_v3_0_enable(adev, false);
+       }
+
+       return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+               return 0;
+
+       srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+       if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+           REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+               sdma_v3_0_gfx_resume(adev);
+               sdma_v3_0_rlc_resume(adev);
+       }
+
+       return 0;
+}
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
+       u32 tmp;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+               return 0;
+
+       srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
@@ -1559,6 +1558,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
        .resume = sdma_v3_0_resume,
        .is_idle = sdma_v3_0_is_idle,
        .wait_for_idle = sdma_v3_0_wait_for_idle,
+       .check_soft_reset = sdma_v3_0_check_soft_reset,
+       .pre_soft_reset = sdma_v3_0_pre_soft_reset,
+       .post_soft_reset = sdma_v3_0_post_soft_reset,
        .soft_reset = sdma_v3_0_soft_reset,
        .set_clockgating_state = sdma_v3_0_set_clockgating_state,
        .set_powergating_state = sdma_v3_0_set_powergating_state,
@@ -1579,6 +1581,8 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
        .test_ib = sdma_v3_0_ring_test_ib,
        .insert_nop = sdma_v3_0_ring_insert_nop,
        .pad_ib = sdma_v3_0_ring_pad_ib,
+       .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644 (file)
index 0000000..dc9511c
--- /dev/null
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
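+/* Golden register tables: entries are {register offset, bitmask, value}
+ * triplets applied through amdgpu_program_register_sequence().
+ */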
+static const u32 tahiti_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x09df, 0x00000003, 0x000007ff,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x2a00126a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x00000200, 0x000002fb,
+       0x2b04, 0xffffffff, 0x0000543b,
+       0x2b03, 0xffffffff, 0xa9210876,
+       0x2234, 0xffffffff, 0x000fff40,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0504, 0x20000000, 0x20fffed8,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+       0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601005,
+       0x311f, 0xffffffff, 0x10104040,
+       0x3122, 0xffffffff, 0x0100000a,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000f4,
+       0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x2a00126a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f7,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x32761054,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601004,
+       0x311f, 0xffffffff, 0x10102020,
+       0x3122, 0xffffffff, 0x01000020,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+       0xd4f, 0xffffffff, 0x40000,
+       0xd4e, 0xffffffff, 0x200010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x7007,
+       0xd4e, 0xffffffff, 0x300010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x400000,
+       0xd4e, 0xffffffff, 0x100010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x120200,
+       0xd4e, 0xffffffff, 0x500010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x1e1e16,
+       0xd4e, 0xffffffff, 0x600010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x171f1e,
+       0xd4e, 0xffffffff, 0x700010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4e, 0xffffffff, 0x9ff,
+       0xd40, 0xffffffff, 0x0,
+       0xd41, 0xffffffff, 0x10000800,
+       0xd41, 0xffffffff, 0xf,
+       0xd41, 0xffffffff, 0xf,
+       0xd40, 0xffffffff, 0x4,
+       0xd41, 0xffffffff, 0x1000051e,
+       0xd41, 0xffffffff, 0xffff,
+       0xd41, 0xffffffff, 0xffff,
+       0xd40, 0xffffffff, 0x8,
+       0xd41, 0xffffffff, 0x80500,
+       0xd40, 0xffffffff, 0x12,
+       0xd41, 0xffffffff, 0x9050c,
+       0xd40, 0xffffffff, 0x1d,
+       0xd41, 0xffffffff, 0xb052c,
+       0xd40, 0xffffffff, 0x2a,
+       0xd41, 0xffffffff, 0x1053e,
+       0xd40, 0xffffffff, 0x2d,
+       0xd41, 0xffffffff, 0x10546,
+       0xd40, 0xffffffff, 0x30,
+       0xd41, 0xffffffff, 0xa054e,
+       0xd40, 0xffffffff, 0x3c,
+       0xd41, 0xffffffff, 0x1055f,
+       0xd40, 0xffffffff, 0x3f,
+       0xd41, 0xffffffff, 0x10567,
+       0xd40, 0xffffffff, 0x42,
+       0xd41, 0xffffffff, 0x1056f,
+       0xd40, 0xffffffff, 0x45,
+       0xd41, 0xffffffff, 0x10572,
+       0xd40, 0xffffffff, 0x48,
+       0xd41, 0xffffffff, 0x20575,
+       0xd40, 0xffffffff, 0x4c,
+       0xd41, 0xffffffff, 0x190801,
+       0xd40, 0xffffffff, 0x67,
+       0xd41, 0xffffffff, 0x1082a,
+       0xd40, 0xffffffff, 0x6a,
+       0xd41, 0xffffffff, 0x1b082d,
+       0xd40, 0xffffffff, 0x87,
+       0xd41, 0xffffffff, 0x310851,
+       0xd40, 0xffffffff, 0xba,
+       0xd41, 0xffffffff, 0x891,
+       0xd40, 0xffffffff, 0xbc,
+       0xd41, 0xffffffff, 0x893,
+       0xd40, 0xffffffff, 0xbe,
+       0xd41, 0xffffffff, 0x20895,
+       0xd40, 0xffffffff, 0xc2,
+       0xd41, 0xffffffff, 0x20899,
+       0xd40, 0xffffffff, 0xc6,
+       0xd41, 0xffffffff, 0x2089d,
+       0xd40, 0xffffffff, 0xca,
+       0xd41, 0xffffffff, 0x8a1,
+       0xd40, 0xffffffff, 0xcc,
+       0xd41, 0xffffffff, 0x8a3,
+       0xd40, 0xffffffff, 0xce,
+       0xd41, 0xffffffff, 0x308a5,
+       0xd40, 0xffffffff, 0xd3,
+       0xd41, 0xffffffff, 0x6d08cd,
+       0xd40, 0xffffffff, 0x142,
+       0xd41, 0xffffffff, 0x2000095a,
+       0xd41, 0xffffffff, 0x1,
+       0xd40, 0xffffffff, 0x144,
+       0xd41, 0xffffffff, 0x301f095b,
+       0xd40, 0xffffffff, 0x165,
+       0xd41, 0xffffffff, 0xc094d,
+       0xd40, 0xffffffff, 0x173,
+       0xd41, 0xffffffff, 0xf096d,
+       0xd40, 0xffffffff, 0x184,
+       0xd41, 0xffffffff, 0x15097f,
+       0xd40, 0xffffffff, 0x19b,
+       0xd41, 0xffffffff, 0xc0998,
+       0xd40, 0xffffffff, 0x1a9,
+       0xd41, 0xffffffff, 0x409a7,
+       0xd40, 0xffffffff, 0x1af,
+       0xd41, 0xffffffff, 0xcdc,
+       0xd40, 0xffffffff, 0x1b1,
+       0xd41, 0xffffffff, 0x800,
+       0xd42, 0xffffffff, 0x6c9b2000,
+       0xd44, 0xfc00, 0x2000,
+       0xd51, 0xffffffff, 0xfc0,
+       0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x033f1005,
+       0x311f, 0xffffffff, 0x10808020,
+       0x3122, 0xffffffff, 0x00800008,
+       0x30c5, 0xffffffff, 0x00001000,
+       0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x0903, 0x000007ff, 0x00000000,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a3, 0x01ff1f3f, 0x00000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b01, 0x000003ff, 0x00000003,
+       0x2b05, 0x000003ff, 0x00000003,
+       0x2b05, 0x000003ff, 0x00000003,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2235, 0x0000001f, 0x00000010,
+       0x2235, 0x0000001f, 0x00000010,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f9, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x00000082,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f3,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00003210,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601005,
+       0x311f, 0xffffffff, 0x10104040,
+       0x3122, 0xffffffff, 0x0100000a,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x4595, 0xff000fff, 0x00000100,
+       0x340c, 0x000300c0, 0x00800040,
+       0x3630, 0xff000fff, 0x00000100,
+       0x360c, 0x000300c0, 0x00800040,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa393, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x00000000,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x03e00000, 0x03600000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f1,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00003210,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+       0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2476, 0xffffffff, 0x00070006,
+       0x2477, 0xffffffff, 0x00090008,
+       0x2478, 0xffffffff, 0x0000000c,
+       0x2479, 0xffffffff, 0x000b000a,
+       0x247a, 0xffffffff, 0x000e000d,
+       0x247b, 0xffffffff, 0x00080007,
+       0x247c, 0xffffffff, 0x000a0009,
+       0x247d, 0xffffffff, 0x0000000d,
+       0x247e, 0xffffffff, 0x000c000b,
+       0x247f, 0xffffffff, 0x000f000e,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2499, 0xffffffff, 0x000e000d,
+       0x249a, 0xffffffff, 0x0010000f,
+       0x249b, 0xffffffff, 0x00000013,
+       0x249c, 0xffffffff, 0x00120011,
+       0x249d, 0xffffffff, 0x00150014,
+       0x249e, 0xffffffff, 0x000f000e,
+       0x249f, 0xffffffff, 0x00110010,
+       0x24a0, 0xffffffff, 0x00000014,
+       0x24a1, 0xffffffff, 0x00130012,
+       0x24a2, 0xffffffff, 0x00160015,
+       0x24a3, 0xffffffff, 0x0010000f,
+       0x24a4, 0xffffffff, 0x00120011,
+       0x24a5, 0xffffffff, 0x00000015,
+       0x24a6, 0xffffffff, 0x00140013,
+       0x24a7, 0xffffffff, 0x00170016,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
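+/*
+ * Indirect register helpers: each block exposes an index/data register
+ * pair, so the index write and the data access are done under a spinlock
+ * to keep concurrent accessors from clobbering the shared index.
+ */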
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(AMDGPU_PCIE_INDEX, reg);
+       (void)RREG32(AMDGPU_PCIE_INDEX);
+       r = RREG32(AMDGPU_PCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(AMDGPU_PCIE_INDEX, reg);
+       (void)RREG32(AMDGPU_PCIE_INDEX);
+       WREG32(AMDGPU_PCIE_DATA, v);
+       (void)RREG32(AMDGPU_PCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(PCIE_PORT_INDEX, reg & 0xff);
+       (void)RREG32(PCIE_PORT_INDEX);
+       r = RREG32(PCIE_PORT_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(PCIE_PORT_INDEX, reg & 0xff);
+       (void)RREG32(PCIE_PORT_INDEX);
+       WREG32(PCIE_PORT_DATA, v);
+       (void)RREG32(PCIE_PORT_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, reg);
+       r = RREG32(SMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, reg);
+       WREG32(SMC_IND_DATA_0, v);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
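+/*
+ * Registers userspace is allowed to read back through the read_register
+ * callback; the final field flags entries that must be read via the
+ * GRBM index (per-SE/per-SH).
+ */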
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+       {GRBM_STATUS, false},
+       {GB_ADDR_CONFIG, false},
+       {MC_ARB_RAMCFG, false},
+       {GB_TILE_MODE0, false},
+       {GB_TILE_MODE1, false},
+       {GB_TILE_MODE2, false},
+       {GB_TILE_MODE3, false},
+       {GB_TILE_MODE4, false},
+       {GB_TILE_MODE5, false},
+       {GB_TILE_MODE6, false},
+       {GB_TILE_MODE7, false},
+       {GB_TILE_MODE8, false},
+       {GB_TILE_MODE9, false},
+       {GB_TILE_MODE10, false},
+       {GB_TILE_MODE11, false},
+       {GB_TILE_MODE12, false},
+       {GB_TILE_MODE13, false},
+       {GB_TILE_MODE14, false},
+       {GB_TILE_MODE15, false},
+       {GB_TILE_MODE16, false},
+       {GB_TILE_MODE17, false},
+       {GB_TILE_MODE18, false},
+       {GB_TILE_MODE19, false},
+       {GB_TILE_MODE20, false},
+       {GB_TILE_MODE21, false},
+       {GB_TILE_MODE22, false},
+       {GB_TILE_MODE23, false},
+       {GB_TILE_MODE24, false},
+       {GB_TILE_MODE25, false},
+       {GB_TILE_MODE26, false},
+       {GB_TILE_MODE27, false},
+       {GB_TILE_MODE28, false},
+       {GB_TILE_MODE29, false},
+       {GB_TILE_MODE30, false},
+       {GB_TILE_MODE31, false},
+       {CC_RB_BACKEND_DISABLE, false, true},
+       {GC_USER_RB_BACKEND_DISABLE, false, true},
+       {PA_SC_RASTER_CONFIG, false, true},
+};
+
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+                                         u32 se_num, u32 sh_num,
+                                         u32 reg_offset)
+{
+       uint32_t val;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+       val = RREG32(reg_offset);
+
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+       return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+                            u32 sh_num, u32 reg_offset, u32 *value)
+{
+       uint32_t i;
+
+       *value = 0;
+       for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+               if (reg_offset != si_allowed_read_registers[i].reg_offset)
+                       continue;
+
+               if (!si_allowed_read_registers[i].untouched)
+                       *value = si_allowed_read_registers[i].grbm_indexed ?
+                                si_read_indexed_register(adev, se_num,
+                                                          sh_num, reg_offset) :
+                                RREG32(reg_offset);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+       u32 bus_cntl;
+       u32 d1vga_control = 0;
+       u32 d2vga_control = 0;
+       u32 vga_render_control = 0;
+       u32 rom_cntl;
+       bool r;
+
+       bus_cntl = RREG32(R600_BUS_CNTL);
+       if (adev->mode_info.num_crtc) {
+               d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+               d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+               vga_render_control = RREG32(VGA_RENDER_CONTROL);
+       }
+       rom_cntl = RREG32(R600_ROM_CNTL);
+
+       /* enable the rom */
+       WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+       if (adev->mode_info.num_crtc) {
+               /* Disable VGA mode */
+               WREG32(AVIVO_D1VGA_CONTROL,
+                      (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_D2VGA_CONTROL,
+                      (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(VGA_RENDER_CONTROL,
+                      (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+       }
+       WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+       r = amdgpu_read_bios(adev);
+
+       /* restore regs */
+       WREG32(R600_BUS_CNTL, bus_cntl);
+       if (adev->mode_info.num_crtc) {
+               WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+               WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+               WREG32(VGA_RENDER_CONTROL, vga_render_control);
+       }
+       WREG32(R600_ROM_CNTL, rom_cntl);
+       return r;
+}
+
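+/*
+ * Read the VBIOS with the ROM enabled and VGA decode disabled, restoring
+ * the touched registers afterwards.
+ */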
+/* XXX: not implemented */
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+       uint32_t temp;
+
+       temp = RREG32(CONFIG_CNTL);
+       if (!state) {
+               temp &= ~(1 << 0);
+               temp |= (1 << 1);
+       } else {
+               temp &= ~(1 << 1);
+       }
+       WREG32(CONFIG_CNTL, temp);
+}
+
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+       u32 reference_clock = adev->clock.spll.reference_freq;
+       u32 tmp;
+
+       tmp = RREG32(CG_CLKPIN_CNTL_2);
+       if (tmp & MUX_TCLK_TO_XCLK)
+               return TCLK;
+
+       tmp = RREG32(CG_CLKPIN_CNTL);
+       if (tmp & XTALIN_DIVIDE)
+               return reference_clock / 4;
+
+       return reference_clock;
+}
+
+/* XXX: not implemented */
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+       return 0;
+}
+
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+       if (is_virtual_machine()) /* passthrough mode */
+               adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+       .read_disabled_bios = &si_read_disabled_bios,
+       .detect_hw_virtualization = &si_detect_hw_virtualization,
+       .read_register = &si_read_register,
+       .reset = &si_asic_reset,
+       .set_vga_state = &si_vga_set_state,
+       .get_xclk = &si_get_xclk,
+       .set_uvd_clocks = &si_set_uvd_clocks,
+       .set_vce_clocks = NULL,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+       return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+               >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
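+/*
+ * Early init: wire up the indirect register accessors and set the
+ * per-ASIC clock- and powergating feature flags.
+ */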
+static int si_common_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->smc_rreg = &si_smc_rreg;
+       adev->smc_wreg = &si_smc_wreg;
+       adev->pcie_rreg = &si_pcie_rreg;
+       adev->pcie_wreg = &si_pcie_wreg;
+       adev->pciep_rreg = &si_pciep_rreg;
+       adev->pciep_wreg = &si_pciep_wreg;
+       adev->uvd_ctx_rreg = NULL;
+       adev->uvd_ctx_wreg = NULL;
+       adev->didt_rreg = NULL;
+       adev->didt_wreg = NULL;
+
+       adev->asic_funcs = &si_asic_funcs;
+
+       adev->rev_id = si_get_rev_id(adev);
+       adev->external_rev_id = 0xFF;
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_PITCAIRN:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_VERDE:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CGTS_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               /* XXX: external_rev_id offset unverified */
+               adev->external_rev_id = adev->rev_id + 0x14;
+               break;
+       case CHIP_OLAND:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_HAINAN:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+       return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+       return 0;
+}
+
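+/* Apply the per-ASIC golden register sequences defined above. */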
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_registers2,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+               break;
+       case CHIP_PITCAIRN:
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_golden_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+               break;
+       case CHIP_VERDE:
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                verde_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                verde_pg_init,
+                                                (const u32)ARRAY_SIZE(verde_pg_init));
+               break;
+       case CHIP_OLAND:
+               amdgpu_program_register_sequence(adev,
+                                                oland_golden_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                oland_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+               break;
+       case CHIP_HAINAN:
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers2,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers2));
+               amdgpu_program_register_sequence(adev,
+                                                hainan_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+               break;
+       default:
+               BUG();
+       }
+}
+
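+/*
+ * Bump the PCIe link to gen2/gen3 speeds when both the root port and the
+ * GPU support them; disabled with amdgpu.pcie_gen2=0.
+ */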
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+       struct pci_dev *root = adev->pdev->bus->self;
+       int bridge_pos, gpu_pos;
+       u32 speed_cntl, mask, current_data_rate;
+       int ret, i;
+       u16 tmp16;
+
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
+       if (amdgpu_pcie_gen2 == 0)
+               return;
+
+       if (adev->flags & AMD_IS_APU)
+               return;
+
+       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+       if (ret != 0)
+               return;
+
+       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+               return;
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+       current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+               LC_CURRENT_DATA_RATE_SHIFT;
+       if (mask & DRM_PCIE_SPEED_80) {
+               if (current_data_rate == 2) {
+                       DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+                       return;
+               }
+               DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+       } else if (mask & DRM_PCIE_SPEED_50) {
+               if (current_data_rate == 1) {
+                       DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+                       return;
+               }
+               DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+       }
+
+       bridge_pos = pci_pcie_cap(root);
+       if (!bridge_pos)
+               return;
+
+       gpu_pos = pci_pcie_cap(adev->pdev);
+       if (!gpu_pos)
+               return;
+
+       if (mask & DRM_PCIE_SPEED_80) {
+               if (current_data_rate != 2) {
+                       u16 bridge_cfg, gpu_cfg;
+                       u16 bridge_cfg2, gpu_cfg2;
+                       u32 max_lw, current_lw, tmp;
+
+                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+                       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+                       tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+                       tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+                       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+                       tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+                       max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+                       current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+                       if (current_lw < max_lw) {
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+                               if (tmp & LC_RENEGOTIATION_SUPPORT) {
+                                       tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+                                       tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+                                       tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+                                       WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+                               }
+                       }
+
+                       for (i = 0; i < 10; i++) {
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+                                       break;
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp |= LC_SET_QUIESCE;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp |= LC_REDO_EQ;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+                               mdelay(100);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+                               tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+                               tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+                               tmp16 &= ~((1 << 4) | (7 << 9));
+                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+                               tmp16 &= ~((1 << 4) | (7 << 9));
+                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp &= ~LC_SET_QUIESCE;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+                       }
+               }
+       }
+
+       speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+       speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+       WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~0xf;
+       if (mask & DRM_PCIE_SPEED_80)
+               tmp16 |= 3;
+       else if (mask & DRM_PCIE_SPEED_50)
+               tmp16 |= 2;
+       else
+               tmp16 |= 1;
+       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+       speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+       WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+               if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+                       break;
+               udelay(1);
+       }
+}
+
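+/* Indirect access helpers for the PIF PHY0/PHY1 blocks used below. */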
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY0_INDEX, reg & 0xffff);
+       r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY0_INDEX, reg & 0xffff);
+       WREG32(EVERGREEN_PIF_PHY0_DATA, v);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY1_INDEX, reg & 0xffff);
+       r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY1_INDEX, reg & 0xffff);
+       WREG32(EVERGREEN_PIF_PHY1_DATA, v);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
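+
+/*
+ * Program ASPM (L0s/L1) and the associated PLL/clock power-down settings.
+ * The disable_* locals are compile-time tunables, all off here, so the
+ * full L0s+L1 path runs unless the amdgpu.aspm parameter is 0.
+ */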
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+       u32 data, orig;
+       bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+       bool disable_clkreq = false;
+
+       if (amdgpu_aspm == 0)
+               return;
+
+       if (adev->flags & AMD_IS_APU)
+               return;
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+       data &= ~LC_XMIT_N_FTS_MASK;
+       data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+       if (orig != data)
+               WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+       data |= LC_GO_TO_RECOVERY;
+       if (orig != data)
+               WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+       orig = data = RREG32_PCIE(PCIE_P_CNTL);
+       data |= P_IGNORE_EDB_ERR;
+       if (orig != data)
+               WREG32_PCIE(PCIE_P_CNTL, data);
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+       data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+       data |= LC_PMI_TO_L1_DIS;
+       if (!disable_l0s)
+               data |= LC_L0S_INACTIVITY(7);
+
+       if (!disable_l1) {
+               data |= LC_L1_INACTIVITY(7);
+               data &= ~LC_PMI_TO_L1_DIS;
+               if (orig != data)
+                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+               if (!disable_plloff_in_l1) {
+                       bool clk_req_support;
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+                       if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+                               data &= ~PLL_RAMP_UP_TIME_0_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+                               data &= ~PLL_RAMP_UP_TIME_1_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
+                               data &= ~PLL_RAMP_UP_TIME_2_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
+                               data &= ~PLL_RAMP_UP_TIME_3_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+                               data &= ~PLL_RAMP_UP_TIME_0_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+                               data &= ~PLL_RAMP_UP_TIME_1_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
+                               data &= ~PLL_RAMP_UP_TIME_2_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
+                               data &= ~PLL_RAMP_UP_TIME_3_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
+                       }
+
+                       orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+                       data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+                       data |= LC_DYN_LANES_PWR_STATE(3);
+                       if (orig != data)
+                               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
+
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(adev->pdev->bus)) {
+                               struct pci_dev *root = adev->pdev->bus->self;
+                               u32 lnkcap;
+
+                               clk_req_support = false;
+                               pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+                               if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+                                       clk_req_support = true;
+                       } else {
+                               clk_req_support = false;
+                       }
+
+                       if (clk_req_support) {
+                               orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+                               data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+                               if (orig != data)
+                                       WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+                               orig = data = RREG32(THM_CLK_CNTL);
+                               data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+                               data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+                               if (orig != data)
+                                       WREG32(THM_CLK_CNTL, data);
+
+                               orig = data = RREG32(MISC_CLK_CNTL);
+                               data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+                               data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+                               if (orig != data)
+                                       WREG32(MISC_CLK_CNTL, data);
+
+                               orig = data = RREG32(CG_CLKPIN_CNTL);
+                               data &= ~BCLK_AS_XCLK;
+                               if (orig != data)
+                                       WREG32(CG_CLKPIN_CNTL, data);
+
+                               orig = data = RREG32(CG_CLKPIN_CNTL_2);
+                               data &= ~FORCE_BIF_REFCLK_EN;
+                               if (orig != data)
+                                       WREG32(CG_CLKPIN_CNTL_2, data);
+
+                               orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+                               data &= ~MPLL_CLKOUT_SEL_MASK;
+                               data |= MPLL_CLKOUT_SEL(4);
+                               if (orig != data)
+                                       WREG32(MPLL_BYPASSCLK_SEL, data);
+
+                               orig = data = RREG32(SPLL_CNTL_MODE);
+                               data &= ~SPLL_REFCLK_SEL_MASK;
+                               if (orig != data)
+                                       WREG32(SPLL_CNTL_MODE, data);
+                       }
+               }
+       } else {
+               if (orig != data)
+                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+       }
+
+       orig = data = RREG32_PCIE(PCIE_CNTL2);
+       data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+       if (orig != data)
+               WREG32_PCIE(PCIE_CNTL2, data);
+
+       if (!disable_l0s) {
+               data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+               if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+                       data = RREG32_PCIE(PCIE_LC_STATUS1);
+                       if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+                               orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+                               data &= ~LC_L0S_INACTIVITY_MASK;
+                               if (orig != data)
+                                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+                       }
+               }
+       }
+}
+
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+       int readrq;
+       u16 v;
+
+       readrq = pcie_get_readrq(adev->pdev);
+       v = ffs(readrq) - 8;
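+       /* readrq is a power of two between 128 and 4096 bytes, so
+        * ffs(readrq) - 8 maps 128 bytes to 0 and 4096 bytes to 5.
+        * A value of 0 (128 bytes) or the out-of-range 6/7 is not
+        * usable here, so fall back to a safe 512 bytes.
+        */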
+       if ((v == 0) || (v == 6) || (v == 7))
+               pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_fix_pci_max_read_req_size(adev);
+       si_init_golden_registers(adev);
+       si_pcie_gen3_enable(adev);
+       si_program_aspm(adev);
+
+       return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+       return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+       return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+                                           enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+                                           enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+       .name = "si_common",
+       .early_init = si_common_early_init,
+       .late_init = NULL,
+       .sw_init = si_common_sw_init,
+       .sw_fini = si_common_sw_fini,
+       .hw_init = si_common_hw_init,
+       .hw_fini = si_common_hw_fini,
+       .suspend = si_common_suspend,
+       .resume = si_common_resume,
+       .is_idle = si_common_is_idle,
+       .wait_for_idle = si_common_wait_for_idle,
+       .soft_reset = si_common_soft_reset,
+       .set_clockgating_state = si_common_set_clockgating_state,
+       .set_powergating_state = si_common_set_powergating_state,
+};
+
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_dma_ip_funcs,
+       },
+#if 0
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 3,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &si_null_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_null_ip_funcs,
+       },
+#endif
+};
+
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_dma_ip_funcs,
+       },
+};
+
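+/* Entry point used by the core amdgpu code to install the per-ASIC IP
+ * block tables above.  A minimal sketch of the expected call site (an
+ * assumption; the real caller lives in the common init path):
+ *
+ *     r = si_set_ip_blocks(adev);
+ *     if (r)
+ *             return r;
+ */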
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
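+       /* Hainan has no display engine, hence its own IP table without
+        * a DCE block; the remaining SI parts share the verde table. */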
+       switch (adev->asic_type) {
+       case CHIP_VERDE:
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_OLAND:
+               adev->ip_blocks = verde_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+               break;
+       case CHIP_HAINAN:
+               adev->ip_blocks = hainan_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+               break;
+       default:
+               BUG();
+       }
+       return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
new file mode 100644 (file)
index 0000000..959d7b6
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_H__
+#define __SI_H__
+
+extern const struct amd_ip_funcs si_common_ip_funcs;
+
+void si_srbm_select(struct amdgpu_device *adev,
+                    u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644 (file)
index 0000000..de35819
--- /dev/null
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
+const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+       DMA0_REGISTER_OFFSET,
+       DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->rptr_offs>>2];
+}
+
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
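+       /* the wptr register holds a byte offset; convert it to dwords */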
+       return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+       WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_ib *ib,
+                               unsigned vm_id, bool ctx_switch)
+{
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+       amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+       amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+       amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address at which to write the fence value
+ * @seq: fence sequence number
+ * @flags: AMDGPU_FENCE_FLAG_* flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (SI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                     unsigned flags)
+{
+       bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+
+       /* write the fence */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       amdgpu_ring_write(ring, seq);
+       /* optionally write high bits as well */
+       if (write64bit) {
+               addr += 4;
+               amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+               amdgpu_ring_write(ring, addr & 0xfffffffc);
+               amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+               amdgpu_ring_write(ring, upper_32_bits(seq));
+       }
+       /* generate an interrupt */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl;
+       unsigned i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               /* disable the ring buffer for this DMA instance */
+               rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+               rb_cntl &= ~DMA_RB_ENABLE;
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+               ring->ready = false;
+       }
+}
+
+static int si_dma_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+       int i, r;
+       uint64_t rptr_addr;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+
+               WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+               WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = order_base_2(ring->ring_size / 4);
+               rb_cntl = rb_bufsz << 1;
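+               /* the log2 ring size field starts at bit 1 of DMA_RB_CNTL */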
+#ifdef __BIG_ENDIAN
+               rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+               WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
+               rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+               WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+               WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+               rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+               WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+               /* enable DMA IBs */
+               ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+               ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+               WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+               dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+               dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+               WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+               ring->wptr = 0;
+               WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+               ring->ready = true;
+
+               r = amdgpu_ring_test_ring(ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+       }
+
+       return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to
+ * memory (SI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
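+       /* seed the writeback slot with a sentinel value; the packets
+        * below must overwrite it with 0xDEADBEEF for the test to pass */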
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ring_alloc(ring, 4);
+       if (r) {
+               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_wb_free(adev, index);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+       amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_wb_free(adev, index);
+
+       return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout in jiffies to wait on the IB fence
+ *
+ * Test a simple IB in the DMA ring (SI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       struct fence *f = NULL;
+       unsigned index;
+       u32 tmp = 0;
+       u64 gpu_addr;
+       long r;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+       memset(&ib, 0, sizeof(ib));
+       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+               goto err0;
+       }
+
+       ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+       ib.ptr[3] = 0xDEADBEEF;
+       ib.length_dw = 4;
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+       if (r)
+               goto err1;
+
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
+               goto err1;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto err1;
+       }
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+
+err1:
+       amdgpu_ib_free(adev, &ib, NULL);
+       fence_put(f);
+err0:
+       amdgpu_wb_free(adev, index);
+       return r;
+}
+
+/**
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count)
+{
+       unsigned bytes = count * 8;
+
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                             1, 0, 0, bytes);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = lower_32_bits(src);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+       ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                               uint64_t value, unsigned count,
+                               uint32_t incr)
+{
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
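+       /* each PTE is 64 bits: emit the low and high dwords, stepping
+        * the value by incr for every entry */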
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
+       }
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (SI).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+                                    uint64_t pe,
+                                    uint64_t addr, unsigned count,
+                                    uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
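+               /* the packet's dword count field is limited, so large
+                * updates are split across multiple packets */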
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               if (flags & AMDGPU_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               pe += ndw * 4;
+               addr += (ndw / 2) * incr;
+               count -= ndw / 2;
+       }
+}
+
+/**
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to fill with padding
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
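+       /* DMA IBs must be a multiple of 8 dwords; fill with NOPs */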
+       while (ib->length_dw & 0x7)
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (SI).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       /* wait for idle */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+                         (1 << 27)); /* Poll memory */
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
+       amdgpu_ring_write(ring, seq); /* value */
+       amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - si vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VM ID to flush
+ * @pd_addr: address of the page directory
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (SI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                     unsigned vm_id, uint64_t pd_addr)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       if (vm_id < 8)
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       else
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* bits 0-7 are the VM contexts0-7 */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for invalidate to complete */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0xff << 16); /* retry */
+       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, 0); /* value */
+       amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               3 + /* si_dma_ring_emit_hdp_flush */
+               3 + /* si_dma_ring_emit_hdp_invalidate */
+               6 + /* si_dma_ring_emit_pipeline_sync */
+               12 + /* si_dma_ring_emit_vm_flush */
+               9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
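+       /* SI carries two DMA engines (DMA0 and DMA1) */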
+       adev->sdma.num_instances = 2;
+
+       si_dma_set_ring_funcs(adev);
+       si_dma_set_buffer_funcs(adev);
+       si_dma_set_vm_pte_funcs(adev);
+       si_dma_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+       struct amdgpu_ring *ring;
+       int r, i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /* DMA0 trap event */
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       if (r)
+               return r;
+
+       /* DMA1 trap event */
+       r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+       if (r)
+               return r;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
+
+       return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+       return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_dma_stop(adev);
+
+       return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS2);
+
+       if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+               return false;
+
+       return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (si_dma_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+       DRM_INFO("si_dma_soft_reset: not implemented\n");
+       return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *src,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       u32 sdma_cntl;
+
+       switch (type) {
+       case AMDGPU_SDMA_IRQ_TRAP0:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+                       sdma_cntl &= ~TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+                       sdma_cntl |= TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case AMDGPU_SDMA_IRQ_TRAP1:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+                       sdma_cntl &= ~TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+                       sdma_cntl |= TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+       return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+       return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *source,
+                                             struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in SDMA command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int si_dma_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       u32 orig, data, offset;
+       int i;
+       bool enable;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       enable = (state == AMD_CG_STATE_GATE);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       if (i == 0)
+                               offset = DMA0_REGISTER_OFFSET;
+                       else
+                               offset = DMA1_REGISTER_OFFSET;
+                       orig = data = RREG32(DMA_POWER_CNTL + offset);
+                       data &= ~MEM_POWER_OVERRIDE;
+                       if (data != orig)
+                               WREG32(DMA_POWER_CNTL + offset, data);
+                       WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+               }
+       } else {
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       if (i == 0)
+                               offset = DMA0_REGISTER_OFFSET;
+                       else
+                               offset = DMA1_REGISTER_OFFSET;
+                       orig = data = RREG32(DMA_POWER_CNTL + offset);
+                       data |= MEM_POWER_OVERRIDE;
+                       if (data != orig)
+                               WREG32(DMA_POWER_CNTL + offset, data);
+
+                       orig = data = RREG32(DMA_CLK_CTRL + offset);
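+                       /* with gating disabled, force the clocks on; writing
+                        * 0xff000000 appears to set all override bits */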
+                       data = 0xff000000;
+                       if (data != orig)
+                               WREG32(DMA_CLK_CTRL + offset, data);
+               }
+       }
+
+       return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       u32 tmp;
+
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
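+       /* kick the DMA power-gating state machine with its fixed
+        * programming sequence; the magic values are used as-is */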
+       WREG32(DMA_PGFSM_WRITE,  0x00002000);
+       WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+       for (tmp = 0; tmp < 5; tmp++)
+               WREG32(DMA_PGFSM_WRITE, 0);
+
+       return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+       .name = "si_dma",
+       .early_init = si_dma_early_init,
+       .late_init = NULL,
+       .sw_init = si_dma_sw_init,
+       .sw_fini = si_dma_sw_fini,
+       .hw_init = si_dma_hw_init,
+       .hw_fini = si_dma_hw_fini,
+       .suspend = si_dma_suspend,
+       .resume = si_dma_resume,
+       .is_idle = si_dma_is_idle,
+       .wait_for_idle = si_dma_wait_for_idle,
+       .soft_reset = si_dma_soft_reset,
+       .set_clockgating_state = si_dma_set_clockgating_state,
+       .set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+       .get_rptr = si_dma_ring_get_rptr,
+       .get_wptr = si_dma_ring_get_wptr,
+       .set_wptr = si_dma_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = si_dma_ring_emit_ib,
+       .emit_fence = si_dma_ring_emit_fence,
+       .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+       .emit_vm_flush = si_dma_ring_emit_vm_flush,
+       .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+       .test_ring = si_dma_ring_test_ring,
+       .test_ib = si_dma_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = si_dma_ring_pad_ib,
+       .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+       .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+       .set = si_dma_set_trap_irq_state,
+       .process = si_dma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+       .set = si_dma_set_trap_irq_state,
+       .process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+       .process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+       adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+       adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (SI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+                                      uint64_t src_offset,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                             1, 0, 0, byte_count);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+       ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (SI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+                                      uint32_t src_data,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+                                             0, 0, 0, byte_count / 4);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = src_data;
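+       /* the high destination address bits are carried in the upper
+        * half-word of the final dword */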
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
+
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+       .copy_max_bytes = 0xffff8,
+       .copy_num_dw = 5,
+       .emit_copy_buffer = si_dma_emit_copy_buffer,
+
+       .fill_max_bytes = 0xffff8,
+       .fill_num_dw = 4,
+       .emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mman.buffer_funcs == NULL) {
+               adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+       }
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+       .copy_pte = si_dma_vm_copy_pte,
+       .write_pte = si_dma_vm_write_pte,
+       .set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       if (adev->vm_manager.vm_pte_funcs == NULL) {
+               adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+               for (i = 0; i < adev->sdma.num_instances; i++)
+                       adev->vm_manager.vm_pte_rings[i] =
+                               &adev->sdma.instance[i].ring;
+
+               adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+       }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
new file mode 100644 (file)
index 0000000..3a3e0c7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
+
+extern const struct amd_ip_funcs si_dma_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644 (file)
index 0000000..e2db4a7
--- /dev/null
@@ -0,0 +1,7993 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END                 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ     1350
+
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4                                    0x5cd
+
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
+union power_info {
+       struct _ATOM_POWERPLAY_INFO info;
+       struct _ATOM_POWERPLAY_INFO_V2 info_2;
+       struct _ATOM_POWERPLAY_INFO_V3 info_3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+       struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+       struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+       struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+       struct _ATOM_PPLIB_FANTABLE fan;
+       struct _ATOM_PPLIB_FANTABLE2 fan2;
+       struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+       struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+       struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+       struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+       struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+       struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+       R600_UTC_DFLT_00,
+       R600_UTC_DFLT_01,
+       R600_UTC_DFLT_02,
+       R600_UTC_DFLT_03,
+       R600_UTC_DFLT_04,
+       R600_UTC_DFLT_05,
+       R600_UTC_DFLT_06,
+       R600_UTC_DFLT_07,
+       R600_UTC_DFLT_08,
+       R600_UTC_DFLT_09,
+       R600_UTC_DFLT_10,
+       R600_UTC_DFLT_11,
+       R600_UTC_DFLT_12,
+       R600_UTC_DFLT_13,
+       R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+       R600_DTC_DFLT_00,
+       R600_DTC_DFLT_01,
+       R600_DTC_DFLT_02,
+       R600_DTC_DFLT_03,
+       R600_DTC_DFLT_04,
+       R600_DTC_DFLT_05,
+       R600_DTC_DFLT_06,
+       R600_DTC_DFLT_07,
+       R600_DTC_DFLT_08,
+       R600_DTC_DFLT_09,
+       R600_DTC_DFLT_10,
+       R600_DTC_DFLT_11,
+       R600_DTC_DFLT_12,
+       R600_DTC_DFLT_13,
+       R600_DTC_DFLT_14,
+};
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+       { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+       { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
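+/*
+ * A minimal layout note, assuming struct si_cac_config_reg from si_dpm.h:
+ * each entry is { offset, mask, shift, value, type }, and a bare
+ * 0xFFFFFFFF offset terminates the table. SISLANDS_CACCONFIG_CGIND
+ * marks the offset as an indirect clock-gating (CG) register.
+ */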
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+       { 0xFFFFFFFF }
+};
+
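+/*
+ * Field order below is assumed to follow struct si_powertune_data:
+ * cac_window, l2_lta_window_size_default, lts_truncate_default,
+ * shift_n_default, operating_temp_default, the ni_leakage_coeffients
+ * block { at, bt, av, bv, t_slope, t_intercept, t_ref }, fixed_kt,
+ * lkge_lut_v0_percent, dc_cac[8] and enable_powertune_by_default.
+ */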
+static const struct si_powertune_data powertune_data_tahiti =
+{
+       ((1 << 16) | 27027),
+       6,
+       0,
+       4,
+       95,
+       {
+               0UL,
+               0UL,
+               4521550UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               40
+       },
+       595000000UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
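+/*
+ * Per-ASIC DTE filter parameters; the positional initializers are
+ * assumed to map to struct si_dte_data: tau[5], r[5], k, t0, max_t,
+ * window_size, temp_select, dte_mode, tdep_count, t_limits[16],
+ * tdep_tau[16], tdep_r[16], t_threshold, enable_dte_by_default.
+ */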
+static const struct si_dte_data dte_data_tahiti =
+{
+       { 1159409, 0, 0, 0, 0 },
+       { 777, 0, 0, 0, 0 },
+       2,
+       54000,
+       127000,
+       25,
+       2,
+       10,
+       13,
+       { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+       { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+       { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+       85,
+       false
+};
+
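+/* DTE parameters for Tahiti LE follow, currently compiled out via #if 0. */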
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+       0x5,
+       0xAFC8,
+       0x64,
+       0x32,
+       1,
+       0,
+       0x10,
+       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+       85,
+       true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+       { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+       0x5,
+       0xAFC8,
+       0x69,
+       0x32,
+       1,
+       0,
+       0x10,
+       { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+       85,
+       true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+       ((1 << 16) | 27027),
+       5,
+       0,
+       6,
+       100,
+       {
+               51600000UL,
+               1800000UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
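+/*
+ * All-zero DTE table with enable_dte_by_default = false: DTE stays
+ * disabled on Pitcairn.
+ */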
+static const struct si_dte_data dte_data_pitcairn =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
+static const struct si_dte_data dte_data_venus_xtx =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+       {  0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+       { 0xFFFFFFFF }
+};
+
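+/*
+ * si_powertune_data layout, as inferred from the uses later in this
+ * file: the packed CAC window (two 16-bit fields), the
+ * l2_lta_window_size/lts_truncate/shift_n defaults, what appears to be
+ * a default operating temperature (105), the ni_leakage_coeffients
+ * block (two coefficients unused here, then av, bv, t_slope,
+ * t_intercept and t_ref; all but t_ref are later divided by 10^8),
+ * fixed_kt, lkge_lut_v0_percent, the dc_cac levels, and
+ * enable_powertune_by_default.
+ */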
+static const struct si_powertune_data powertune_data_oland =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       55000,
+       105,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       55000,
+       105,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       9,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+                                    const struct atom_voltage_table *table,
+                                    u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+                                   SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+                                   u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+                                     u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+                                        struct rv7xx_pl *pl,
+                                        SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+                                   u32 engine_clock,
+                                   SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+       struct si_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
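+/*
+ * Leakage model, evaluated in DRM 32.32 fixed point with v in mV and t
+ * in millidegrees C:
+ *
+ *   tmp = t_slope * V + t_intercept
+ *   kt  = exp(tmp * T) / exp(tmp * T_ref)
+ *   kv  = av * exp(bv * V)
+ *   P   = (ileakage / 100) * kt * kv * V
+ *
+ * The result is handed back scaled by 1000, i.e. in mW for a model
+ * that produces Watts.
+ */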
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+                                                    u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+       s64 kt, kv, leakage_w, i_leakage, vddc;
+       s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+       s64 tmp;
+
+       i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+       vddc = div64_s64(drm_int2fixp(v), 1000);
+       temperature = div64_s64(drm_int2fixp(t), 1000);
+
+       t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+       t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+       av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+       bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+       t_ref = drm_int2fixp(coeff->t_ref);
+
+       tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+       kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+       kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+       kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+       leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+       *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+                                            const struct ni_leakage_coeffients *coeff,
+                                            u16 v,
+                                            s32 t,
+                                            u32 i_leakage,
+                                            u32 *leakage)
+{
+       si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+                                              const u32 fixed_kt, u16 v,
+                                              u32 ileakage, u32 *leakage)
+{
+       s64 kt, kv, leakage_w, i_leakage, vddc;
+
+       i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+       vddc = div64_s64(drm_int2fixp(v), 1000);
+
+       kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+       kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+                         drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+       leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+       *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+                                      const struct ni_leakage_coeffients *coeff,
+                                      const u32 fixed_kt,
+                                      u16 v,
+                                      u32 i_leakage,
+                                      u32 *leakage)
+{
+       si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
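+/*
+ * Rescale the DTE filter resistances from the near-TDP limit (PL2):
+ *
+ *   r[i] = t_split[i] * (max_t - t0/1000) * 2^14 / (100 * p_limit2)
+ *
+ * and derive the temperature-dependent entries from the last stage
+ * (tdep_r[1] = 2 * r[4], tdep_r[i >= 2] = r[4]).  This relies on
+ * dte_data->k covering at least five filter stages, and on PL2 being
+ * non-zero and no larger than PL1.
+ */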
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+                                  struct si_dte_data *dte_data)
+{
+       u32 p_limit1 = adev->pm.dpm.tdp_limit;
+       u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+       u32 k = dte_data->k;
+       u32 t_max = dte_data->max_t;
+       u32 t_split[5] = { 10, 15, 20, 25, 30 };
+       u32 t_0 = dte_data->t0;
+       u32 i;
+
+       if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+               dte_data->tdep_count = 3;
+
+               for (i = 0; i < k; i++) {
+                       dte_data->r[i] =
+                               (t_split[i] * (t_max - (t_0 / (u32)1000)) * (1 << 14)) /
+                               (p_limit2 * (u32)100);
+               }
+
+               dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+               for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+                       dte_data->tdep_r[i] = dte_data->r[4];
+               }
+       } else {
+               DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+       }
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+       struct ni_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+       struct si_ps *ps = aps->ps_priv;
+
+       return ps;
+}
+
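+/*
+ * Select the CAC weight, local CAC, CAC override, powertune and DTE
+ * tables for the specific SKU (keyed on PCI device ID) within each SI
+ * family, then apply whatever powertune/DTE features those tables
+ * enable.  SKUs whose DTE data is derived from the TDP limits also set
+ * update_dte_from_pl2.
+ */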
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       bool update_dte_from_pl2 = false;
+
+       if (adev->asic_type == CHIP_TAHITI) {
+               si_pi->cac_weights = cac_weights_tahiti;
+               si_pi->lcac_config = lcac_tahiti;
+               si_pi->cac_override = cac_override_tahiti;
+               si_pi->powertune_data = &powertune_data_tahiti;
+               si_pi->dte_data = dte_data_tahiti;
+
+               switch (adev->pdev->device) {
+               case 0x6798:
+                       si_pi->dte_data.enable_dte_by_default = true;
+                       break;
+               case 0x6799:
+                       si_pi->dte_data = dte_data_new_zealand;
+                       break;
+               case 0x6790:
+               case 0x6791:
+               case 0x6792:
+               case 0x679E:
+                       si_pi->dte_data = dte_data_aruba_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x679B:
+                       si_pi->dte_data = dte_data_malta;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x679A:
+                       si_pi->dte_data = dte_data_tahiti_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       if (si_pi->dte_data.enable_dte_by_default)
+                               DRM_ERROR("DTE is not enabled!\n");
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_PITCAIRN) {
+               si_pi->cac_weights = cac_weights_pitcairn;
+               si_pi->lcac_config = lcac_pitcairn;
+               si_pi->cac_override = cac_override_pitcairn;
+               si_pi->powertune_data = &powertune_data_pitcairn;
+
+               switch (adev->pdev->device) {
+               case 0x6810:
+               case 0x6818:
+                       si_pi->dte_data = dte_data_curacao_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6819:
+               case 0x6811:
+                       si_pi->dte_data = dte_data_curacao_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6800:
+               case 0x6806:
+                       si_pi->dte_data = dte_data_neptune_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       si_pi->dte_data = dte_data_pitcairn;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_VERDE) {
+               si_pi->lcac_config = lcac_cape_verde;
+               si_pi->cac_override = cac_override_cape_verde;
+               si_pi->powertune_data = &powertune_data_cape_verde;
+
+               switch (adev->pdev->device) {
+               case 0x683B:
+               case 0x683F:
+               case 0x6829:
+               case 0x6835:
+                       si_pi->cac_weights = cac_weights_cape_verde_pro;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x682C:
+                       si_pi->cac_weights = cac_weights_cape_verde_pro;
+                       si_pi->dte_data = dte_data_sun_xt;
+                       break;
+               case 0x6825:
+               case 0x6827:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x6824:
+               case 0x682D:
+                       si_pi->cac_weights = cac_weights_chelsea_xt;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x682F:
+                       si_pi->cac_weights = cac_weights_chelsea_pro;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x6820:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_venus_xtx;
+                       break;
+               case 0x6821:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_venus_xt;
+                       break;
+               case 0x6823:
+               case 0x682B:
+               case 0x6822:
+               case 0x682A:
+                       si_pi->cac_weights = cac_weights_chelsea_pro;
+                       si_pi->dte_data = dte_data_venus_pro;
+                       break;
+               default:
+                       si_pi->cac_weights = cac_weights_cape_verde;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_OLAND) {
+               si_pi->lcac_config = lcac_mars_pro;
+               si_pi->cac_override = cac_override_oland;
+               si_pi->powertune_data = &powertune_data_mars_pro;
+               si_pi->dte_data = dte_data_mars_pro;
+
+               switch (adev->pdev->device) {
+               case 0x6601:
+               case 0x6621:
+               case 0x6603:
+               case 0x6605:
+                       si_pi->cac_weights = cac_weights_mars_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6600:
+               case 0x6606:
+               case 0x6620:
+               case 0x6604:
+                       si_pi->cac_weights = cac_weights_mars_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6611:
+               case 0x6613:
+               case 0x6608:
+                       si_pi->cac_weights = cac_weights_oland_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6610:
+                       si_pi->cac_weights = cac_weights_oland_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       si_pi->cac_weights = cac_weights_oland;
+                       si_pi->lcac_config = lcac_oland;
+                       si_pi->cac_override = cac_override_oland;
+                       si_pi->powertune_data = &powertune_data_oland;
+                       si_pi->dte_data = dte_data_oland;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_HAINAN) {
+               si_pi->cac_weights = cac_weights_hainan;
+               si_pi->lcac_config = lcac_oland;
+               si_pi->cac_override = cac_override_oland;
+               si_pi->powertune_data = &powertune_data_hainan;
+               si_pi->dte_data = dte_data_sun_xt;
+               update_dte_from_pl2 = true;
+       } else {
+               DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+               return;
+       }
+
+       ni_pi->enable_power_containment = false;
+       ni_pi->enable_cac = false;
+       ni_pi->enable_sq_ramping = false;
+       si_pi->enable_dte = false;
+
+       if (si_pi->powertune_data->enable_powertune_by_default) {
+               ni_pi->enable_power_containment = true;
+               ni_pi->enable_cac = true;
+               if (si_pi->dte_data.enable_dte_by_default) {
+                       si_pi->enable_dte = true;
+                       if (update_dte_from_pl2)
+                               si_update_dte_from_pl2(adev, &si_pi->dte_data);
+               }
+               ni_pi->enable_sq_ramping = true;
+       }
+
+       ni_pi->driver_calculate_cac_leakage = true;
+       ni_pi->cac_configuration_required = true;
+
+       if (ni_pi->cac_configuration_required) {
+               ni_pi->support_cac_long_term_average = true;
+               si_pi->dyn_powertune_data.l2_lta_window_size =
+                       si_pi->powertune_data->l2_lta_window_size_default;
+               si_pi->dyn_powertune_data.lts_truncate =
+                       si_pi->powertune_data->lts_truncate_default;
+       } else {
+               ni_pi->support_cac_long_term_average = false;
+               si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+               si_pi->dyn_powertune_data.lts_truncate = 0;
+       }
+
+       si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+       return 1;
+}
+
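+/*
+ * Convert the CAC sampling window (the two 16-bit fields of
+ * CG_CAC_CTRL multiplied together, a cycle count) into a time:
+ * wintime = window_size * 100 / xclk, which comes out in microseconds
+ * if, as elsewhere in the driver, xclk is reported in 10 kHz units.
+ */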
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+       u32 xclk;
+       u32 wintime;
+       u32 cac_window;
+       u32 cac_window_size;
+
+       xclk = amdgpu_asic_get_xclk(adev);
+
+       if (xclk == 0)
+               return 0;
+
+       cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+       cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+       wintime = (cac_window_size * 100) / xclk;
+
+       return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+       return power_in_watts;
+}
+
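+/*
+ * Apply the percentage overdrive adjustment to the TDP limits:
+ *
+ *   tdp_limit = tdp * (100 +/- tdp_adjustment) / 100
+ *
+ * moving the near-TDP limit by the same delta (clamped at zero when
+ * lowering).  The adjusted limit may not exceed twice the stock TDP,
+ * and the near limit must stay within (0, tdp_limit].
+ */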
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+                                           bool adjust_polarity,
+                                           u32 tdp_adjustment,
+                                           u32 *tdp_limit,
+                                           u32 *near_tdp_limit)
+{
+       u32 adjustment_delta, max_tdp_limit;
+
+       if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+               return -EINVAL;
+
+       max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+       if (adjust_polarity) {
+               *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+               *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+       } else {
+               *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+               adjustment_delta  = adev->pm.dpm.tdp_limit - *tdp_limit;
+               if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+                       *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+               else
+                       *near_tdp_limit = 0;
+       }
+
+       if ((*tdp_limit == 0) || (*tdp_limit > max_tdp_limit))
+               return -EINVAL;
+       if ((*near_tdp_limit == 0) || (*near_tdp_limit > *tdp_limit))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+                                     struct amdgpu_ps *amdgpu_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (ni_pi->enable_power_containment) {
+               SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+               PP_SIslands_PAPMParameters *papm_parm;
+               struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+               u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+               u32 tdp_limit;
+               u32 near_tdp_limit;
+               int ret;
+
+               if (scaling_factor == 0)
+                       return -EINVAL;
+
+               memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+               ret = si_calculate_adjusted_tdp_limits(adev,
+                                                      false, /* ??? */
+                                                      adev->pm.dpm.tdp_adjustment,
+                                                      &tdp_limit,
+                                                      &near_tdp_limit);
+               if (ret)
+                       return ret;
+
+               smc_table->dpm2Params.TDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+               smc_table->dpm2Params.NearTDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+               smc_table->dpm2Params.SafePowerLimit =
+                       cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+                                                  offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+                                                 (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+                                                 sizeof(u32) * 3,
+                                                 si_pi->sram_end);
+               if (ret)
+                       return ret;
+
+               if (si_pi->enable_ppm) {
+                       papm_parm = &si_pi->papm_parm;
+                       memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+                       papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+                       papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+                       papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+                       papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+                       papm_parm->PlatformPowerLimit = 0xffffffff;
+                       papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+                       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+                                                         (u8 *)papm_parm,
+                                                         sizeof(PP_SIslands_PAPMParameters),
+                                                         si_pi->sram_end);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *amdgpu_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (ni_pi->enable_power_containment) {
+               SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+               u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+               int ret;
+
+               memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+               smc_table->dpm2Params.NearTDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+               smc_table->dpm2Params.SafePowerLimit =
+                       cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 (si_pi->state_table_start +
+                                                  offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+                                                  offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+                                                 (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+                                                 sizeof(u32) * 2,
+                                                 si_pi->sram_end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
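+/*
+ * Ratio of the power drawn at the current voltage to that at the
+ * previous one (P ~ V^2), in units of 1/1024 and with the DPM2 margin
+ * folded in:
+ *
+ *   ratio = 1024 * Vcurr^2 * (1000 + margin) / (1000 * Vprev^2)
+ *
+ * Zero is returned if either voltage is zero or the ratio would
+ * overflow 16 bits.
+ */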
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+                                              const u16 prev_std_vddc,
+                                              const u16 curr_std_vddc)
+{
+       u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+       u64 prev_vddc = (u64)prev_std_vddc;
+       u64 curr_vddc = (u64)curr_std_vddc;
+       u64 pwr_efficiency_ratio, n, d;
+
+       if ((prev_vddc == 0) || (curr_vddc == 0))
+               return 0;
+
+       n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+       d = prev_vddc * prev_vddc;
+       pwr_efficiency_ratio = div64_u64(n, d);
+
+       if (pwr_efficiency_ratio > (u64)0xFFFF)
+               return 0;
+
+       return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+                                           struct amdgpu_ps *amdgpu_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+           amdgpu_state->vclk && amdgpu_state->dclk)
+               return true;
+
+       return false;
+}
+
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
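+/*
+ * Fill in the per-level DPM2 power containment parameters.  Level 0 is
+ * left unconstrained; each higher level sizes its maximum pulse
+ * skipping from the sclk drop it can tolerate,
+ *
+ *   MaxPS = SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk) / max_sclk
+ *
+ * where min_sclk derives from the previous level's sclk (scaled by the
+ * MAXPS percentage for the upper levels), and the efficiency ratio
+ * relates the standardized VDDC of adjacent levels.
+ */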
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+                                               struct amdgpu_ps *amdgpu_state,
+                                               SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       SISLANDS_SMC_VOLTAGE_VALUE vddc;
+       u32 prev_sclk;
+       u32 max_sclk;
+       u32 min_sclk;
+       u16 prev_std_vddc;
+       u16 curr_std_vddc;
+       int i;
+       u16 pwr_efficiency_ratio;
+       u8 max_ps_percent;
+       bool disable_uvd_power_tune;
+       int ret;
+
+       if (!ni_pi->enable_power_containment)
+               return 0;
+
+       if (state->performance_level_count == 0)
+               return -EINVAL;
+
+       if (smc_state->levelCount != state->performance_level_count)
+               return -EINVAL;
+
+       disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+       smc_state->levels[0].dpm2.MaxPS = 0;
+       smc_state->levels[0].dpm2.NearTDPDec = 0;
+       smc_state->levels[0].dpm2.AboveSafeInc = 0;
+       smc_state->levels[0].dpm2.BelowSafeInc = 0;
+       smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       for (i = 1; i < state->performance_level_count; i++) {
+               prev_sclk = state->performance_levels[i-1].sclk;
+               max_sclk  = state->performance_levels[i].sclk;
+               if (i == 1)
+                       max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+               else
+                       max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+               if (prev_sclk > max_sclk)
+                       return -EINVAL;
+
+               if ((max_ps_percent == 0) ||
+                   (prev_sclk == max_sclk) ||
+                   disable_uvd_power_tune)
+                       min_sclk = max_sclk;
+               else if (i == 1)
+                       min_sclk = prev_sclk;
+               else
+                       min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+               if (min_sclk < state->performance_levels[0].sclk)
+                       min_sclk = state->performance_levels[0].sclk;
+
+               if (min_sclk == 0)
+                       return -EINVAL;
+
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               state->performance_levels[i-1].vddc, &vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               state->performance_levels[i].vddc, &vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+               if (ret)
+                       return ret;
+
+               pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+                                                                          prev_std_vddc, curr_std_vddc);
+
+               smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+               smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+               smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+               smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+               smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+       }
+
+       return 0;
+}
+
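+/*
+ * Program the per-level SQ power throttle registers.  Each ramping
+ * constant is first checked against the width of its register field
+ * and ramping is disabled outright if any would overflow.  Levels at
+ * or above the sclk ramping threshold (with ramping enabled) get the
+ * tuned constants; every other level gets all-ones fields, leaving SQ
+ * throttling effectively inactive there.
+ */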
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_state,
+                                        SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       u32 sq_power_throttle, sq_power_throttle2;
+       bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+       int i;
+
+       if (state->performance_level_count == 0)
+               return -EINVAL;
+
+       if (smc_state->levelCount != state->performance_level_count)
+               return -EINVAL;
+
+       if (adev->pm.dpm.sq_ramping_threshold == 0)
+               return -EINVAL;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+               enable_sq_ramping = false;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               sq_power_throttle = 0;
+               sq_power_throttle2 = 0;
+
+               if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+                   enable_sq_ramping) {
+                       sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+                       sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+                       sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+                       sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+                       sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+               } else {
+                       sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+                       sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+               }
+
+               smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+               smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+       }
+
+       return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+                                      struct amdgpu_ps *amdgpu_new_state,
+                                      bool enable)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       PPSMC_Result smc_result;
+       int ret = 0;
+
+       if (ni_pi->enable_power_containment) {
+               if (enable) {
+                       if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+                               if (smc_result != PPSMC_Result_OK) {
+                                       ret = -EINVAL;
+                                       ni_pi->pc_enabled = false;
+                               } else {
+                                       ni_pi->pc_enabled = true;
+                               }
+                       }
+               } else {
+                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+                       if (smc_result != PPSMC_Result_OK)
+                               ret = -EINVAL;
+                       ni_pi->pc_enabled = false;
+               }
+       }
+
+       return ret;
+}
+
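+/*
+ * Pack si_dte_data into the SMC's big-endian
+ * Smc_SIslands_DTE_Configuration and upload it.  K and Tdep_count are
+ * clamped to the SMC array sizes, and when a temperature-dependent
+ * array is present the last filter stage is skipped (table_size--).
+ */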
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret = 0;
+       struct si_dte_data *dte_data = &si_pi->dte_data;
+       Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+       u32 table_size;
+       u8 tdep_count;
+       u32 i;
+
+       if (dte_data == NULL)
+               si_pi->enable_dte = false;
+
+       if (!si_pi->enable_dte)
+               return 0;
+
+       if (dte_data->k <= 0)
+               return -EINVAL;
+
+       dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+       if (dte_tables == NULL) {
+               si_pi->enable_dte = false;
+               return -ENOMEM;
+       }
+
+       table_size = dte_data->k;
+
+       if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+               table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+       tdep_count = dte_data->tdep_count;
+       if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+               tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+       dte_tables->K = cpu_to_be32(table_size);
+       dte_tables->T0 = cpu_to_be32(dte_data->t0);
+       dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+       dte_tables->WindowSize = dte_data->window_size;
+       dte_tables->temp_select = dte_data->temp_select;
+       dte_tables->DTE_mode = dte_data->dte_mode;
+       dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+       if (tdep_count > 0)
+               table_size--;
+
+       for (i = 0; i < table_size; i++) {
+               dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+               dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
+       }
+
+       dte_tables->Tdep_count = tdep_count;
+
+       for (i = 0; i < (u32)tdep_count; i++) {
+               dte_tables->T_limits[i] = dte_data->t_limits[i];
+               dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+               dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+       }
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+                                         (u8 *)dte_tables,
+                                         sizeof(Smc_SIslands_DTE_Configuration),
+                                         si_pi->sram_end);
+       kfree(dte_tables);
+
+       return ret;
+}
+
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+                                         u16 *max, u16 *min)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_cac_leakage_table *table =
+               &adev->pm.dpm.dyn_state.cac_leakage_table;
+       u32 i;
+       u32 v0_loadline;
+
+       if (table == NULL)
+               return -EINVAL;
+
+       *max = 0;
+       *min = 0xFFFF;
+
+       for (i = 0; i < table->count; i++) {
+               if (table->entries[i].vddc > *max)
+                       *max = table->entries[i].vddc;
+               if (table->entries[i].vddc < *min)
+                       *min = table->entries[i].vddc;
+       }
+
+       if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+               return -EINVAL;
+
+       v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+       if (v0_loadline > 0xFFFFUL)
+               return -EINVAL;
+
+       *min = (u16)v0_loadline;
+
+       if ((*min > *max) || (*max == 0) || (*min == 0))
+               return -EINVAL;
+
+       return 0;
+}
+
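+/*
+ * Voltage step that spreads [min, max] across the leakage LUT columns,
+ * rounded up: step = ceil((max - min) / NUM_OF_VOLT_ENTRIES), via the
+ * usual (x + N - 1) / N idiom.
+ */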
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+       return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+               SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
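+/*
+ * Build the full temperature x voltage leakage LUT: row i is evaluated
+ * at t = 1000 * (t0 + i * t_step) millidegrees C, column j at
+ * vddc_max - j * vddc_step, stored mirrored so that column 0 holds the
+ * lowest voltage.  Each entry is scaled for the SMC, divided by 4 and
+ * clamped to 16 bits.
+ */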
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+                                    PP_SIslands_CacConfig *cac_tables,
+                                    u16 vddc_max, u16 vddc_min, u16 vddc_step,
+                                    u16 t0, u16 t_step)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 leakage;
+       unsigned int i, j;
+       s32 t;
+       u32 smc_leakage;
+       u32 scaling_factor;
+       u16 voltage;
+
+       scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+       for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+               t = (1000 * (i * t_step + t0));
+
+               for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+                       voltage = vddc_max - (vddc_step * j);
+
+                       si_calculate_leakage_for_v_and_t(adev,
+                                                        &si_pi->powertune_data->leakage_coefficients,
+                                                        voltage,
+                                                        t,
+                                                        si_pi->dyn_powertune_data.cac_leakage,
+                                                        &leakage);
+
+                       smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+                       if (smc_leakage > 0xFFFF)
+                               smc_leakage = 0xFFFF;
+
+                       cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+                               cpu_to_be16((u16)smc_leakage);
+               }
+       }
+       return 0;
+}
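+
+/*
+ * Reading note (our gloss, not from the original patch): the DTE LUT is
+ * filled row-by-row in temperature (t0, t0 + t_step, ..., scaled by the
+ * "1000 *" factor, presumably into millidegrees C), while the inner loop
+ * walks voltage downward from vddc_max; the reversed column index then
+ * stores the entries so that voltage still ascends across each row.
+ */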
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+                                           PP_SIslands_CacConfig *cac_tables,
+                                           u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 leakage;
+       unsigned int i, j;
+       u32 smc_leakage;
+       u32 scaling_factor;
+       u16 voltage;
+
+       scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+       for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+               voltage = vddc_max - (vddc_step * j);
+
+               si_calculate_leakage_for_v(adev,
+                                          &si_pi->powertune_data->leakage_coefficients,
+                                          si_pi->powertune_data->fixed_kt,
+                                          voltage,
+                                          si_pi->dyn_powertune_data.cac_leakage,
+                                          &leakage);
+
+               smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+               if (smc_leakage > 0xFFFF)
+                       smc_leakage = 0xFFFF;
+
+               for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+                       cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+                               cpu_to_be16((u16)smc_leakage);
+       }
+       return 0;
+}
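+
+/*
+ * Note (editorial): unlike the DTE variant above, this table uses a
+ * fixed temperature term (fixed_kt), so each voltage column holds the
+ * same leakage value in every temperature row.
+ */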
+
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PP_SIslands_CacConfig *cac_tables = NULL;
+       u16 vddc_max, vddc_min, vddc_step;
+       u16 t0, t_step;
+       u32 load_line_slope, reg;
+       int ret = 0;
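+       /* xclk appears to be in 10 kHz units, so xclk / 100 gives SMC ticks per microsecond */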
+       u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+       if (!ni_pi->enable_cac)
+               return 0;
+
+       cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+       if (!cac_tables)
+               return -ENOMEM;
+
+       reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+       reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+       WREG32(CG_CAC_CTRL, reg);
+
+       si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+       si_pi->dyn_powertune_data.dc_pwr_value =
+               si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+       si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+       si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+       si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+       ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+       if (ret)
+               goto done_free;
+
+       vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+       vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
+       t_step = 4;
+       t0 = 60;
+
+       if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+               ret = si_init_dte_leakage_table(adev, cac_tables,
+                                               vddc_max, vddc_min, vddc_step,
+                                               t0, t_step);
+       else
+               ret = si_init_simplified_leakage_table(adev, cac_tables,
+                                                      vddc_max, vddc_min, vddc_step);
+       if (ret)
+               goto done_free;
+
+       load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+       cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+       cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+       cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+       cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+       cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+       cac_tables->R_LL = cpu_to_be32(load_line_slope);
+       cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+       cac_tables->calculation_repeats = cpu_to_be32(2);
+       cac_tables->dc_cac = cpu_to_be32(0);
+       cac_tables->log2_PG_LKG_SCALE = 12;
+       cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+       cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+       cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+                                         (u8 *)cac_tables,
+                                         sizeof(PP_SIslands_CacConfig),
+                                         si_pi->sram_end);
+
+       if (ret)
+               goto done_free;
+
+       ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+       if (ret) {
+               ni_pi->enable_cac = false;
+               ni_pi->enable_power_containment = false;
+       }
+
+       kfree(cac_tables);
+
+       return ret;
+}
+
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+                                          const struct si_cac_config_reg *cac_config_regs)
+{
+       const struct si_cac_config_reg *config_regs = cac_config_regs;
+       u32 data = 0, offset;
+
+       if (!config_regs)
+               return -EINVAL;
+
+       while (config_regs->offset != 0xFFFFFFFF) {
+               switch (config_regs->type) {
+               case SISLANDS_CACCONFIG_CGIND:
+                       offset = SMC_CG_IND_START + config_regs->offset;
+                       if (offset < SMC_CG_IND_END)
+                               data = RREG32_SMC(offset);
+                       break;
+               default:
+                       data = RREG32(config_regs->offset);
+                       break;
+               }
+
+               data &= ~config_regs->mask;
+               data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+               switch (config_regs->type) {
+               case SISLANDS_CACCONFIG_CGIND:
+                       offset = SMC_CG_IND_START + config_regs->offset;
+                       if (offset < SMC_CG_IND_END)
+                               WREG32_SMC(offset, data);
+                       break;
+               default:
+                       WREG32(config_regs->offset, data);
+                       break;
+               }
+               config_regs++;
+       }
+       return 0;
+}
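+
+/*
+ * Sketch of the expected table shape (hypothetical entries, for
+ * illustration only): the walker above runs until it sees an offset of
+ * 0xFFFFFFFF, e.g.
+ *
+ *   static const struct si_cac_config_reg example_regs[] = {
+ *           { .offset = 0x10, .mask = 0xff, .shift = 0, .value = 0x1,
+ *             .type = SISLANDS_CACCONFIG_CGIND },
+ *           { .offset = 0xFFFFFFFF },   -- terminator, never written
+ *   };
+ */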
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+
+       if (!ni_pi->enable_cac ||
+           !ni_pi->cac_configuration_required)
+               return 0;
+
+       ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+       if (ret)
+               return ret;
+       ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+       if (ret)
+               return ret;
+       ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+                            struct amdgpu_ps *amdgpu_new_state,
+                            bool enable)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result smc_result;
+       int ret = 0;
+
+       if (ni_pi->enable_cac) {
+               if (enable) {
+                       if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+                               if (ni_pi->support_cac_long_term_average) {
+                                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+                                       if (smc_result != PPSMC_Result_OK)
+                                               ni_pi->support_cac_long_term_average = false;
+                               }
+
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+                               if (smc_result != PPSMC_Result_OK) {
+                                       ret = -EINVAL;
+                                       ni_pi->cac_enabled = false;
+                               } else {
+                                       ni_pi->cac_enabled = true;
+                               }
+
+                               if (si_pi->enable_dte) {
+                                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+                                       if (smc_result != PPSMC_Result_OK)
+                                               ret = -EINVAL;
+                               }
+                       }
+               } else if (ni_pi->cac_enabled) {
+                       if (si_pi->enable_dte)
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+                       ni_pi->cac_enabled = false;
+
+                       if (ni_pi->support_cac_long_term_average)
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+               }
+       }
+       return ret;
+}
+
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+       SISLANDS_SMC_SCLK_VALUE sclk_params;
+       u32 fb_div, p_div;
+       u32 clk_s, clk_v;
+       u32 sclk = 0;
+       int ret = 0;
+       u32 tmp;
+       int i;
+
+       if (si_pi->spll_table_start == 0)
+               return -EINVAL;
+
+       spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+       if (spll_table == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < 256; i++) {
+               ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+               if (ret)
+                       break;
+               p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+               fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+               clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+               clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+               fb_div &= ~0x00001FFF;
+               fb_div >>= 1;
+               clk_v >>= 6;
+
+               if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+                       ret = -EINVAL;
+               if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+                       ret = -EINVAL;
+               if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+                       ret = -EINVAL;
+               if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+                       ret = -EINVAL;
+
+               if (ret)
+                       break;
+
+               tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+                       ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+               spll_table->freq[i] = cpu_to_be32(tmp);
+
+               tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+                       ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+               spll_table->ss[i] = cpu_to_be32(tmp);
+
+               sclk += 512;
+       }
+
+       if (!ret)
+               ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+                                                 (u8 *)spll_table,
+                                                 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+                                                 si_pi->sram_end);
+
+       if (ret)
+               ni_pi->enable_power_containment = false;
+
+       kfree(spll_table);
+
+       return ret;
+}
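+
+/*
+ * Coverage note (our arithmetic, based on the loop above): sclk starts
+ * at 0 and advances by 512 per entry for 256 entries; if sclk is in the
+ * usual 10 kHz units that is one entry every 5.12 MHz, covering engine
+ * clocks up to roughly 1.3 GHz.
+ */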
+
+struct si_dpm_quirk {
+       u32 chip_vendor;
+       u32 chip_device;
+       u32 subsys_vendor;
+       u32 subsys_device;
+       u32 max_sclk;
+       u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+       /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+       { 0, 0, 0, 0 },
+};
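+
+/*
+ * Editorial note: the list is terminated by the all-zero sentinel (the
+ * walker below stops when chip_device == 0), and a max_sclk/max_mclk of
+ * 0 in an entry means "no cap" for that clock.
+ */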
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+                                                  u16 vce_voltage)
+{
+       u16 highest_leakage = 0;
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int i;
+
+       for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+               if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+                       highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+       }
+
+       if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+               return highest_leakage;
+
+       return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+                                   u32 evclk, u32 ecclk, u16 *voltage)
+{
+       u32 i;
+       int ret = -EINVAL;
+       struct amdgpu_vce_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+       if (((evclk == 0) && (ecclk == 0)) ||
+           (table->count == 0)) {
+               *voltage = 0;
+               return 0;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if ((evclk <= table->entries[i].evclk) &&
+                   (ecclk <= table->entries[i].ecclk)) {
+                       *voltage = table->entries[i].v;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       /* if no match return the highest voltage */
+       if (ret)
+               *voltage = table->entries[table->count - 1].v;
+
+       *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+       return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+       u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+       /* we never hit the non-gddr5 limit so disable it */
+       u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+       return vblank_time < switch_limit;
+}
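+
+/*
+ * Note (our reading): amdgpu_dpm_get_vblank_time() and the 450 limit
+ * above are both in microseconds; an mclk switch is only considered
+ * safe when it fits inside the vblank interval.
+ */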
+
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+                               u32 arb_freq_src, u32 arb_freq_dest)
+{
+       u32 mc_arb_dram_timing;
+       u32 mc_arb_dram_timing2;
+       u32 burst_time;
+       u32 mc_cg_config;
+
+       switch (arb_freq_src) {
+       case MC_CG_ARB_FREQ_F0:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F2:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F3:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (arb_freq_dest) {
+       case MC_CG_ARB_FREQ_F0:
+               WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F2:
+               WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F3:
+               WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+       WREG32(MC_CG_CONFIG, mc_cg_config);
+       WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+       return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+                         struct amdgpu_ps *rps)
+{
+       struct si_ps *new_ps = si_get_ps(rps);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+       eg_pi->current_rps = *rps;
+       ni_pi->current_ps = *new_ps;
+       eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+                           struct amdgpu_ps *rps)
+{
+       struct si_ps *new_ps = si_get_ps(rps);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+       eg_pi->requested_rps = *rps;
+       ni_pi->requested_ps = *new_ps;
+       eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+                                          struct amdgpu_ps *new_ps,
+                                          struct amdgpu_ps *old_ps)
+{
+       struct si_ps *new_state = si_get_ps(new_ps);
+       struct si_ps *current_state = si_get_ps(old_ps);
+
+       if ((new_ps->vclk == old_ps->vclk) &&
+           (new_ps->dclk == old_ps->dclk))
+               return;
+
+       if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+           current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+               return;
+
+       amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *new_ps,
+                                         struct amdgpu_ps *old_ps)
+{
+       struct si_ps *new_state = si_get_ps(new_ps);
+       struct si_ps *current_state = si_get_ps(old_ps);
+
+       if ((new_ps->vclk == old_ps->vclk) &&
+           (new_ps->dclk == old_ps->dclk))
+               return;
+
+       if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+           current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+               return;
+
+       amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
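+
+/*
+ * Note (editorial): this before/after pair brackets the engine-clock
+ * change: the UVD clocks are set before the switch when the new state's
+ * top sclk is lower than the current one, and after the switch when it
+ * is higher or equal.
+ */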
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+       unsigned int i;
+
+       for (i = 0; i < table->count; i++)
+               if (voltage <= table->entries[i].value)
+                       return table->entries[i].value;
+
+       return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+                               u32 max_clock, u32 requested_clock)
+{
+       unsigned int i;
+
+       if ((clocks == NULL) || (clocks->count == 0))
+               return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+       for (i = 0; i < clocks->count; i++) {
+               if (clocks->values[i] >= requested_clock)
+                       return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+       }
+
+       return (clocks->values[clocks->count - 1] < max_clock) ?
+               clocks->values[clocks->count - 1] : max_clock;
+}
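+
+/*
+ * Behavior sketch (assumed values): with values = {30000, 60000, 90000},
+ * max_clock = 80000 and requested_clock = 50000, the first value >=
+ * 50000 is 60000, which is under the cap, so 60000 is returned; asking
+ * for 95000 instead falls through to min(90000, 80000) = 80000.
+ */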
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+                             u32 max_mclk, u32 requested_mclk)
+{
+       return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+                                   max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+                             u32 max_sclk, u32 requested_sclk)
+{
+       return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+                                   max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+                                                           u32 *max_clock)
+{
+       u32 i, clock = 0;
+
+       if ((table == NULL) || (table->count == 0)) {
+               *max_clock = clock;
+               return;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if (clock < table->entries[i].clk)
+                       clock = table->entries[i].clk;
+       }
+       *max_clock = clock;
+}
+
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+                                              u32 clock, u16 max_voltage, u16 *voltage)
+{
+       u32 i;
+
+       if ((table == NULL) || (table->count == 0))
+               return;
+
+       for (i = 0; i < table->count; i++) {
+               if (clock <= table->entries[i].clk) {
+                       if (*voltage < table->entries[i].v)
+                               *voltage = (u16)((table->entries[i].v < max_voltage) ?
+                                          table->entries[i].v : max_voltage);
+                       return;
+               }
+       }
+
+       *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+                                         const struct amdgpu_clock_and_voltage_limits *max_limits,
+                                         struct rv7xx_pl *pl)
+{
+       if ((pl->mclk == 0) || (pl->sclk == 0))
+               return;
+
+       if (pl->mclk == pl->sclk)
+               return;
+
+       if (pl->mclk > pl->sclk) {
+               if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+                       pl->sclk = btc_get_valid_sclk(adev,
+                                                     max_limits->sclk,
+                                                     (pl->mclk +
+                                                     (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+                                                     adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+       } else {
+               if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+                       pl->mclk = btc_get_valid_mclk(adev,
+                                                     max_limits->mclk,
+                                                     pl->sclk -
+                                                     adev->pm.dpm.dyn_state.sclk_mclk_delta);
+       }
+}
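+
+/*
+ * Worked example (assumed numbers): with mclk_sclk_ratio = 4,
+ * pl->mclk = 1200 and pl->sclk = 200, ceil(1200 / 200) = 6 exceeds the
+ * ratio, so sclk is raised toward ceil(1200 / 4) = 300 (subject to
+ * btc_get_valid_sclk() snapping and the max_limits->sclk cap).
+ */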
+
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+                                         u16 max_vddc, u16 max_vddci,
+                                         u16 *vddc, u16 *vddci)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       u16 new_voltage;
+
+       if ((*vddc == 0) || (*vddci == 0))
+               return;
+
+       if (*vddc > *vddci) {
+               if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+                       new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+                                                      (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+                       *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+               }
+       } else {
+               if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+                       new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+                                                      (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+                       *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+               }
+       }
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+                                              u32 sys_mask,
+                                              enum amdgpu_pcie_gen asic_gen,
+                                              enum amdgpu_pcie_gen default_gen)
+{
+       switch (asic_gen) {
+       case AMDGPU_PCIE_GEN1:
+               return AMDGPU_PCIE_GEN1;
+       case AMDGPU_PCIE_GEN2:
+               return AMDGPU_PCIE_GEN2;
+       case AMDGPU_PCIE_GEN3:
+               return AMDGPU_PCIE_GEN3;
+       default:
+               if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+                       return AMDGPU_PCIE_GEN3;
+               else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+                       return AMDGPU_PCIE_GEN2;
+               else
+                       return AMDGPU_PCIE_GEN1;
+       }
+       return AMDGPU_PCIE_GEN1;
+}
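+
+/*
+ * Note (editorial): a concrete asic_gen acts as an override and is
+ * returned as-is; the default case returns default_gen only when the
+ * system speed mask supports it, and otherwise falls all the way back
+ * to GEN1 (e.g. default_gen == GEN3 with only 5.0 GT/s support still
+ * yields GEN1, not GEN2).
+ */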
+
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+                           u32 *p, u32 *u)
+{
+       u32 b_c = 0;
+       u32 i_c;
+       u32 tmp;
+
+       i_c = (i * r_c) / 100;
+       tmp = i_c >> p_b;
+
+       while (tmp) {
+               b_c++;
+               tmp >>= 1;
+       }
+
+       *u = (b_c + 1) / 2;
+       *p = i_c / (1 << (2 * (*u)));
+}
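+
+/*
+ * Worked example (assumed inputs): i = 1000, r_c = 50, p_b = 3 gives
+ * i_c = 500 and tmp = 500 >> 3 = 62, whose six-bit width makes b_c = 6;
+ * then *u = (6 + 1) / 2 = 3 and *p = 500 / (1 << 6) = 7.
+ */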
+
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+       u32 k, a, ah, al;
+       u32 t1;
+
+       if ((fl == 0) || (fh == 0) || (fl > fh))
+               return -EINVAL;
+
+       k = (100 * fh) / fl;
+       t1 = (t * (k - 100));
+       a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+       a = (a + 5) / 10;
+       ah = ((a * t) + 5000) / 10000;
+       al = a - ah;
+
+       *th = t - ah;
+       *tl = t + al;
+
+       return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+       if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+               return true;
+       if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+               return true;
+       return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
+       return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       u16 vddc;
+
+       if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+               pi->max_vddc = 0;
+       else
+               pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct amdgpu_atom_ss ss;
+
+       pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                      ASIC_INTERNAL_ENGINE_SS, 0);
+       pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                      ASIC_INTERNAL_MEMORY_SS, 0);
+
+       if (pi->sclk_ss || pi->mclk_ss)
+               pi->dynamic_ss = true;
+       else
+               pi->dynamic_ss = false;
+}
+
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps)
+{
+       struct si_ps *ps = si_get_ps(rps);
+       struct amdgpu_clock_and_voltage_limits *max_limits;
+       bool disable_mclk_switching = false;
+       bool disable_sclk_switching = false;
+       u32 mclk, sclk;
+       u16 vddc, vddci, min_vce_voltage = 0;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+       u32 max_sclk = 0, max_mclk = 0;
+       int i;
+       struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+       /* Apply dpm quirks */
+       while (p && p->chip_device != 0) {
+               if (adev->pdev->vendor == p->chip_vendor &&
+                   adev->pdev->device == p->chip_device &&
+                   adev->pdev->subsystem_vendor == p->subsys_vendor &&
+                   adev->pdev->subsystem_device == p->subsys_device) {
+                       max_sclk = p->max_sclk;
+                       max_mclk = p->max_mclk;
+                       break;
+               }
+               ++p;
+       }
+
+       if (rps->vce_active) {
+               rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+               rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+               si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+                                        &min_vce_voltage);
+       } else {
+               rps->evclk = 0;
+               rps->ecclk = 0;
+       }
+
+       if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+           si_dpm_vblank_too_short(adev))
+               disable_mclk_switching = true;
+
+       if (rps->vclk || rps->dclk) {
+               disable_mclk_switching = true;
+               disable_sclk_switching = true;
+       }
+
+       if (adev->pm.dpm.ac_power)
+               max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+       else
+               max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+       for (i = ps->performance_level_count - 2; i >= 0; i--) {
+               if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+                       ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+       }
+       if (!adev->pm.dpm.ac_power) {
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].mclk > max_limits->mclk)
+                               ps->performance_levels[i].mclk = max_limits->mclk;
+                       if (ps->performance_levels[i].sclk > max_limits->sclk)
+                               ps->performance_levels[i].sclk = max_limits->sclk;
+                       if (ps->performance_levels[i].vddc > max_limits->vddc)
+                               ps->performance_levels[i].vddc = max_limits->vddc;
+                       if (ps->performance_levels[i].vddci > max_limits->vddci)
+                               ps->performance_levels[i].vddci = max_limits->vddci;
+               }
+       }
+
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+               if (max_mclk) {
+                       if (ps->performance_levels[i].mclk > max_mclk)
+                               ps->performance_levels[i].mclk = max_mclk;
+               }
+               if (max_sclk) {
+                       if (ps->performance_levels[i].sclk > max_sclk)
+                               ps->performance_levels[i].sclk = max_sclk;
+               }
+       }
+
+       /* XXX validate the min clocks required for display */
+
+       if (disable_mclk_switching) {
+               mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+               vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+       } else {
+               mclk = ps->performance_levels[0].mclk;
+               vddci = ps->performance_levels[0].vddci;
+       }
+
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+               vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+       } else {
+               sclk = ps->performance_levels[0].sclk;
+               vddc = ps->performance_levels[0].vddc;
+       }
+
+       if (rps->vce_active) {
+               if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+                       sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+               if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+                       mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+       }
+
+       /* adjusted low state */
+       ps->performance_levels[0].sclk = sclk;
+       ps->performance_levels[0].mclk = mclk;
+       ps->performance_levels[0].vddc = vddc;
+       ps->performance_levels[0].vddci = vddci;
+
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[0].sclk;
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (sclk < ps->performance_levels[i].sclk)
+                               sclk = ps->performance_levels[i].sclk;
+               }
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       ps->performance_levels[i].sclk = sclk;
+                       ps->performance_levels[i].vddc = vddc;
+               }
+       } else {
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+                               ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+                       if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+                               ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+               }
+       }
+
+       if (disable_mclk_switching) {
+               mclk = ps->performance_levels[0].mclk;
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (mclk < ps->performance_levels[i].mclk)
+                               mclk = ps->performance_levels[i].mclk;
+               }
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       ps->performance_levels[i].mclk = mclk;
+                       ps->performance_levels[i].vddci = vddci;
+               }
+       } else {
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+                               ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+                       if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+                               ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+               }
+       }
+
+       for (i = 0; i < ps->performance_level_count; i++)
+               btc_adjust_clock_combinations(adev, max_limits,
+                                             &ps->performance_levels[i]);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (ps->performance_levels[i].vddc < min_vce_voltage)
+                       ps->performance_levels[i].vddc = min_vce_voltage;
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                  ps->performance_levels[i].sclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                  ps->performance_levels[i].mclk,
+                                                  max_limits->vddci, &ps->performance_levels[i].vddci);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                  ps->performance_levels[i].mclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+                                                  adev->clock.current_dispclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+       }
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               btc_apply_voltage_delta_rules(adev,
+                                             max_limits->vddc, max_limits->vddci,
+                                             &ps->performance_levels[i].vddc,
+                                             &ps->performance_levels[i].vddci);
+       }
+
+       ps->dc_compatible = true;
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+                       ps->dc_compatible = false;
+       }
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+                                    u16 reg_offset, u32 *value)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       return amdgpu_si_read_smc_sram_dword(adev,
+                                            si_pi->soft_regs_start + reg_offset, value,
+                                            si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+                                     u16 reg_offset, u32 value)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       return amdgpu_si_write_smc_sram_dword(adev,
+                                             si_pi->soft_regs_start + reg_offset,
+                                             value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+       bool ret = false;
+       u32 tmp, width, row, column, bank, density;
+       bool is_memory_gddr5, is_special;
+
+       tmp = RREG32(MC_SEQ_MISC0);
+       is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+       is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
+               && (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+       WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+       width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+       tmp = RREG32(MC_ARB_RAMCFG);
+       row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+       column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+       bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+       density = (1 << (row + column - 20 + bank)) * width;
+
+       if ((adev->pdev->device == 0x6819) &&
+           is_memory_gddr5 && is_special && (density == 0x400))
+               ret = true;
+
+       return ret;
+}
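+
+/*
+ * Density arithmetic, for illustration (assumed field values): row = 13,
+ * column = 10, bank = 3 and a 16-bit width give
+ * (1 << (13 + 10 - 20 + 3)) * 16 = 64 * 16 = 1024 = 0x400, the value the
+ * check above treats as the special 1 GB configuration.
+ */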
+
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u16 vddc, count = 0;
+       int i, ret;
+
+       for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+               ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+               if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+                       si_pi->leakage_voltage.entries[count].voltage = vddc;
+                       si_pi->leakage_voltage.entries[count].leakage_index =
+                               SISLANDS_LEAKAGE_INDEX0 + i;
+                       count++;
+               }
+       }
+       si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+                                                    u32 index, u16 *leakage_voltage)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int i;
+
+       if (leakage_voltage == NULL)
+               return -EINVAL;
+
+       if ((index & 0xff00) != 0xff00)
+               return -EINVAL;
+
+       if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+               return -EINVAL;
+
+       if (index < SISLANDS_LEAKAGE_INDEX0)
+               return -EINVAL;
+
+       for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+               if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+                       *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+                       return 0;
+               }
+       }
+       return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       bool want_thermal_protection;
+       enum amdgpu_dpm_event_src dpm_event_src;
+
+       switch (sources) {
+       case 0:
+       default:
+               want_thermal_protection = false;
+               break;
+       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+               want_thermal_protection = true;
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+               break;
+       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+               want_thermal_protection = true;
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+               break;
+       case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+             (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+               want_thermal_protection = true;
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+               break;
+       }
+
+       if (want_thermal_protection) {
+               WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+               if (pi->thermal_protection)
+                       WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+       } else {
+               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+       }
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+                                          enum amdgpu_dpm_auto_throttle_src source,
+                                          bool enable)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       if (enable) {
+               if (!(pi->active_auto_throttle_sources & (1 << source))) {
+                       pi->active_auto_throttle_sources |= 1 << source;
+                       si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+               }
+       } else {
+               if (pi->active_auto_throttle_sources & (1 << source)) {
+                       pi->active_auto_throttle_sources &= ~(1 << source);
+                       si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+               }
+       }
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+       if (enable)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+       else
+               WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+                                              u32 thermal_level)
+{
+       PPSMC_Result ret;
+
+       if (thermal_level == 0) {
+               ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+               if (ret == PPSMC_Result_OK)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+       if (ac_power)
+               return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+                       0 : -EINVAL;
+
+       return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                                                     PPSMC_Msg msg, u32 parameter)
+{
+       WREG32(SMC_SCRATCH0, parameter);
+       return amdgpu_si_send_msg_to_smc(adev, msg);
+}
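+
+/*
+ * Usage sketch: the parameter travels through the SMC_SCRATCH0 scratch
+ * register rather than the message itself, e.g.
+ *
+ *   si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1);
+ *
+ * as done by si_restrict_performance_levels_before_switch() below.
+ */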
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+                                  enum amdgpu_dpm_forced_level level)
+{
+       struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+       struct si_ps *ps = si_get_ps(rps);
+       u32 levels = ps->performance_level_count;
+
+       if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+                       return -EINVAL;
+       } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+                       return -EINVAL;
+       } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+                       return -EINVAL;
+       }
+
+       adev->pm.dpm.forced_level = level;
+
+       return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+       amdgpu_si_program_jump_on_start(adev);
+       amdgpu_si_start_smc(adev);
+       amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+       amdgpu_si_reset_smc(adev);
+       amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->state_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->soft_regs_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->mc_reg_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->fan_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->arb_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->cac_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->dte_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->spll_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->papm_cfg_table_start = tmp;
+
+       return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+       si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+       si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+       si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+       si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+       si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+       si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+       si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+       si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+       si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+       si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+       si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+       si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+       si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+       si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       if (enable)
+               WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+       else
+               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+       WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+       udelay(25000);
+
+       return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+       int i;
+
+       WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+       udelay(7000);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(SMC_RESP_0) == 1)
+                       break;
+               udelay(1000);
+       }
+
+       return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+                                    bool has_display)
+{
+       PPSMC_Msg msg = has_display ?
+               PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+       return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+       u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+       u32 vddc_dly, acpi_dly, vbi_dly;
+       u32 reference_clock;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+       voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+       backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+       if (voltage_response_time == 0)
+               voltage_response_time = 1000;
+
+       acpi_delay_time = 15000;
+       vbi_time_out = 100000;
+
+       reference_clock = amdgpu_asic_get_xclk(adev);
+
+       vddc_dly = (voltage_response_time  * reference_clock) / 100;
+       acpi_dly = (acpi_delay_time * reference_clock) / 100;
+       vbi_dly  = (vbi_time_out * reference_clock) / 100;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       u32 tmp;
+
+       /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+       if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+               tmp = 0x10;
+       else
+               tmp = 0x1;
+
+       if (eg_pi->sclk_deep_sleep) {
+               WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+               WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+                        ~AUTOSCALE_ON_SS_CLEAR);
+       }
+}
+
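+/* Select which display gap events allow reclocking, and retarget the
+ * DISP1 "slow" select at the first active crtc if it currently points at
+ * a disabled one.
+ */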
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+       u32 tmp, pipe;
+       int i;
+
+       tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+       if (adev->pm.dpm.new_active_crtc_count > 0)
+               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+       else
+               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+       if (adev->pm.dpm.new_active_crtc_count > 1)
+               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+       else
+               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+       WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+       tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+       pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+       if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+           (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+               /* find the first active crtc */
+               for (i = 0; i < adev->mode_info.num_crtc; i++) {
+                       if (adev->pm.dpm.new_active_crtcs & (1 << i))
+                               break;
+               }
+               if (i == adev->mode_info.num_crtc)
+                       pipe = 0;
+               else
+                       pipe = i;
+
+               tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+               tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+               WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+       }
+
+       /* Setting this to false forces the performance state to low whenever
+        * no crtcs are enabled, which is a problem on PowerXpress systems and
+        * when the card is used for offscreen rendering or compute with all
+        * displays off.
+        */
+       si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       if (enable) {
+               if (pi->sclk_ss)
+                       WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+       } else {
+               WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+               WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+       }
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       r600_calculate_u_and_p(pi->asi,
+                              xclk,
+                              16,
+                              &pi->bsp,
+                              &pi->bsu);
+
+       r600_calculate_u_and_p(pi->pasi,
+                              xclk,
+                              16,
+                              &pi->pbsp,
+                              &pi->pbsu);
+
+       pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+       pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+       WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+       WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+       int i;
+       enum r600_td td = R600_TD_DFLT;
+
+       for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+               WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+       if (td == R600_TD_AUTO)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+       else
+               WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+       if (td == R600_TD_UP)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+       if (td == R600_TD_DOWN)
+               WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+       WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+       WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+       tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+       tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+               DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+       tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+       tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+               DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+       WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+       WREG32(CG_FTV, 0);
+}
+
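+/* Bin the memory clock into an MC parameter index, 0x0 through 0x0f, in
+ * 5000-unit steps (50 MHz, assuming the usual 10 kHz clock units).
+ */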
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+       u8 mc_para_index;
+
+       if (memory_clock < 10000)
+               mc_para_index = 0;
+       else if (memory_clock >= 80000)
+               mc_para_index = 0x0f;
+       else
+               mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+       return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+       u8 mc_para_index;
+
+       if (strobe_mode) {
+               if (memory_clock < 12500)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 47500)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (u8)((memory_clock - 10000) / 2500);
+       } else {
+               if (memory_clock < 65000)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 135000)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (u8)((memory_clock - 60000) / 5000);
+       }
+       return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       bool strobe_mode = false;
+       u8 result = 0;
+
+       if (mclk <= pi->mclk_strobe_mode_threshold)
+               strobe_mode = true;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+               result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+       else
+               result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+       if (strobe_mode)
+               result |= SISLANDS_SMC_STROBE_ENABLE;
+
+       return result;
+}
+
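+/* Hold the SMC in reset with its clock stopped while the microcode is
+ * (re)loaded into SMC RAM.
+ */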
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       amdgpu_si_reset_smc(adev);
+       amdgpu_si_smc_clock(adev, false);
+
+       return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
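+/* A phase-shedding mask with N bits set implies 2^N phase levels; the
+ * limits table must then supply exactly 2^N - 1 crossover entries.
+ */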
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+                                             const struct atom_voltage_table *table,
+                                             const struct amdgpu_phase_shedding_limits_table *limits)
+{
+       u32 data, num_bits, num_levels;
+
+       if ((table == NULL) || (limits == NULL))
+               return false;
+
+       data = table->mask_low;
+
+       num_bits = hweight32(data);
+
+       if (num_bits == 0)
+               return false;
+
+       num_levels = (1 << num_bits);
+
+       if (table->count != num_levels)
+               return false;
+
+       if (limits->count != (num_levels - 1))
+               return false;
+
+       return true;
+}
+
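+/* Shrink an oversized voltage table by dropping its lowest entries, so
+ * that the highest max_voltage_steps voltages are kept.
+ */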
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+                                                     u32 max_voltage_steps,
+                                                     struct atom_voltage_table *voltage_table)
+{
+       unsigned int i, diff;
+
+       if (voltage_table->count <= max_voltage_steps)
+               return;
+
+       diff = voltage_table->count - max_voltage_steps;
+
+       for (i = 0; i < max_voltage_steps; i++)
+               voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+       voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+                                    struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+                                    struct atom_voltage_table *voltage_table)
+{
+       u32 i;
+
+       if (voltage_dependency_table == NULL)
+               return -EINVAL;
+
+       voltage_table->mask_low = 0;
+       voltage_table->phase_delay = 0;
+
+       voltage_table->count = voltage_dependency_table->count;
+       for (i = 0; i < voltage_table->count; i++) {
+               voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+               voltage_table->entries[i].smio_low = 0;
+       }
+
+       return 0;
+}
+
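+/* Build the VDDC, VDDCI and MVDD voltage tables from the atombios GPIO
+ * LUTs (or from the SVI2 clock dependency tables) and trim each one to
+ * what the SMC state table can hold.
+ */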
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+
+       if (pi->voltage_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+               if (ret)
+                       return ret;
+
+               if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &eg_pi->vddc_voltage_table);
+       } else if (si_pi->voltage_control_svi2) {
+               ret = si_get_svi2_voltage_table(adev,
+                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                               &eg_pi->vddc_voltage_table);
+               if (ret)
+                       return ret;
+       } else {
+               return -EINVAL;
+       }
+
+       if (eg_pi->vddci_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+               if (ret)
+                       return ret;
+
+               if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &eg_pi->vddci_voltage_table);
+       }
+       if (si_pi->vddci_control_svi2) {
+               ret = si_get_svi2_voltage_table(adev,
+                                               &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                               &eg_pi->vddci_voltage_table);
+               if (ret)
+                       return ret;
+       }
+
+       if (pi->mvdd_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+               if (ret) {
+                       pi->mvdd_control = false;
+                       return ret;
+               }
+
+               if (si_pi->mvdd_voltage_table.count == 0) {
+                       pi->mvdd_control = false;
+                       return -EINVAL;
+               }
+
+               if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &si_pi->mvdd_voltage_table);
+       }
+
+       if (si_pi->vddc_phase_shed_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+                                                   VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+               if (ret)
+                       si_pi->vddc_phase_shed_control = false;
+
+               if ((si_pi->vddc_phase_shed_table.count == 0) ||
+                   (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+                       si_pi->vddc_phase_shed_control = false;
+       }
+
+       return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+                                         const struct atom_voltage_table *voltage_table,
+                                         SISLANDS_SMC_STATETABLE *table)
+{
+       unsigned int i;
+
+       for (i = 0; i < voltage_table->count; i++)
+               table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
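+/* Write the SMIO and voltage masks for each voltage domain into the SMC
+ * state table; on SVI2 parts, instead point the SMC at the rework GPIOs
+ * via soft registers.
+ */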
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+                                         SISLANDS_SMC_STATETABLE *table)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u8 i;
+
+       if (si_pi->voltage_control_svi2) {
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+                       si_pi->svc_gpio_id);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+                       si_pi->svd_gpio_id);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+                                          2);
+       } else {
+               if (eg_pi->vddc_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+                               cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+                       for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+                               if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+                                       table->maxVDDCIndexInPPTable = i;
+                                       break;
+                               }
+                       }
+               }
+
+               if (eg_pi->vddci_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+                               cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+               }
+
+               if (si_pi->mvdd_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+                               cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+               }
+
+               if (si_pi->vddc_phase_shed_control) {
+                       if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+                                                             &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+                               si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+                               table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+                                       cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+                               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+                                                          (u32)si_pi->vddc_phase_shed_table.phase_delay);
+                       } else {
+                               si_pi->vddc_phase_shed_control = false;
+                       }
+               }
+       }
+
+       return 0;
+}
+
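+/* Select the first table entry that satisfies the requested voltage;
+ * fail if the request exceeds every entry.
+ */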
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+                                    const struct atom_voltage_table *table,
+                                    u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       unsigned int i;
+
+       for (i = 0; i < table->count; i++) {
+               if (value <= table->entries[i].value) {
+                       voltage->index = (u8)i;
+                       voltage->value = cpu_to_be16(table->entries[i].value);
+                       break;
+               }
+       }
+
+       if (i >= table->count)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+                                 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (pi->mvdd_control) {
+               if (mclk <= pi->mvdd_split_frequency)
+                       voltage->index = 0;
+               else
+                       voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+               voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+       }
+       return 0;
+}
+
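+/* Map a voltage to its "standard" (leakage-corrected) value via the CAC
+ * leakage table: first look for an exact match in the sclk dependency
+ * table, then fall back to the first entry at or above the request.
+ */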
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+                                   SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+                                   u16 *std_voltage)
+{
+       u16 v_index;
+       bool voltage_found = false;
+       *std_voltage = be16_to_cpu(voltage->value);
+
+       if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+               if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+                       if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+                               return -EINVAL;
+
+                       for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+                               if (be16_to_cpu(voltage->value) ==
+                                   (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+                                       voltage_found = true;
+                                       if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                                               *std_voltage =
+                                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+                                       else
+                                               *std_voltage =
+                                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+                                       break;
+                               }
+                       }
+
+                       if (!voltage_found) {
+                               for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+                                       if (be16_to_cpu(voltage->value) <=
+                                           (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+                                               voltage_found = true;
+                                               if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                                                       *std_voltage =
+                                                               adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+                                               else
+                                                       *std_voltage =
+                                                               adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+                                               break;
+                                       }
+                               }
+                       }
+               } else {
+                       if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                               *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+               }
+       }
+
+       return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+                                        u16 value, u8 index,
+                                        SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       voltage->index = index;
+       voltage->value = cpu_to_be16(value);
+
+       return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+                                           const struct amdgpu_phase_shedding_limits_table *limits,
+                                           u16 voltage, u32 sclk, u32 mclk,
+                                           SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+       unsigned int i;
+
+       for (i = 0; i < limits->count; i++) {
+               if ((voltage <= limits->entries[i].voltage) &&
+                   (sclk <= limits->entries[i].sclk) &&
+                   (mclk <= limits->entries[i].mclk))
+                       break;
+       }
+
+       smc_voltage->phase_settings = (u8)i;
+
+       return 0;
+}
+
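+/* The high byte of the first dword of the arb table tracks which MC arb
+ * register set is current; start the driver off at F1, presumably so F0
+ * keeps the boot-up timings (see si_force_switch_to_arb_f0()).
+ */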
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+       return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+                                             tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+       return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       tmp = (tmp >> 24) & 0xff;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
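+/* Derive the MC arbiter refresh rate from the DRAM geometry: the row
+ * count comes from MC_ARB_RAMCFG, the refresh interval from MC_SEQ_MISC0,
+ * scaled by the engine clock into the units the arbiter expects.
+ */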
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+                                           u32 engine_clock)
+{
+       u32 dram_rows;
+       u32 dram_refresh_rate;
+       u32 mc_arb_rfsh_rate;
+       u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+       if (tmp >= 4)
+               dram_rows = 16384;
+       else
+               dram_rows = 1 << (tmp + 10);
+
+       dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+       mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+       return mc_arb_rfsh_rate;
+}
+
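+/* Ask atombios to set the dram timings for this performance level, then
+ * capture the resulting MC arb registers along with a refresh rate
+ * derived from the level's sclk.
+ */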
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+                                               struct rv7xx_pl *pl,
+                                               SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+       u32 dram_timing;
+       u32 dram_timing2;
+       u32 burst_time;
+
+       arb_regs->mc_arb_rfsh_rate =
+               (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+       amdgpu_atombios_set_engine_dram_timings(adev,
+                                           pl->sclk,
+                                           pl->mclk);
+
+       dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+       dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+       burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+       arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+       arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+       arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+       return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+                                                 struct amdgpu_ps *amdgpu_state,
+                                                 unsigned int first_arb_set)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+       int i, ret = 0;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+               if (ret)
+                       break;
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 si_pi->arb_table_start +
+                                                 offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+                                                 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+                                                 (u8 *)&arb_regs,
+                                                 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+                                                 si_pi->sram_end);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+                                              struct amdgpu_ps *amdgpu_new_state)
+{
+       return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+                                                     SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+                                         struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (pi->mvdd_control)
+               return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+                                                si_pi->mvdd_bootup_value, voltage);
+
+       return 0;
+}
+
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_initial_state,
+                                        SISLANDS_SMC_STATETABLE *table)
+{
+       struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 reg;
+       int ret;
+
+       table->initialState.levels[0].mclk.vDLL_CNTL =
+               cpu_to_be32(si_pi->clock_registers.dll_cntl);
+       table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+       table->initialState.levels[0].mclk.vMPLL_SS =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+       table->initialState.levels[0].mclk.vMPLL_SS2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+       table->initialState.levels[0].mclk.mclk_value =
+               cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+       table->initialState.levels[0].sclk.sclk_value =
+               cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+       table->initialState.levels[0].arbRefreshState =
+               SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+       table->initialState.levels[0].ACIndex = 0;
+
+       ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                       initial_state->performance_levels[0].vddc,
+                                       &table->initialState.levels[0].vddc);
+
+       if (!ret) {
+               u16 std_vddc;
+
+               ret = si_get_std_voltage_value(adev,
+                                              &table->initialState.levels[0].vddc,
+                                              &std_vddc);
+               if (!ret)
+                       si_populate_std_voltage_value(adev, std_vddc,
+                                                     table->initialState.levels[0].vddc.index,
+                                                     &table->initialState.levels[0].std_vddc);
+       }
+
+       if (eg_pi->vddci_control)
+               si_populate_voltage_value(adev,
+                                         &eg_pi->vddci_voltage_table,
+                                         initial_state->performance_levels[0].vddci,
+                                         &table->initialState.levels[0].vddci);
+
+       if (si_pi->vddc_phase_shed_control)
+               si_populate_phase_shedding_value(adev,
+                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                initial_state->performance_levels[0].vddc,
+                                                initial_state->performance_levels[0].sclk,
+                                                initial_state->performance_levels[0].mclk,
+                                                &table->initialState.levels[0].vddc);
+
+       si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+       reg = CG_R(0xffff) | CG_L(0);
+       table->initialState.levels[0].aT = cpu_to_be32(reg);
+       table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+       table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               table->initialState.levels[0].strobeMode =
+                       si_get_strobe_mode_settings(adev,
+                                                   initial_state->performance_levels[0].mclk);
+
+               if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+                       table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+               else
+                       table->initialState.levels[0].mcFlags = 0;
+       }
+
+       table->initialState.levelCount = 1;
+
+       table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+       table->initialState.levels[0].dpm2.MaxPS = 0;
+       table->initialState.levels[0].dpm2.NearTDPDec = 0;
+       table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+       table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+       table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       reg = MIN_POWER_MASK | MAX_POWER_MASK;
+       table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+       table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+       return 0;
+}
+
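+/* The ACPI state is the initial state with clocks forced down: the
+ * memory DLLs are reset and their bypass cleared, the SCLK mux is
+ * switched away from its normal input, and both clock values are
+ * reported to the SMC as zero.
+ */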
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+                                     SISLANDS_SMC_STATETABLE *table)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+       u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+       u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+       u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+       u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+       u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+       u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+       u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+       u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+       u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+       u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+       u32 reg;
+       int ret;
+
+       table->ACPIState = table->initialState;
+
+       table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       if (pi->acpi_vddc) {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+               if (!ret) {
+                       u16 std_vddc;
+
+                       ret = si_get_std_voltage_value(adev,
+                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                       if (!ret)
+                               si_populate_std_voltage_value(adev, std_vddc,
+                                                             table->ACPIState.levels[0].vddc.index,
+                                                             &table->ACPIState.levels[0].std_vddc);
+               }
+               table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+               if (si_pi->vddc_phase_shed_control) {
+                       si_populate_phase_shedding_value(adev,
+                                                        &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                        pi->acpi_vddc,
+                                                        0,
+                                                        0,
+                                                        &table->ACPIState.levels[0].vddc);
+               }
+       } else {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+               if (!ret) {
+                       u16 std_vddc;
+
+                       ret = si_get_std_voltage_value(adev,
+                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+
+                       if (!ret)
+                               si_populate_std_voltage_value(adev, std_vddc,
+                                                             table->ACPIState.levels[0].vddc.index,
+                                                             &table->ACPIState.levels[0].std_vddc);
+               }
+               table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+                                                                                   si_pi->sys_pcie_mask,
+                                                                                   si_pi->boot_pcie_gen,
+                                                                                   AMDGPU_PCIE_GEN1);
+
+               if (si_pi->vddc_phase_shed_control)
+                       si_populate_phase_shedding_value(adev,
+                                                        &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                        pi->min_vddc_in_table,
+                                                        0,
+                                                        0,
+                                                        &table->ACPIState.levels[0].vddc);
+       }
+
+       if (pi->acpi_vddc) {
+               if (eg_pi->acpi_vddci)
+                       si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+                                                 eg_pi->acpi_vddci,
+                                                 &table->ACPIState.levels[0].vddci);
+       }
+
+       mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+       mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+       dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+       spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+       table->ACPIState.levels[0].mclk.vDLL_CNTL =
+               cpu_to_be32(dll_cntl);
+       table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+               cpu_to_be32(mclk_pwrmgt_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+               cpu_to_be32(mpll_ad_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+               cpu_to_be32(mpll_dq_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+               cpu_to_be32(mpll_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+               cpu_to_be32(mpll_func_cntl_1);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+               cpu_to_be32(mpll_func_cntl_2);
+       table->ACPIState.levels[0].mclk.vMPLL_SS =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+       table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+               cpu_to_be32(spll_func_cntl);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+               cpu_to_be32(spll_func_cntl_2);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+               cpu_to_be32(spll_func_cntl_3);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+               cpu_to_be32(spll_func_cntl_4);
+
+       table->ACPIState.levels[0].mclk.mclk_value = 0;
+       table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+       si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+       if (eg_pi->dynamic_ac_timing)
+               table->ACPIState.levels[0].ACIndex = 0;
+
+       table->ACPIState.levels[0].dpm2.MaxPS = 0;
+       table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+       table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+       table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+       table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       reg = MIN_POWER_MASK | MAX_POWER_MASK;
+       table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+       table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+       return 0;
+}
+
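+/* Fill in the single-level ULV state: deep-sleep bypass/throttle flags,
+ * optionally a single PCIe lane, and the dedicated ULV arb index.
+ */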
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+                                SISLANDS_SMC_SWSTATE *state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       u32 sclk_in_sr = 1350; /* ??? */
+       int ret;
+
+       ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+                                           &state->levels[0]);
+       if (!ret) {
+               if (eg_pi->sclk_deep_sleep) {
+                       if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                       else
+                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+               }
+               if (ulv->one_pcie_lane_in_ulv)
+                       state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+               state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+               state->levels[0].ACIndex = 1;
+               state->levels[0].std_vddc = state->levels[0].vddc;
+               state->levelCount = 1;
+
+               state->flags |= PPSMC_SWSTATE_FLAG_DC;
+       }
+
+       return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+       int ret;
+
+       ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+                                                  &arb_regs);
+       if (ret)
+               return ret;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+                                  ulv->volt_change_delay);
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                         si_pi->arb_table_start +
+                                         offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+                                         sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+                                         (u8 *)&arb_regs,
+                                         sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+                                         si_pi->sram_end);
+
+       return ret;
+}
+
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       pi->mvdd_split_frequency = 30000;
+}
+
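+/* Assemble the static SMC state table (voltage tables, platform flags,
+ * initial, ACPI and ULV states) and upload it to SMC RAM.
+ */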
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+       SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
+       int ret;
+       u32 lane_width;
+       u32 vr_hot_gpio;
+
+       si_populate_smc_voltage_tables(adev, table);
+
+       switch (adev->pm.int_thermal_type) {
+       case THERMAL_TYPE_SI:
+       case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+               break;
+       case THERMAL_TYPE_NONE:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+               break;
+       default:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+               break;
+       }
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+               if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+                       table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+       }
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+               table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+               table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
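+               /* Note: the VR_HOT GPIO id is read out of the
+                * backbias_response_time field, where the table parser
+                * appears to stash it.
+                */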
+               vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+                                          vr_hot_gpio);
+       }
+
+       ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+       if (ret)
+               return ret;
+
+       ret = si_populate_smc_acpi_state(adev, table);
+       if (ret)
+               return ret;
+
+       table->driverState = table->initialState;
+
+       ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+                                                    SISLANDS_INITIAL_STATE_ARB_INDEX);
+       if (ret)
+               return ret;
+
+       if (ulv->supported && ulv->pl.vddc) {
+               ret = si_populate_ulv_state(adev, &table->ULVState);
+               if (ret)
+                       return ret;
+
+               ret = si_program_ulv_memory_timing_parameters(adev);
+               if (ret)
+                       return ret;
+
+               WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+               WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+               lane_width = amdgpu_get_pcie_lanes(adev);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+       } else {
+               table->ULVState = table->initialState;
+       }
+
+       return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+                                          (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+                                          si_pi->sram_end);
+}
+
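+/* Compute the SPLL register values for an engine clock.  The feedback
+ * divider is solved from
+ *   fbdiv = engine_clock * (ref_div + 1) * post_div * 16384 / ref_clock,
+ * apparently giving 14 fractional bits (dithering is enabled as well).
+ */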
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+                                   u32 engine_clock,
+                                   SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct atom_clock_dividers dividers;
+       u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+       u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+       u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+       u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+       u64 tmp;
+       u32 reference_clock = adev->clock.spll.reference_freq;
+       u32 reference_divider;
+       u32 fbdiv;
+       int ret;
+
+       ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+                                            engine_clock, false, &dividers);
+       if (ret)
+               return ret;
+
+       reference_divider = 1 + dividers.ref_div;
+
+       tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+       do_div(tmp, reference_clock);
+       fbdiv = (u32) tmp;
+
+       spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+       spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+       spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+       spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+       spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+       spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+       spll_func_cntl_3 |= SPLL_DITHEN;
+
+       if (pi->sclk_ss) {
+               struct amdgpu_atom_ss ss;
+               u32 vco_freq = engine_clock * dividers.post_div;
+
+               if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                    ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+                       u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+                       u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+                       cg_spll_spread_spectrum &= ~CLK_S_MASK;
+                       cg_spll_spread_spectrum |= CLK_S(clk_s);
+                       cg_spll_spread_spectrum |= SSEN;
+
+                       cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+                       cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+               }
+       }
+
+       sclk->sclk_value = engine_clock;
+       sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+       sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+       sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+       sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+       sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+       sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+       return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+                                 u32 engine_clock,
+                                 SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+       SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+       int ret;
+
+       ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+       if (!ret) {
+               sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+               sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+               sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+               sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+               sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+               sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+               sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+       }
+
+       return ret;
+}
+
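+/* Translate a memory clock into the full MPLL/DLL register set, with
+ * optional memory spread spectrum, byte-swapped for the SMC.
+ */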
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+                                 u32 engine_clock,
+                                 u32 memory_clock,
+                                 SISLANDS_SMC_MCLK_VALUE *mclk,
+                                 bool strobe_mode,
+                                 bool dll_state_on)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32  dll_cntl = si_pi->clock_registers.dll_cntl;
+       u32  mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+       u32  mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+       u32  mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+       u32  mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+       u32  mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+       u32  mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+       u32  mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+       u32  mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+       struct atom_mpll_param mpll_param;
+       int ret;
+
+       ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+       if (ret)
+               return ret;
+
+       mpll_func_cntl &= ~BWCTRL_MASK;
+       mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+       mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+       mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+               CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+       mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+       mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+               mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+                       YCLK_POST_DIV(mpll_param.post_div);
+       }
+
+       if (pi->mclk_ss) {
+               struct amdgpu_atom_ss ss;
+               u32 freq_nom;
+               u32 tmp;
+               u32 reference_clock = adev->clock.mpll.reference_freq;
+
+               if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+                       freq_nom = memory_clock * 4;
+               else
+                       freq_nom = memory_clock * 2;
+
+               tmp = freq_nom / reference_clock;
+               tmp = tmp * tmp;
+               if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                    ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+                       u32 clks = reference_clock * 5 / ss.rate;
+                       u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 &= ~CLKV_MASK;
+                       mpll_ss1 |= CLKV(clkv);
+
+                       mpll_ss2 &= ~CLKS_MASK;
+                       mpll_ss2 |= CLKS(clks);
+               }
+       }
+
+       mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+       mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+       if (dll_state_on)
+               mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+       else
+               mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+       mclk->mclk_value = cpu_to_be32(memory_clock);
+       mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+       mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+       mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+       mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+       mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+       mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+       mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+       mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+       mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+       return 0;
+}
+
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+                              struct amdgpu_ps *amdgpu_state,
+                              SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct si_ps *ps = si_get_ps(amdgpu_state);
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       int i;
+
+       for (i = 0; i < ps->performance_level_count - 1; i++)
+               smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+       smc_state->levels[ps->performance_level_count - 1].bSP =
+               cpu_to_be32(pi->psp);
+}
+
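+/* Convert one driver performance level into its SMC representation:
+ * PCIe gen, SCLK/MCLK register values, strobe/EDC memory flags and the
+ * various voltage entries.
+ */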
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+                                        struct rv7xx_pl *pl,
+                                        SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+       bool dll_state_on;
+       u16 std_vddc;
+       bool gmc_pg = false;
+
+       if (eg_pi->pcie_performance_request &&
+           (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+               level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+       else
+               level->gen2PCIE = (u8)pl->pcie_gen;
+
+       ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+       if (ret)
+               return ret;
+
+       level->mcFlags = 0;
+
+       if (pi->mclk_stutter_mode_threshold &&
+           (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+           !eg_pi->uvd_enabled &&
+           (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+           (adev->pm.dpm.new_active_crtc_count <= 2)) {
+               level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+               if (gmc_pg)
+                       level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+       }
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               if (pl->mclk > pi->mclk_edc_enable_threshold)
+                       level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+               if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+                       level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+               level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+               if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+                       if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+                           ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+                               dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+                       else
+                               dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
+               } else {
+                       dll_state_on = false;
+               }
+       } else {
+               level->strobeMode = si_get_strobe_mode_settings(adev,
+                                                               pl->mclk);
+
+               dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+       }
+
+       ret = si_populate_mclk_value(adev,
+                                    pl->sclk,
+                                    pl->mclk,
+                                    &level->mclk,
+                                    (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+       if (ret)
+               return ret;
+
+       ret = si_populate_voltage_value(adev,
+                                       &eg_pi->vddc_voltage_table,
+                                       pl->vddc, &level->vddc);
+       if (ret)
+               return ret;
+
+       ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+       if (ret)
+               return ret;
+
+       ret = si_populate_std_voltage_value(adev, std_vddc,
+                                           level->vddc.index, &level->std_vddc);
+       if (ret)
+               return ret;
+
+       if (eg_pi->vddci_control) {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+                                               pl->vddci, &level->vddci);
+               if (ret)
+                       return ret;
+       }
+
+       if (si_pi->vddc_phase_shed_control) {
+               ret = si_populate_phase_shedding_value(adev,
+                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                      pl->vddc,
+                                                      pl->sclk,
+                                                      pl->mclk,
+                                                      &level->vddc);
+               if (ret)
+                       return ret;
+       }
+
+       level->MaxPoweredUpCU = si_pi->max_cu;
+
+       ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+       return ret;
+}
+
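+/*
+ * Populate the aT fields pacing transitions between adjacent levels.
+ * r600_calculate_at() derives the low/high thresholds from the sclk
+ * ratio of neighbouring levels; if it fails, a linear fallback around
+ * (i + 1) * 1000 is used instead.  CG_R/CG_L pack the thresholds scaled
+ * by the (p)bsp switching parameters.
+ */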
+static int si_populate_smc_t(struct amdgpu_device *adev,
+                            struct amdgpu_ps *amdgpu_state,
+                            SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       u32 a_t;
+       u32 t_l, t_h;
+       u32 high_bsp;
+       int i, ret;
+
+       if (state->performance_level_count >= 9)
+               return -EINVAL;
+
+       if (state->performance_level_count < 2) {
+               a_t = CG_R(0xffff) | CG_L(0);
+               smc_state->levels[0].aT = cpu_to_be32(a_t);
+               return 0;
+       }
+
+       smc_state->levels[0].aT = cpu_to_be32(0);
+
+       for (i = 0; i <= state->performance_level_count - 2; i++) {
+               ret = r600_calculate_at(
+                       (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+                       100 * R600_AH_DFLT,
+                       state->performance_levels[i + 1].sclk,
+                       state->performance_levels[i].sclk,
+                       &t_l,
+                       &t_h);
+
+               if (ret) {
+                       t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+                       t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+               }
+
+               a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+               a_t |= CG_R(t_l * pi->bsp / 20000);
+               smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+               high_bsp = (i == state->performance_level_count - 2) ?
+                       pi->pbsp : pi->bsp;
+               a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+               smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+       }
+
+       return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+
+       if (ulv->supported)
+               return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+                       0 : -EINVAL;
+
+       return 0;
+}
+
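+/*
+ * A state is ULV-compatible only if its lowest level runs at the ULV
+ * mclk, the ULV vddc meets every dispclk-dependent vddc requirement at
+ * the current display clock, and UVD is idle (vclk == dclk == 0).
+ */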
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+                                      struct amdgpu_ps *amdgpu_state)
+{
+       const struct si_power_info *si_pi = si_get_pi(adev);
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+       const struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+
+       if (state->performance_levels[0].mclk != ulv->pl.mclk)
+               return false;
+
+       /* XXX validate against display requirements! */
+
+       for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+               if (adev->clock.current_dispclk <=
+                   adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+                       if (ulv->pl.vddc <
+                           adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+                               return false;
+               }
+       }
+
+       if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+               return false;
+
+       return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+                                                      struct amdgpu_ps *amdgpu_new_state)
+{
+       const struct si_power_info *si_pi = si_get_pi(adev);
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+
+       if (ulv->supported) {
+               if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+                               0 : -EINVAL;
+       }
+       return 0;
+}
+
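+/*
+ * Build the complete SMC software state: set the UVD/DC flags, convert
+ * each performance level, choose display watermarks and AC-timing
+ * register sets per level, then derive the switching (bSP) and pacing
+ * (aT) parameters and the power-containment/SQ-ramping tables.
+ */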
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_state,
+                                        SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i, ret;
+       u32 threshold;
+       u32 sclk_in_sr = 1350; /* ??? */
+
+       if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+               return -EINVAL;
+
+       threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+       if (amdgpu_state->vclk && amdgpu_state->dclk) {
+               eg_pi->uvd_enabled = true;
+               if (eg_pi->smu_uvd_hs)
+                       smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+       } else {
+               eg_pi->uvd_enabled = false;
+       }
+
+       if (state->dc_compatible)
+               smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+       smc_state->levelCount = 0;
+       for (i = 0; i < state->performance_level_count; i++) {
+               if (eg_pi->sclk_deep_sleep) {
+                       if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+                               if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+                                       smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                               else
+                                       smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+                       }
+               }
+
+               ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+                                                   &smc_state->levels[i]);
+               if (ret)
+                       return ret;
+
+               smc_state->levels[i].arbRefreshState =
+                       (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+               if (ni_pi->enable_power_containment)
+                       smc_state->levels[i].displayWatermark =
+                               (state->performance_levels[i].sclk < threshold) ?
+                               PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+               else
+                       smc_state->levels[i].displayWatermark = (i < 2) ?
+                               PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+               if (eg_pi->dynamic_ac_timing)
+                       smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+               else
+                       smc_state->levels[i].ACIndex = 0;
+
+               smc_state->levelCount++;
+       }
+
+       si_write_smc_soft_register(adev,
+                                  SI_SMC_SOFT_REGISTER_watermark_threshold,
+                                  threshold / 512);
+
+       si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+       ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+       if (ret)
+               ni_pi->enable_power_containment = false;
+
+       ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+       if (ret)
+               ni_pi->enable_sq_ramping = false;
+
+       return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
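+/*
+ * Convert the requested power state and copy it into the driverState
+ * slot of the SMC state table.  state_size accounts for the first
+ * hardware level being embedded in SISLANDS_SMC_SWSTATE itself.
+ */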
+static int si_upload_sw_state(struct amdgpu_device *adev,
+                             struct amdgpu_ps *amdgpu_new_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+       int ret;
+       u32 address = si_pi->state_table_start +
+               offsetof(SISLANDS_SMC_STATETABLE, driverState);
+       u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+               ((new_state->performance_level_count - 1) *
+                sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+       SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+       memset(smc_state, 0, state_size);
+
+       ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+       if (ret)
+               return ret;
+
+       return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+                                          state_size, si_pi->sram_end);
+}
+
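+/*
+ * Upload the ULV state to its dedicated slot in the SMC state table; a
+ * zero ULV vddc means no usable ULV level was found and nothing is sent.
+ */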
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       int ret = 0;
+
+       if (ulv->supported && ulv->pl.vddc) {
+               u32 address = si_pi->state_table_start +
+                       offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+               SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+               u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+               memset(smc_state, 0, state_size);
+
+               ret = si_populate_ulv_state(adev, smc_state);
+               if (!ret)
+                       ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+                                                         state_size, si_pi->sram_end);
+       }
+
+       return ret;
+}
+
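+/*
+ * Hand the SMC the display-side data it needs to synchronize mclk
+ * changes: the first active CRTC and, presumably in line units, the
+ * wm_high/wm_low watermarks divided by the line time.  Every early exit
+ * returns 0; failure here only costs display-synchronized mclk switching.
+ */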
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+       struct amdgpu_crtc *amdgpu_crtc = NULL;
+       int i;
+
+       if (adev->pm.dpm.new_active_crtc_count == 0)
+               return 0;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+                       amdgpu_crtc = adev->mode_info.crtcs[i];
+                       break;
+               }
+       }
+
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       if (amdgpu_crtc->line_time <= 0)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_crtc_index,
+                                      amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+                                      amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+                                      amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+               return 0;
+
+       return 0;
+}
+
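+/*
+ * Append derived "special" registers to the MC register table: EMRS/MRS
+ * (and MRS1 for MC_SEQ_RESERVE_M) entries built from the live
+ * MC_PMG_CMD_* values combined with per-entry data, plus an
+ * MC_PMG_AUTO_CMD entry for non-GDDR5 memory.  j tracks the appended
+ * columns and is bounds-checked against SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE.
+ */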
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+                                      struct si_mc_reg_table *table)
+{
+       u8 i, j, k;
+       u32 temp_reg;
+
+       for (i = 0, j = table->last; i < table->last; i++) {
+               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       return -EINVAL;
+               switch (table->mc_reg_address[i].s1) {
+               case MC_SEQ_MISC1:
+                       temp_reg = RREG32(MC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++)
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+
+                       temp_reg = RREG32(MC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                               if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                       }
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+
+                       if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+                               table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++)
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               j++;
+                               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                                       return -EINVAL;
+                       }
+                       break;
+               case MC_SEQ_RESERVE_M:
+                       temp_reg = RREG32(MC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++)
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       table->last = j;
+
+       return 0;
+}
+
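+/*
+ * Map an MC register onto its low-power (_LP) shadow; returns false
+ * when the register has no shadow.
+ */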
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+       bool result = true;
+       switch (in_reg) {
+       case  MC_SEQ_RAS_TIMING:
+               *out_reg = MC_SEQ_RAS_TIMING_LP;
+               break;
+       case MC_SEQ_CAS_TIMING:
+               *out_reg = MC_SEQ_CAS_TIMING_LP;
+               break;
+       case MC_SEQ_MISC_TIMING:
+               *out_reg = MC_SEQ_MISC_TIMING_LP;
+               break;
+       case MC_SEQ_MISC_TIMING2:
+               *out_reg = MC_SEQ_MISC_TIMING2_LP;
+               break;
+       case MC_SEQ_RD_CTL_D0:
+               *out_reg = MC_SEQ_RD_CTL_D0_LP;
+               break;
+       case MC_SEQ_RD_CTL_D1:
+               *out_reg = MC_SEQ_RD_CTL_D1_LP;
+               break;
+       case MC_SEQ_WR_CTL_D0:
+               *out_reg = MC_SEQ_WR_CTL_D0_LP;
+               break;
+       case MC_SEQ_WR_CTL_D1:
+               *out_reg = MC_SEQ_WR_CTL_D1_LP;
+               break;
+       case MC_PMG_CMD_EMRS:
+               *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+       case MC_PMG_CMD_MRS:
+               *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+               break;
+       case MC_PMG_CMD_MRS1:
+               *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+       case MC_SEQ_PMG_TIMING:
+               *out_reg = MC_SEQ_PMG_TIMING_LP;
+               break;
+       case MC_PMG_CMD_MRS2:
+               *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+       case MC_SEQ_WR_CTL_2:
+               *out_reg = MC_SEQ_WR_CTL_2_LP;
+               break;
+       default:
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
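+/*
+ * Mark a register column valid only if its value differs between at
+ * least two entries; constant columns are dropped when the table is
+ * packed for the SMC.
+ */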
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+       u8 i, j;
+
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->valid_flag |= 1 << i;
+                               break;
+                       }
+               }
+       }
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+       u32 i;
+       u16 address;
+
+       for (i = 0; i < table->last; i++)
+               table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+                       address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+                                     struct si_mc_reg_table *si_table)
+{
+       u8 i, j;
+
+       if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+               return -EINVAL;
+       if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+               return -EINVAL;
+
+       for (i = 0; i < table->last; i++)
+               si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+       si_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               si_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       si_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+       si_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
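+/*
+ * Build the driver MC register table: seed the _LP shadows from the live
+ * MC_SEQ_* registers, read the AC-timing table from the VBIOS, resolve
+ * the s0 (shadow) addresses, append the special registers and compute
+ * the valid-column mask.
+ */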
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct atom_mc_reg_table *table;
+       struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+       u8 module_index = rv770_get_memory_module_index(adev);
+       int ret;
+
+       table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+       WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+       WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+       WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+       WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+       WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+       WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+       WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+       WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+       WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+       WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+       WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+       WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+       WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+       ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+       if (ret)
+               goto init_mc_done;
+
+       ret = si_copy_vbios_mc_reg_table(table, si_table);
+       if (ret)
+               goto init_mc_done;
+
+       si_set_s0_mc_reg_index(si_table);
+
+       ret = si_set_mc_special_registers(adev, si_table);
+       if (ret)
+               goto init_mc_done;
+
+       si_set_valid_flag(si_table);
+
+init_mc_done:
+       kfree(table);
+
+       return ret;
+}
+
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+                                        SMC_SIslands_MCRegisters *mc_reg_table)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 i, j;
+
+       for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+               if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+                       if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               break;
+                       mc_reg_table->address[i].s0 =
+                               cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
+       mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+                                   SMC_SIslands_MCRegisterSet *data,
+                                   u32 num_entries, u32 valid_flag)
+{
+       u32 i, j;
+
+       for (i = 0, j = 0; j < num_entries; j++) {
+               if (valid_flag & (1 << j)) {
+                       data->value[i] = cpu_to_be32(entry->mc_data[j]);
+                       i++;
+               }
+       }
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+                                                struct rv7xx_pl *pl,
+                                                SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 i = 0;
+
+       for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+               if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+                       break;
+       }
+
+       if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, si_pi->mc_reg_table.last,
+                               si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+                                          struct amdgpu_ps *amdgpu_state,
+                                          SMC_SIslands_MCRegisters *mc_reg_table)
+{
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               si_convert_mc_reg_table_entry_to_smc(adev,
+                                                    &state->performance_levels[i],
+                                                    &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+       }
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+                                   struct amdgpu_ps *amdgpu_boot_state)
+{
+       struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+       memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+       si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+       si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+                                            &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+       si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+                               &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+                               si_pi->mc_reg_table.last,
+                               si_pi->mc_reg_table.valid_flag);
+
+       if (ulv->supported && ulv->pl.vddc != 0)
+               si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+                                                    &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+       else
+               si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+                                       &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+                                       si_pi->mc_reg_table.last,
+                                       si_pi->mc_reg_table.valid_flag);
+
+       si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+       return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+                                          (u8 *)smc_mc_reg_table,
+                                          sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+                                 struct amdgpu_ps *amdgpu_new_state)
+{
+       struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 address = si_pi->mc_reg_table_start +
+               offsetof(SMC_SIslands_MCRegisters,
+                        data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+       SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+       memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+       si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+       return amdgpu_si_copy_bytes_to_smc(adev, address,
+                                          (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+                                          sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+                                          si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+       if (enable)
+               WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+       else
+               WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+                                                     struct amdgpu_ps *amdgpu_state)
+{
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+       u16 pcie_speed, max_speed = 0;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               pcie_speed = state->performance_levels[i].pcie_gen;
+               if (max_speed < pcie_speed)
+                       max_speed = pcie_speed;
+       }
+       return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+       u32 speed_cntl;
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+       speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+       return (u16)speed_cntl;
+}
+
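+/*
+ * Before switching states, raise the PCIe link speed via ACPI if the new
+ * state wants a faster link, falling back one gen at a time on failure.
+ * If the target is slower than the current link, the request is instead
+ * deferred until after the switch (pspp_notify_required).
+ */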
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+                                                            struct amdgpu_ps *amdgpu_new_state,
+                                                            struct amdgpu_ps *amdgpu_current_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       enum amdgpu_pcie_gen current_link_speed;
+
+       if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+               current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+       else
+               current_link_speed = si_pi->force_pcie_gen;
+
+       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->pspp_notify_required = false;
+       if (target_link_speed > current_link_speed) {
+               switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+               case AMDGPU_PCIE_GEN3:
+                       if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+                               break;
+                       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+                       if (current_link_speed == AMDGPU_PCIE_GEN2)
+                               break;
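+                       /* fall through */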
+               case AMDGPU_PCIE_GEN2:
+                       if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+                               break;
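+                       /* fall through */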
+#endif
+               default:
+                       si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+                       break;
+               }
+       } else {
+               if (target_link_speed < current_link_speed)
+                       si_pi->pspp_notify_required = true;
+       }
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+                                                          struct amdgpu_ps *amdgpu_new_state,
+                                                          struct amdgpu_ps *amdgpu_current_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       u8 request;
+
+       if (si_pi->pspp_notify_required) {
+               if (target_link_speed == AMDGPU_PCIE_GEN3)
+                       request = PCIE_PERF_REQ_PECI_GEN3;
+               else if (target_link_speed == AMDGPU_PCIE_GEN2)
+                       request = PCIE_PERF_REQ_PECI_GEN2;
+               else
+                       request = PCIE_PERF_REQ_PECI_GEN1;
+
+               if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+                   (si_get_current_pcie_speed(adev) > 0))
+                       return;
+
+#if defined(CONFIG_ACPI)
+               amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+       }
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+                        bool ds_status_on, u32 count_write)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+       if (eg_pi->sclk_deep_sleep) {
+               if (ds_status_on)
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+                               PPSMC_Result_OK) ?
+                               0 : -EINVAL;
+               else
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+                               PPSMC_Result_OK) ? 0 : -EINVAL;
+       }
+       return 0;
+}
+#endif
+
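+/*
+ * Hard-coded maximum compute-unit counts for Verde variants, keyed by
+ * PCI device ID; every other ASIC leaves max_cu at 0 (no cap).
+ */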
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (adev->asic_type == CHIP_VERDE) {
+               switch (adev->pdev->device) {
+               case 0x6820:
+               case 0x6825:
+               case 0x6821:
+               case 0x6823:
+               case 0x6827:
+                       si_pi->max_cu = 10;
+                       break;
+               case 0x682D:
+               case 0x6824:
+               case 0x682F:
+               case 0x6826:
+                       si_pi->max_cu = 8;
+                       break;
+               case 0x6828:
+               case 0x6830:
+               case 0x6831:
+               case 0x6838:
+               case 0x6839:
+               case 0x683D:
+                       si_pi->max_cu = 10;
+                       break;
+               case 0x683B:
+               case 0x683F:
+               case 0x6829:
+                       si_pi->max_cu = 8;
+                       break;
+               default:
+                       si_pi->max_cu = 0;
+                       break;
+               }
+       } else {
+               si_pi->max_cu = 0;
+       }
+}
+
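+/*
+ * Replace leakage-index voltage entries with real leakage voltages, then
+ * enforce monotonicity from the top down so no entry exceeds the voltage
+ * of the next higher clock.
+ */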
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+                                                            struct amdgpu_clock_voltage_dependency_table *table)
+{
+       u32 i;
+       int j;
+       u16 leakage_voltage;
+
+       if (table) {
+               for (i = 0; i < table->count; i++) {
+                       switch (si_get_leakage_voltage_from_leakage_index(adev,
+                                                                         table->entries[i].v,
+                                                                         &leakage_voltage)) {
+                       case 0:
+                               table->entries[i].v = leakage_voltage;
+                               break;
+                       case -EAGAIN:
+                               return -EINVAL;
+                       case -EINVAL:
+                       default:
+                               break;
+                       }
+               }
+
+               for (j = (table->count - 2); j >= 0; j--) {
+                       table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+                               table->entries[j].v : table->entries[j + 1].v;
+               }
+       }
+       return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+       return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *amdgpu_new_state,
+                                         struct amdgpu_ps *amdgpu_current_state)
+{
+       u32 lane_width;
+       u32 new_lane_width =
+               (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+       u32 current_lane_width =
+               (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+       if (new_lane_width != current_lane_width) {
+               amdgpu_set_pcie_lanes(adev, new_lane_width);
+               lane_width = amdgpu_get_pcie_lanes(adev);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+       }
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+       si_read_clock_registers(adev);
+       si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+                                  bool enable)
+{
+       u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+       if (enable) {
+               PPSMC_Result result;
+
+               thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+               WREG32(CG_THERMAL_INT, thermal_int);
+               result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+               if (result != PPSMC_Result_OK) {
+                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+                       return -EINVAL;
+               }
+       } else {
+               thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+               WREG32(CG_THERMAL_INT, thermal_int);
+       }
+
+       return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+                                           int min_temp, int max_temp)
+{
+       int low_temp = 0 * 1000;
+       int high_temp = 255 * 1000;
+
+       if (low_temp < min_temp)
+               low_temp = min_temp;
+       if (high_temp > max_temp)
+               high_temp = max_temp;
+       if (high_temp < low_temp) {
+               DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+               return -EINVAL;
+       }
+
+       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+       WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+       adev->pm.dpm.thermal.min_temp = low_temp;
+       adev->pm.dpm.thermal.max_temp = high_temp;
+
+       return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
+       if (si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+               si_pi->fan_ctrl_default_mode = tmp;
+               tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+               si_pi->t_min = tmp;
+               si_pi->fan_ctrl_is_in_default_mode = false;
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+       tmp |= TMIN(0);
+       WREG32(CG_FDO_CTRL2, tmp);
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+       tmp |= FDO_PWM_MODE(mode);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
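+/*
+ * Translate the driver fan profile into the SMC fan table: PWM points
+ * are rescaled against the full-scale duty (FMAX_DUTY100), the two
+ * slopes are computed in the SMC's fixed-point format, and temperatures
+ * are rounded to whole degrees.  A missing fan table offset or a failed
+ * upload disables SMC fan control.
+ */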
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+       u32 duty100;
+       u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       u16 fdo_min, slope1, slope2;
+       u32 reference_clock, tmp;
+       int ret;
+       u64 tmp64;
+
+       if (!si_pi->fan_table_start) {
+               adev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0) {
+               adev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (u16)tmp64;
+
+       t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+       t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+       pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+       pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+       slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+       fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+       fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+       fan_table.slope1 = cpu_to_be16(slope1);
+       fan_table.slope2 = cpu_to_be16(slope2);
+       fan_table.fdo_min = cpu_to_be16(fdo_min);
+       fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+       fan_table.hys_up = cpu_to_be16(1);
+       fan_table.hys_slope = cpu_to_be16(1);
+       fan_table.temp_resp_lim = cpu_to_be16(5);
+       reference_clock = amdgpu_asic_get_xclk(adev);
+
+       fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+                                               reference_clock) / 1600);
+       fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+       tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+       fan_table.temp_src = (uint8_t)tmp;
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                         si_pi->fan_table_start,
+                                         (u8 *)(&fan_table),
+                                         sizeof(fan_table),
+                                         si_pi->sram_end);
+
+       if (ret) {
+               DRM_ERROR("Failed to load fan table to the SMC.\n");
+               adev->pm.dpm.fan.ucode_fan_control = false;
+       }
+
+       return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result ret;
+
+       ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+       if (ret == PPSMC_Result_OK) {
+               si_pi->fan_is_controlled_by_smc = true;
+               return 0;
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result ret;
+
+       ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+       if (ret == PPSMC_Result_OK) {
+               si_pi->fan_is_controlled_by_smc = false;
+               return 0;
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+                                        u32 *speed)
+{
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+       duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)duty * 100;
+       do_div(tmp64, duty100);
+       *speed = (u32)tmp64;
+
+       if (*speed > 100)
+               *speed = 100;
+
+       return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+                                        u32 speed)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (si_pi->fan_is_controlled_by_smc)
+               return -EINVAL;
+
+       if (speed > 100)
+               return -EINVAL;
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (u32)tmp64;
+
+       tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+       tmp |= FDO_STATIC_DUTY(duty);
+       WREG32(CG_FDO_CTRL0, tmp);
+
+       return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+       if (mode) {
+               /* stop auto-manage */
+               if (adev->pm.dpm.fan.ucode_fan_control)
+                       si_fan_ctrl_stop_smc_fan_control(adev);
+               si_fan_ctrl_set_static_mode(adev, mode);
+       } else {
+               /* restart auto-manage */
+               if (adev->pm.dpm.fan.ucode_fan_control)
+                       si_thermal_start_smc_fan_control(adev);
+               else
+                       si_fan_ctrl_set_default_mode(adev);
+       }
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
+       if (si_pi->fan_is_controlled_by_smc)
+               return 0;
+
+       tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+       return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+                                        u32 *speed)
+{
+       u32 tach_period;
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (adev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+       if (tach_period == 0)
+               return -ENOENT;
+
+       *speed = 60 * xclk * 10000 / tach_period;
+
+       return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+                                        u32 speed)
+{
+       u32 tach_period, tmp;
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (adev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       if ((speed < adev->pm.fan_min_rpm) ||
+           (speed > adev->pm.fan_max_rpm))
+               return -EINVAL;
+
+       if (adev->pm.dpm.fan.ucode_fan_control)
+               si_fan_ctrl_stop_smc_fan_control(adev);
+
+       tach_period = 60 * xclk * 10000 / (8 * speed);
+       tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+       tmp |= TARGET_PERIOD(tach_period);
+       WREG32(CG_TACH_CTRL, tmp);
+
+       si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+       return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
+       if (!si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+               tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+               WREG32(CG_FDO_CTRL2, tmp);
+
+               tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+               tmp |= TMIN(si_pi->t_min);
+               WREG32(CG_FDO_CTRL2, tmp);
+               si_pi->fan_ctrl_is_in_default_mode = true;
+       }
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+       if (adev->pm.dpm.fan.ucode_fan_control) {
+               si_fan_ctrl_start_smc_fan_control(adev);
+               si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+       }
+}
+
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       if (adev->pm.fan_pulses_per_revolution) {
+               tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+               tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
+               WREG32(CG_TACH_CTRL, tmp);
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+       tmp |= TACH_PWM_RESP_RATE(0x28);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+       int ret;
+
+       si_thermal_initialize(adev);
+       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(adev, true);
+       if (ret)
+               return ret;
+       if (adev->pm.dpm.fan.ucode_fan_control) {
+               ret = si_halt_smc(adev);
+               if (ret)
+                       return ret;
+               ret = si_thermal_setup_fan_table(adev);
+               if (ret)
+                       return ret;
+               ret = si_resume_smc(adev);
+               if (ret)
+                       return ret;
+               si_thermal_start_smc_fan_control(adev);
+       }
+
+       return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+       if (!adev->pm.no_fan) {
+               si_fan_ctrl_set_default_mode(adev);
+               si_fan_ctrl_stop_smc_fan_control(adev);
+       }
+}
+
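+/*
+ * Full DPM bring-up.  Ordering matters: voltage and MC register tables
+ * are constructed first, firmware is uploaded and all SMC tables (state,
+ * SPLL, arbitration, MC, CAC, DTE, TDP) are populated before the SMC is
+ * started and DPM and the thermal controller are enabled.
+ */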
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+       int ret;
+
+       if (amdgpu_si_is_smc_running(adev))
+               return -EINVAL;
+       if (pi->voltage_control || si_pi->voltage_control_svi2)
+               si_enable_voltage_control(adev, true);
+       if (pi->mvdd_control)
+               si_get_mvdd_configuration(adev);
+       if (pi->voltage_control || si_pi->voltage_control_svi2) {
+               ret = si_construct_voltage_tables(adev);
+               if (ret) {
+                       DRM_ERROR("si_construct_voltage_tables failed\n");
+                       return ret;
+               }
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_initialize_mc_reg_table(adev);
+               if (ret)
+                       eg_pi->dynamic_ac_timing = false;
+       }
+       if (pi->dynamic_ss)
+               si_enable_spread_spectrum(adev, true);
+       if (pi->thermal_protection)
+               si_enable_thermal_protection(adev, true);
+       si_setup_bsp(adev);
+       si_program_git(adev);
+       si_program_tp(adev);
+       si_program_tpp(adev);
+       si_program_sstp(adev);
+       si_enable_display_gap(adev);
+       si_program_vc(adev);
+       ret = si_upload_firmware(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_firmware failed\n");
+               return ret;
+       }
+       ret = si_process_firmware_header(adev);
+       if (ret) {
+               DRM_ERROR("si_process_firmware_header failed\n");
+               return ret;
+       }
+       ret = si_initial_switch_from_arb_f0_to_f1(adev);
+       if (ret) {
+               DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+               return ret;
+       }
+       ret = si_init_smc_table(adev);
+       if (ret) {
+               DRM_ERROR("si_init_smc_table failed\n");
+               return ret;
+       }
+       ret = si_init_smc_spll_table(adev);
+       if (ret) {
+               DRM_ERROR("si_init_smc_spll_table failed\n");
+               return ret;
+       }
+       ret = si_init_arb_table_index(adev);
+       if (ret) {
+               DRM_ERROR("si_init_arb_table_index failed\n");
+               return ret;
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_populate_mc_reg_table(adev, boot_ps);
+               if (ret) {
+                       DRM_ERROR("si_populate_mc_reg_table failed\n");
+                       return ret;
+               }
+       }
+       ret = si_initialize_smc_cac_tables(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+               return ret;
+       }
+       ret = si_initialize_hardware_cac_manager(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+               return ret;
+       }
+       ret = si_initialize_smc_dte_tables(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+               return ret;
+       }
+       ret = si_populate_smc_tdp_limits(adev, boot_ps);
+       if (ret) {
+               DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+               return ret;
+       }
+       ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+       if (ret) {
+               DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+               return ret;
+       }
+       si_program_response_times(adev);
+       si_program_ds_registers(adev);
+       si_dpm_start_smc(adev);
+       ret = si_notify_smc_display_change(adev, false);
+       if (ret) {
+               DRM_ERROR("si_notify_smc_display_change failed\n");
+               return ret;
+       }
+       si_enable_sclk_control(adev, true);
+       si_start_dpm(adev);
+
+       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+       si_thermal_start_thermal_controller(adev);
+       ni_update_current_ps(adev, boot_ps);
+
+       return 0;
+}
+
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = si_thermal_enable_alert(adev, false);
+       if (ret)
+               return ret;
+       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(adev, true);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return;
+       si_thermal_stop_thermal_controller(adev);
+       si_disable_ulv(adev);
+       si_clear_vc(adev);
+       if (pi->thermal_protection)
+               si_enable_thermal_protection(adev, false);
+       si_enable_power_containment(adev, boot_ps, false);
+       si_enable_smc_cac(adev, boot_ps, false);
+       si_enable_spread_spectrum(adev, false);
+       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+       si_stop_dpm(adev);
+       si_reset_to_default(adev);
+       si_dpm_stop_smc(adev);
+       si_force_switch_to_arb_f0(adev);
+
+       ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+       struct amdgpu_ps *new_ps = &requested_ps;
+
+       ni_update_requested_ps(adev, new_ps);
+       si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+       return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+       struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+       int ret;
+
+       ret = si_restrict_performance_levels_before_switch(adev);
+       if (ret)
+               return ret;
+       ret = si_halt_smc(adev);
+       if (ret)
+               return ret;
+       ret = si_populate_smc_tdp_limits(adev, new_ps);
+       if (ret)
+               return ret;
+       ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+       if (ret)
+               return ret;
+       ret = si_resume_smc(adev);
+       if (ret)
+               return ret;
+       ret = si_set_sw_state(adev);
+       if (ret)
+               return ret;
+       return 0;
+}
+
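+/*
+ * Perform the actual state switch: CAC and power containment are
+ * disabled around the transition, the new state is uploaded while the
+ * SMC is halted, and UVD clocks and PCIe link speed are adjusted on
+ * either side of the engine clock change.
+ */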
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+       struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+       int ret;
+
+       ret = si_disable_ulv(adev);
+       if (ret) {
+               DRM_ERROR("si_disable_ulv failed\n");
+               return ret;
+       }
+       ret = si_restrict_performance_levels_before_switch(adev);
+       if (ret) {
+               DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+               return ret;
+       }
+       if (eg_pi->pcie_performance_request)
+               si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+       ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+       ret = si_enable_power_containment(adev, new_ps, false);
+       if (ret) {
+               DRM_ERROR("si_enable_power_containment failed\n");
+               return ret;
+       }
+       ret = si_enable_smc_cac(adev, new_ps, false);
+       if (ret) {
+               DRM_ERROR("si_enable_smc_cac failed\n");
+               return ret;
+       }
+       ret = si_halt_smc(adev);
+       if (ret) {
+               DRM_ERROR("si_halt_smc failed\n");
+               return ret;
+       }
+       ret = si_upload_sw_state(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_upload_sw_state failed\n");
+               return ret;
+       }
+       ret = si_upload_smc_data(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_smc_data failed\n");
+               return ret;
+       }
+       ret = si_upload_ulv_state(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_ulv_state failed\n");
+               return ret;
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_upload_mc_reg_table(adev, new_ps);
+               if (ret) {
+                       DRM_ERROR("si_upload_mc_reg_table failed\n");
+                       return ret;
+               }
+       }
+       ret = si_program_memory_timing_parameters(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_program_memory_timing_parameters failed\n");
+               return ret;
+       }
+       si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+       ret = si_resume_smc(adev);
+       if (ret) {
+               DRM_ERROR("si_resume_smc failed\n");
+               return ret;
+       }
+       ret = si_set_sw_state(adev);
+       if (ret) {
+               DRM_ERROR("si_set_sw_state failed\n");
+               return ret;
+       }
+       ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+       if (eg_pi->pcie_performance_request)
+               si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+       ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+               return ret;
+       }
+       ret = si_enable_smc_cac(adev, new_ps, true);
+       if (ret) {
+               DRM_ERROR("si_enable_smc_cac failed\n");
+               return ret;
+       }
+       ret = si_enable_power_containment(adev, new_ps, true);
+       if (ret) {
+               DRM_ERROR("si_enable_power_containment failed\n");
+               return ret;
+       }
+
+       ret = si_power_control_set_level(adev);
+       if (ret) {
+               DRM_ERROR("si_power_control_set_level failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+       ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+       si_restrict_performance_levels_before_switch(adev);
+       si_disable_ulv(adev);
+       si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+       si_program_display_gap(adev);
+}
+
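+/*
+ * Parse the non-clock part of a PPLib state; the boot and UVD states
+ * are remembered when their classification bits are set.
+ */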
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *rps,
+                                         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+                                         u8 table_rev)
+{
+       rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+       rps->class = le16_to_cpu(non_clock_info->usClassification);
+       rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+       if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+               rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+               rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+       } else if (r600_is_uvd_state(rps->class, rps->class2)) {
+               rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+               rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+       } else {
+               rps->vclk = 0;
+               rps->dclk = 0;
+       }
+
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+               adev->pm.dpm.boot_ps = rps;
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+               adev->pm.dpm.uvd_ps = rps;
+}
+
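+/*
+ * Parse one PPLib clock info entry into a performance level, patching
+ * vddc for leakage and overriding the boot level with the real VBIOS
+ * default clocks and voltages.
+ */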
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+                                     struct amdgpu_ps *rps, int index,
+                                     union pplib_clock_info *clock_info)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *ps = si_get_ps(rps);
+       u16 leakage_voltage;
+       struct rv7xx_pl *pl = &ps->performance_levels[index];
+       int ret;
+
+       ps->performance_level_count = index + 1;
+
+       pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+       pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+       pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+       pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+       pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+       pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+       pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+       pl->pcie_gen = r600_get_pcie_gen_support(adev,
+                                                si_pi->sys_pcie_mask,
+                                                si_pi->boot_pcie_gen,
+                                                clock_info->si.ucPCIEGen);
+
+       /* patch up vddc if necessary */
+       ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+                                                       &leakage_voltage);
+       if (ret == 0)
+               pl->vddc = leakage_voltage;
+
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+               pi->acpi_vddc = pl->vddc;
+               eg_pi->acpi_vddci = pl->vddci;
+               si_pi->acpi_pcie_gen = pl->pcie_gen;
+       }
+
+       if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+           index == 0) {
+               /* XXX disable for A0 tahiti */
+               si_pi->ulv.supported = false;
+               si_pi->ulv.pl = *pl;
+               si_pi->ulv.one_pcie_lane_in_ulv = false;
+               si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+               si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+               si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+       }
+
+       if (pi->min_vddc_in_table > pl->vddc)
+               pi->min_vddc_in_table = pl->vddc;
+
+       if (pi->max_vddc_in_table < pl->vddc)
+               pi->max_vddc_in_table = pl->vddc;
+
+       /* patch up boot state */
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+               u16 vddc, vddci, mvdd;
+               amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+               pl->mclk = adev->clock.default_mclk;
+               pl->sclk = adev->clock.default_sclk;
+               pl->vddc = vddc;
+               pl->vddci = vddci;
+               si_pi->mvdd_bootup_value = mvdd;
+       }
+
+       if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+           ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+       }
+}
+
+union pplib_power_state {
+       struct _ATOM_PPLIB_STATE v1;
+       struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
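+/*
+ * Walk the ATOM PowerPlay tables and build the driver's power state
+ * array, then fill in the VCE state clocks.
+ */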
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+       union pplib_power_state *power_state;
+       int i, j, k, non_clock_array_index, clock_array_index;
+       union pplib_clock_info *clock_info;
+       struct _StateArray *state_array;
+       struct _ClockInfoArray *clock_info_array;
+       struct _NonClockInfoArray *non_clock_info_array;
+       union power_info *power_info;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+       u8 *power_state_offset;
+       struct si_ps *ps;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset))
+               return -EINVAL;
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+       amdgpu_add_thermal_controller(adev);
+
+       state_array = (struct _StateArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usStateArrayOffset));
+       clock_info_array = (struct _ClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+       non_clock_info_array = (struct _NonClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+       adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct amdgpu_ps), GFP_KERNEL);
+       if (!adev->pm.dpm.ps)
+               return -ENOMEM;
+       power_state_offset = (u8 *)state_array->states;
+       for (i = 0; i < state_array->ucNumEntries; i++) {
+               u8 *idx;
+               power_state = (union pplib_power_state *)power_state_offset;
+               non_clock_array_index = power_state->v2.nonClockInfoIndex;
+               non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+                       &non_clock_info_array->nonClockInfo[non_clock_array_index];
+               ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
+               if (ps == NULL) {
+                       while (i--)
+                               kfree(adev->pm.dpm.ps[i].ps_priv);
+                       kfree(adev->pm.dpm.ps);
+                       adev->pm.dpm.ps = NULL;
+                       return -ENOMEM;
+               }
+               adev->pm.dpm.ps[i].ps_priv = ps;
+               si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+                                             non_clock_info,
+                                             non_clock_info_array->ucEntrySize);
+               k = 0;
+               idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+               for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+                       clock_array_index = idx[j];
+                       if (clock_array_index >= clock_info_array->ucNumEntries)
+                               continue;
+                       if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+                               break;
+                       clock_info = (union pplib_clock_info *)
+                               ((u8 *)&clock_info_array->clockInfo[0] +
+                                (clock_array_index * clock_info_array->ucEntrySize));
+                       si_parse_pplib_clock_info(adev,
+                                                 &adev->pm.dpm.ps[i], k,
+                                                 clock_info);
+                       k++;
+               }
+               power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+       }
+       adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+       /* fill in the vce power states */
+       for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+               u32 sclk, mclk;
+               clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+               clock_info = (union pplib_clock_info *)
+                       &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+               sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+               sclk |= clock_info->si.ucEngineClockHigh << 16;
+               mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+               mclk |= clock_info->si.ucMemoryClockHigh << 16;
+               adev->pm.dpm.vce_states[i].sclk = sclk;
+               adev->pm.dpm.vce_states[i].mclk = mclk;
+       }
+
+       return 0;
+}
+
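+/*
+ * One-time software setup: allocate the nested rv7xx/evergreen/ni/si
+ * power info, parse the VBIOS power tables and initialize the default
+ * thresholds and voltage control capabilities used by the rest of the
+ * driver.
+ */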
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi;
+       struct evergreen_power_info *eg_pi;
+       struct ni_power_info *ni_pi;
+       struct si_power_info *si_pi;
+       struct atom_clock_dividers dividers;
+       int ret;
+       u32 mask;
+
+       si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+       if (si_pi == NULL)
+               return -ENOMEM;
+       adev->pm.dpm.priv = si_pi;
+       ni_pi = &si_pi->ni;
+       eg_pi = &ni_pi->eg;
+       pi = &eg_pi->rv7xx;
+
+       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+       if (ret)
+               si_pi->sys_pcie_mask = 0;
+       else
+               si_pi->sys_pcie_mask = mask;
+       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+       si_set_max_cu_value(adev);
+
+       rv770_get_max_vddc(adev);
+       si_get_leakage_vddc(adev);
+       si_patch_dependency_tables_based_on_leakage(adev);
+
+       pi->acpi_vddc = 0;
+       eg_pi->acpi_vddci = 0;
+       pi->min_vddc_in_table = 0;
+       pi->max_vddc_in_table = 0;
+
+       ret = amdgpu_get_platform_caps(adev);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_parse_extended_power_table(adev);
+       if (ret)
+               return ret;
+
+       ret = si_parse_power_table(adev);
+       if (ret)
+               return ret;
+
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+               kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+       if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+               amdgpu_free_extended_power_table(adev);
+               return -ENOMEM;
+       }
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+       if (adev->pm.dpm.voltage_response_time == 0)
+               adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+       if (adev->pm.dpm.backbias_response_time == 0)
+               adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+       ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+                                            0, false, &dividers);
+       if (ret)
+               pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+       else
+               pi->ref_div = dividers.ref_div + 1;
+
+       eg_pi->smu_uvd_hs = false;
+
+       pi->mclk_strobe_mode_threshold = 40000;
+       if (si_is_special_1gb_platform(adev))
+               pi->mclk_stutter_mode_threshold = 0;
+       else
+               pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+       pi->mclk_edc_enable_threshold = 40000;
+       eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+       ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+       pi->voltage_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+       if (!pi->voltage_control) {
+               si_pi->voltage_control_svi2 =
+                       amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                                   VOLTAGE_OBJ_SVID2);
+               if (si_pi->voltage_control_svi2)
+                       amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                                 &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+       }
+
+       pi->mvdd_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+
+       eg_pi->vddci_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+       if (!eg_pi->vddci_control)
+               si_pi->vddci_control_svi2 =
+                       amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+                                                   VOLTAGE_OBJ_SVID2);
+
+       si_pi->vddc_phase_shed_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                           VOLTAGE_OBJ_PHASE_LUT);
+
+       rv770_get_engine_memory_ss(adev);
+
+       pi->asi = RV770_ASI_DFLT;
+       pi->pasi = CYPRESS_HASI_DFLT;
+       pi->vrc = SISLANDS_VRC_DFLT;
+
+       pi->gfx_clock_gating = true;
+
+       eg_pi->sclk_deep_sleep = true;
+       si_pi->sclk_deep_sleep_above_low = false;
+
+       if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+               pi->thermal_protection = true;
+       else
+               pi->thermal_protection = false;
+
+       eg_pi->dynamic_ac_timing = true;
+
+       eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+       eg_pi->pcie_performance_request =
+               amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+       eg_pi->pcie_performance_request = false;
+#endif
+
+       si_pi->sram_end = SMC_RAM_END;
+
+       adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+       adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+       adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+       adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+       adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+       adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+       adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+       si_initialize_powertune_defaults(adev);
+
+       /* make sure dc limits are valid */
+       if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+           (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+                       adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+       si_pi->fan_ctrl_is_in_default_mode = true;
+
+       return 0;
+}
+
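+/* Free everything allocated by si_dpm_init() and the parsed tables. */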
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (adev->pm.dpm.ps)
+               for (i = 0; i < adev->pm.dpm.num_ps; i++)
+                       kfree(adev->pm.dpm.ps[i].ps_priv);
+       kfree(adev->pm.dpm.ps);
+       kfree(adev->pm.dpm.priv);
+       kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+       amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+                                                   struct seq_file *m)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *rps = &eg_pi->current_rps;
+       struct si_ps *ps = si_get_ps(rps);
+       struct rv7xx_pl *pl;
+       u32 current_index =
+               (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+               CURRENT_STATE_INDEX_SHIFT;
+
+       if (current_index >= ps->performance_level_count) {
+               seq_printf(m, "invalid dpm profile %u\n", current_index);
+       } else {
+               pl = &ps->performance_levels[current_index];
+               seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+               seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+                          current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+       }
+}
+
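+/*
+ * Mask or unmask the CG thermal interrupt for the low-to-high and
+ * high-to-low temperature crossings.
+ */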
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     unsigned type,
+                                     enum amdgpu_interrupt_state state)
+{
+       u32 cg_thermal_int;
+
+       switch (type) {
+       case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int |= THERM_INT_MASK_HIGH;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               default:
+                       break;
+               }
+               break;
+
+       case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int |= THERM_INT_MASK_LOW;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int &= ~THERM_INT_MASK_LOW;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               default:
+                       break;
+               }
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+
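+/*
+ * Thermal interrupt handler: record the crossing direction and kick
+ * the thermal work handler.
+ */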
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+                                   struct amdgpu_irq_src *source,
+                                   struct amdgpu_iv_entry *entry)
+{
+       bool queue_thermal = false;
+
+       if (entry == NULL)
+               return -EINVAL;
+
+       switch (entry->src_id) {
+       case 230: /* thermal low to high */
+               DRM_DEBUG("IH: thermal low to high\n");
+               adev->pm.dpm.thermal.high_to_low = false;
+               queue_thermal = true;
+               break;
+       case 231: /* thermal high to low */
+               DRM_DEBUG("IH: thermal high to low\n");
+               adev->pm.dpm.thermal.high_to_low = true;
+               queue_thermal = true;
+               break;
+       default:
+               break;
+       }
+
+       if (queue_thermal)
+               schedule_work(&adev->pm.dpm.thermal.work);
+
+       return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
+
+       ret = si_set_temperature_range(adev);
+       if (ret)
+               return ret;
+#if 0 /* TODO? */
+       si_dpm_powergate_uvd(adev, true);
+#endif
+       return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+
+       DRM_DEBUG("\n");
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->device == 0x6810) ||
+                   (adev->pdev->device == 0x6811) ||
+                   (adev->pdev->device == 0x6816) ||
+                   (adev->pdev->device == 0x6817) ||
+                   (adev->pdev->device == 0x6806))
+                       chip_name = "pitcairn_k";
+               else
+                       chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0x87) ||
+                   (adev->pdev->device == 0x6820) ||
+                   (adev->pdev->device == 0x6821) ||
+                   (adev->pdev->device == 0x6822) ||
+                   (adev->pdev->device == 0x6823) ||
+                   (adev->pdev->device == 0x682A) ||
+                   (adev->pdev->device == 0x682B))
+                       chip_name = "verde_k";
+               else
+                       chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               if ((adev->pdev->revision == 0xC7) ||
+                   (adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->device == 0x6604) ||
+                   (adev->pdev->device == 0x6605))
+                       chip_name = "oland_k";
+               else
+                       chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0xC3) ||
+                   (adev->pdev->device == 0x6664) ||
+                   (adev->pdev->device == 0x6665) ||
+                   (adev->pdev->device == 0x6667))
+                       chip_name = "hainan_k";
+               else
+                       chip_name = "hainan";
+               break;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+       if (err) {
+               DRM_ERROR("si_smc: Failed to load firmware \"%s\" (err %d)\n",
+                         fw_name, err);
+               release_firmware(adev->pm.fw);
+               adev->pm.fw = NULL;
+       }
+       return err;
+}
+
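+/*
+ * sw_init: hook up the two thermal interrupt sources (src_ids 230 and
+ * 231), pick default power states and, when DPM is enabled, load the
+ * SMC firmware and parse the power tables.
+ */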
+static int si_dpm_sw_init(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+       if (ret)
+               return ret;
+
+       /* default to balanced state */
+       adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+       adev->pm.default_sclk = adev->clock.default_sclk;
+       adev->pm.default_mclk = adev->clock.default_mclk;
+       adev->pm.current_sclk = adev->clock.default_sclk;
+       adev->pm.current_mclk = adev->clock.default_mclk;
+       adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+       if (amdgpu_dpm == 0)
+               return 0;
+
+       ret = si_dpm_init_microcode(adev);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+       mutex_lock(&adev->pm.mutex);
+       ret = si_dpm_init(adev);
+       if (ret)
+               goto dpm_failed;
+       adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+       if (amdgpu_dpm == 1)
+               amdgpu_pm_print_power_states(adev);
+       mutex_unlock(&adev->pm.mutex);
+       DRM_INFO("amdgpu: dpm initialized\n");
+
+       return 0;
+
+dpm_failed:
+       si_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+       DRM_ERROR("amdgpu: dpm initialization failed\n");
+       return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       mutex_lock(&adev->pm.mutex);
+       amdgpu_pm_sysfs_fini(adev);
+       si_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+
+       return 0;
+}
+
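+/* hw_init: program the ASIC for DPM and record whether it enabled. */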
+static int si_dpm_hw_init(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       si_dpm_setup_asic(adev);
+       ret = si_dpm_enable(adev);
+       if (ret)
+               adev->pm.dpm_enabled = false;
+       else
+               adev->pm.dpm_enabled = true;
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               mutex_lock(&adev->pm.mutex);
+               si_dpm_disable(adev);
+               mutex_unlock(&adev->pm.mutex);
+       }
+
+       return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               mutex_lock(&adev->pm.mutex);
+               /* disable dpm */
+               si_dpm_disable(adev);
+               /* reset the power state */
+               adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+               mutex_unlock(&adev->pm.mutex);
+       }
+       return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               /* asic init will reset to the boot state */
+               mutex_lock(&adev->pm.mutex);
+               si_dpm_setup_asic(adev);
+               ret = si_dpm_enable(adev);
+               if (ret)
+                       adev->pm.dpm_enabled = false;
+               else
+                       adev->pm.dpm_enabled = true;
+               mutex_unlock(&adev->pm.mutex);
+               if (adev->pm.dpm_enabled)
+                       amdgpu_pm_compute_clocks(adev);
+       }
+       return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+       /* XXX */
+       return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+       /* XXX */
+       return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+                                       enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+                                       enum amd_powergating_state state)
+{
+       return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+       u32 temp;
+       int actual_temp = 0;
+
+       temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+               CTF_TEMP_SHIFT;
+
+       if (temp & 0x200)
+               actual_temp = 255;
+       else
+               actual_temp = temp & 0x1ff;
+
+       actual_temp = (actual_temp * 1000);
+
+       return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+       if (low)
+               return requested_state->performance_levels[0].sclk;
+       else
+               return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+       if (low)
+               return requested_state->performance_levels[0].mclk;
+       else
+               return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+                                    struct amdgpu_ps *rps)
+{
+       struct si_ps *ps = si_get_ps(rps);
+       struct rv7xx_pl *pl;
+       int i;
+
+       amdgpu_dpm_print_class_info(rps->class, rps->class2);
+       amdgpu_dpm_print_cap_info(rps->caps);
+       DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+       for (i = 0; i < ps->performance_level_count; i++) {
+               pl = &ps->performance_levels[i];
+               if (adev->asic_type >= CHIP_TAHITI)
+                       DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+                                i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+               else
+                       DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+                                i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+       }
+       amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_dpm_set_dpm_funcs(adev);
+       si_dpm_set_irq_funcs(adev);
+       return 0;
+}
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+       .name = "si_dpm",
+       .early_init = si_dpm_early_init,
+       .late_init = si_dpm_late_init,
+       .sw_init = si_dpm_sw_init,
+       .sw_fini = si_dpm_sw_fini,
+       .hw_init = si_dpm_hw_init,
+       .hw_fini = si_dpm_hw_fini,
+       .suspend = si_dpm_suspend,
+       .resume = si_dpm_resume,
+       .is_idle = si_dpm_is_idle,
+       .wait_for_idle = si_dpm_wait_for_idle,
+       .soft_reset = si_dpm_soft_reset,
+       .set_clockgating_state = si_dpm_set_clockgating_state,
+       .set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+       .get_temperature = &si_dpm_get_temp,
+       .pre_set_power_state = &si_dpm_pre_set_power_state,
+       .set_power_state = &si_dpm_set_power_state,
+       .post_set_power_state = &si_dpm_post_set_power_state,
+       .display_configuration_changed = &si_dpm_display_configuration_changed,
+       .get_sclk = &si_dpm_get_sclk,
+       .get_mclk = &si_dpm_get_mclk,
+       .print_power_state = &si_dpm_print_power_state,
+       .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+       .force_performance_level = &si_dpm_force_performance_level,
+       .vblank_too_short = &si_dpm_vblank_too_short,
+       .set_fan_control_mode = &si_dpm_set_fan_control_mode,
+       .get_fan_control_mode = &si_dpm_get_fan_control_mode,
+       .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+       .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+       if (adev->pm.funcs == NULL)
+               adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+       .set = si_dpm_set_interrupt_state,
+       .process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+       adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644 (file)
index 0000000..51ce21c
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG                                    0x96f
+#define MC_ARB_CG                                       0x9fa
+#define                CG_ARB_REQ(x)                           ((x) << 0)
+#define                CG_ARB_REQ_MASK                         (0xff << 0)
+
+#define        MC_ARB_DRAM_TIMING_1                            0x9fc
+#define        MC_ARB_DRAM_TIMING_2                            0x9fd
+#define        MC_ARB_DRAM_TIMING_3                            0x9fe
+#define        MC_ARB_DRAM_TIMING2_1                           0x9ff
+#define        MC_ARB_DRAM_TIMING2_2                           0xa00
+#define        MC_ARB_DRAM_TIMING2_3                           0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT                                1000
+#define CYPRESS_HASI_DFLT                               400000
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 khz */
+#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 khz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
+
+#define SMC_STROBE_RATIO    0x0F
+#define SMC_STROBE_ENABLE   0x10
+
+#define SMC_MC_EDC_RD_FLAG  0x01
+#define SMC_MC_EDC_WR_FLAG  0x02
+#define SMC_MC_RTT_ENABLE   0x04
+#define SMC_MC_STUTTER_EN   0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX  4
+
+#define NISLANDS_SMC_STROBE_RATIO    0x0F
+#define NISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define NISLANDS_SMC_MC_STUTTER_EN   0x08
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT               0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT                  1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
+
+#define SISLANDS_LEAKAGE_INDEX0     0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT  4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define SISLANDS_ACPI_STATE_ARB_INDEX       1
+#define SISLANDS_ULV_STATE_ARB_INDEX        2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC          10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define SISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H                   99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M                   99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN         10
+
+#define SISLANDS_VRC_DFLT                               0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT             1687
+#define SISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT                      0x1f007550
+
+#define SI_ASI_DFLT                                10000
+#define SI_BSP_DFLT                                0x41EB
+#define SI_BSU_DFLT                                0x2
+#define SI_AH_DFLT                                 5
+#define SI_RLP_DFLT                                25
+#define SI_RMP_DFLT                                65
+#define SI_LHP_DFLT                                40
+#define SI_LMP_DFLT                                15
+#define SI_TD_DFLT                                 0
+#define SI_UTC_DFLT_00                             0x24
+#define SI_UTC_DFLT_01                             0x22
+#define SI_UTC_DFLT_02                             0x22
+#define SI_UTC_DFLT_03                             0x22
+#define SI_UTC_DFLT_04                             0x22
+#define SI_UTC_DFLT_05                             0x22
+#define SI_UTC_DFLT_06                             0x22
+#define SI_UTC_DFLT_07                             0x22
+#define SI_UTC_DFLT_08                             0x22
+#define SI_UTC_DFLT_09                             0x22
+#define SI_UTC_DFLT_10                             0x22
+#define SI_UTC_DFLT_11                             0x22
+#define SI_UTC_DFLT_12                             0x22
+#define SI_UTC_DFLT_13                             0x22
+#define SI_UTC_DFLT_14                             0x22
+#define SI_DTC_DFLT_00                             0x24
+#define SI_DTC_DFLT_01                             0x22
+#define SI_DTC_DFLT_02                             0x22
+#define SI_DTC_DFLT_03                             0x22
+#define SI_DTC_DFLT_04                             0x22
+#define SI_DTC_DFLT_05                             0x22
+#define SI_DTC_DFLT_06                             0x22
+#define SI_DTC_DFLT_07                             0x22
+#define SI_DTC_DFLT_08                             0x22
+#define SI_DTC_DFLT_09                             0x22
+#define SI_DTC_DFLT_10                             0x22
+#define SI_DTC_DFLT_11                             0x22
+#define SI_DTC_DFLT_12                             0x22
+#define SI_DTC_DFLT_13                             0x22
+#define SI_DTC_DFLT_14                             0x22
+#define SI_VRC_DFLT                                0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT                1000
+#define SI_BACKBIASRESPONSETIME_DFLT               1000
+#define SI_VRU_DFLT                                0x3
+#define SI_SPLLSTEPTIME_DFLT                       0x1000
+#define SI_SPLLSTEPUNIT_DFLT                       0x3
+#define SI_TPU_DFLT                                0
+#define SI_TPC_DFLT                                0x200
+#define SI_SSTU_DFLT                               0
+#define SI_SST_DFLT                                0x00C8
+#define SI_GICST_DFLT                              0x200
+#define SI_FCT_DFLT                                0x0400
+#define SI_FCTU_DFLT                               0
+#define SI_CTXCGTT3DRPHC_DFLT                      0x20
+#define SI_CTXCGTT3DRSDC_DFLT                      0x40
+#define SI_VDDC3DOORPHC_DFLT                       0x100
+#define SI_VDDC3DOORSDC_DFLT                       0x7
+#define SI_VDDC3DOORSU_DFLT                        0
+#define SI_MPLLLOCKTIME_DFLT                       100
+#define SI_MPLLRESETTIME_DFLT                      150
+#define SI_VCOSTEPPCT_DFLT                          20
+#define SI_ENDINGVCOSTEPPCT_DFLT                    5
+#define SI_REFERENCEDIVIDER_DFLT                    4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+       NISLANDS_DCCAC_LEVEL_0 = 0,
+       NISLANDS_DCCAC_LEVEL_1,
+       NISLANDS_DCCAC_LEVEL_2,
+       NISLANDS_DCCAC_LEVEL_3,
+       NISLANDS_DCCAC_LEVEL_4,
+       NISLANDS_DCCAC_LEVEL_5,
+       NISLANDS_DCCAC_LEVEL_6,
+       NISLANDS_DCCAC_LEVEL_7,
+       NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+       SISLANDS_CACCONFIG_MMR = 0,
+       SISLANDS_CACCONFIG_CGIND,
+       SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+       SI_POWER_LEVEL_LOW = 0,
+       SI_POWER_LEVEL_MEDIUM = 1,
+       SI_POWER_LEVEL_HIGH = 2,
+       SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+       SI_TD_AUTO,
+       SI_TD_UP,
+       SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+       SI_DISPLAY_WATERMARK_LOW = 0,
+       SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+    SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    SI_PM_DISPLAY_GAP_VBLANK       = 1,
+    SI_PM_DISPLAY_GAP_WATERMARK    = 2,
+    SI_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+       u32 at;
+       u32 bt;
+       u32 av;
+       u32 bv;
+       s32 t_slope;
+       s32 t_intercept;
+       u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+    uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+    SMC_Evergreen_MCRegisterSet         data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+    uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_NIslands_MCRegisterSet          data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+       bool supported;
+       struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+       u32 mc_arb_dram_timing;
+       u32 mc_arb_dram_timing2;
+       u32 mc_arb_rfsh_rate;
+       u32 mc_arb_burst_time;
+};
+
+struct at {
+       u32 rlp;
+       u32 rmp;
+       u32 lhp;
+       u32 lmp;
+};
+
+struct ni_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_ad_func_cntl_2;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_dq_func_cntl_2;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+struct RV730_SMC_MCLK_VALUE
+{
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL2;
+    uint32_t        vMPLL_FUNC_CNTL3;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+    RV770_SMC_MCLK_VALUE    mclk770;
+    RV730_SMC_MCLK_VALUE    mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                 arbValue;
+    union{
+        uint8_t             seqValue;
+        uint8_t             ACIndex;
+    };
+    uint8_t                 displayWatermark;
+    uint8_t                 gen2PCIE;
+    uint8_t                 gen2XSP;
+    uint8_t                 backbias;
+    uint8_t                 strobeMode;
+    uint8_t                 mcFlags;
+    uint32_t                aT;
+    uint32_t                bSP;
+    RV770_SMC_SCLK_VALUE    sclk;
+    RV7XX_SMC_MCLK_VALUE    mclk;
+    RV770_SMC_VOLTAGE_VALUE vddc;
+    RV770_SMC_VOLTAGE_VALUE mvdd;
+    RV770_SMC_VOLTAGE_VALUE vddci;
+    uint8_t                 reserved1;
+    uint8_t                 reserved2;
+    uint8_t                 stateFlags;
+    uint8_t                 padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+    uint8_t           flags;
+    uint8_t           padding1;
+    uint8_t           padding2;
+    uint8_t           padding3;
+    RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+    uint8_t             thermalProtectType;
+    uint8_t             systemFlags;
+    uint8_t             maxVDDCIndexInPPTable;
+    uint8_t             extraFlags;
+    uint8_t             highSMIO[MAX_NO_VREG_STEPS];
+    uint32_t            lowSMIO[MAX_NO_VREG_STEPS];
+    RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+    RV770_SMC_SWSTATE   initialState;
+    RV770_SMC_SWSTATE   ACPIState;
+    RV770_SMC_SWSTATE   driverState;
+    RV770_SMC_SWSTATE   ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+       u16 vddc;
+       u8 vddc_index;
+       u8 high_smio;
+       u32 low_smio;
+};
+
+struct rv770_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_ad_func_cntl_2;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_dq_func_cntl_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_func_cntl;
+       u32 mpll_func_cntl2;
+       u32 mpll_func_cntl3;
+       u32 mpll_ss;
+       u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+       struct rv770_clock_registers rv770;
+       struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+       /* flags */
+       bool mem_gddr5;
+       bool pcie_gen2;
+       bool dynamic_pcie_gen2;
+       bool acpi_pcie_gen2;
+       bool boot_in_gen2;
+       bool voltage_control; /* vddc */
+       bool mvdd_control;
+       bool sclk_ss;
+       bool mclk_ss;
+       bool dynamic_ss;
+       bool gfx_clock_gating;
+       bool mg_clock_gating;
+       bool mgcgtssm;
+       bool power_gating;
+       bool thermal_protection;
+       bool display_gap;
+       bool dcodt;
+       bool ulps;
+       /* registers */
+       union r7xx_clock_registers clk_regs;
+       u32 s0_vid_lower_smio_cntl;
+       /* voltage */
+       u32 vddc_mask_low;
+       u32 mvdd_mask_low;
+       u32 mvdd_split_frequency;
+       u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+       u16 max_vddc;
+       u16 max_vddc_in_table;
+       u16 min_vddc_in_table;
+       struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+       u8 valid_vddc_entries;
+       /* dc odt */
+       u32 mclk_odt_threshold;
+       u8 odt_value_0[2];
+       u8 odt_value_1[2];
+       /* stored values */
+       u32 boot_sclk;
+       u16 acpi_vddc;
+       u32 ref_div;
+       u32 active_auto_throttle_sources;
+       u32 mclk_stutter_mode_threshold;
+       u32 mclk_strobe_mode_threshold;
+       u32 mclk_edc_enable_threshold;
+       u32 bsp;
+       u32 bsu;
+       u32 pbsp;
+       u32 pbsu;
+       u32 dsp;
+       u32 psp;
+       u32 asi;
+       u32 pasi;
+       u32 vrc;
+       u32 restricted_levels;
+       u32 rlp;
+       u32 rmp;
+       u32 lhp;
+       u32 lmp;
+       /* smc offsets */
+       u16 state_table_start;
+       u16 soft_regs_start;
+       u16 sram_end;
+       /* scratch structs */
+       RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+       u32 sclk;
+       u32 mclk;
+       u16 vddc;
+       u16 vddci; /* eg+ only */
+       u32 flags;
+       enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+       struct rv7xx_pl high;
+       struct rv7xx_pl medium;
+       struct rv7xx_pl low;
+       bool dc_compatible;
+};
+
+struct si_ps {
+       u16 performance_level_count;
+       bool dc_compatible;
+       struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+       struct ni_leakage_coeffients leakage_coefficients;
+       u32 i_leakage;
+       s32 leakage_minimum_temperature;
+       u32 pwr_const;
+       u32 dc_cac_value;
+       u32 bif_cac_value;
+       u32 lkge_pwr;
+       u8 mc_wr_weight;
+       u8 mc_rd_weight;
+       u8 allow_ovrflw;
+       u8 num_win_tdp;
+       u8 l2num_win_tdp;
+       u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+       /* must be first! */
+       struct rv7xx_power_info rv7xx;
+       /* flags */
+       bool vddci_control;
+       bool dynamic_ac_timing;
+       bool abm;
+       bool mcls;
+       bool light_sleep;
+       bool memory_transition;
+       bool pcie_performance_request;
+       bool pcie_performance_request_registered;
+       bool sclk_deep_sleep;
+       bool dll_default_on;
+       bool ls_clock_gating;
+       bool smu_uvd_hs;
+       bool uvd_enabled;
+       /* stored values */
+       u16 acpi_vddci;
+       u8 mvdd_high_index;
+       u8 mvdd_low_index;
+       u32 mclk_edc_wr_enable_threshold;
+       struct evergreen_mc_reg_table mc_reg_table;
+       struct atom_voltage_table vddc_voltage_table;
+       struct atom_voltage_table vddci_voltage_table;
+       struct evergreen_arb_registers bootup_arb_registers;
+       struct evergreen_ulv_param ulv;
+       struct at ats[2];
+       /* smc offsets */
+       u16 mc_reg_table_start;
+       struct amdgpu_ps current_rps;
+       struct rv7xx_ps current_ps;
+       struct amdgpu_ps requested_rps;
+       struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+    uint8_t     MaxPS;
+    uint8_t     TgtAct;
+    uint8_t     MaxPS_StepInc;
+    uint8_t     MaxPS_StepDec;
+    uint8_t     PSST;
+    uint8_t     NearTDPDec;
+    uint8_t     AboveSafeInc;
+    uint8_t     BelowSafeInc;
+    uint8_t     PSDeltaLimit;
+    uint8_t     PSDeltaWin;
+    uint8_t     Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_FUNC_CNTL_4;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL_1;
+    uint32_t        vMPLL_FUNC_CNTL_2;
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     arbValue;
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     reserved1;
+    uint8_t                     reserved2;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    NISLANDS_SMC_SCLK_VALUE     sclk;
+    NISLANDS_SMC_MCLK_VALUE     mclk;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint32_t                    powergate_en;
+    uint8_t                     hUp;
+    uint8_t                     hDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    reserved[2];
+    PP_NIslands_Dpm2PerfLevel   dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    PP_NIslands_DPM2Parameters          dpm2Params;
+    NISLANDS_SMC_SWSTATE                initialState;
+    NISLANDS_SMC_SWSTATE                ACPIState;
+    NISLANDS_SMC_SWSTATE                ULVState;
+    NISLANDS_SMC_SWSTATE                driverState;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+       /* must be first! */
+       struct evergreen_power_info eg;
+       struct ni_clock_registers clock_registers;
+       struct ni_mc_reg_table mc_reg_table;
+       u32 mclk_rtt_mode_threshold;
+       /* flags */
+       bool use_power_boost_limit;
+       bool support_cac_long_term_average;
+       bool cac_enabled;
+       bool cac_configuration_required;
+       bool driver_calculate_cac_leakage;
+       bool pc_enabled;
+       bool enable_power_containment;
+       bool enable_cac;
+       bool enable_sq_ramping;
+       /* smc offsets */
+       u16 arb_table_start;
+       u16 fan_table_start;
+       u16 cac_table_start;
+       u16 spll_table_start;
+       /* CAC stuff */
+       struct ni_cac_data cac_data;
+       u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+       const struct ni_cac_weights *cac_weights;
+       u8 lta_window_size;
+       u8 lts_truncate;
+       struct si_ps current_ps;
+       struct si_ps requested_ps;
+       /* scratch structs */
+       SMC_NIslands_MCRegisters smc_mc_reg_table;
+       NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+       u32 offset;
+       u32 mask;
+       u32 shift;
+       u32 value;
+       enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+       u32 cac_window;
+       u32 l2_lta_window_size_default;
+       u8 lts_truncate_default;
+       u8 shift_n_default;
+       u8 operating_temp;
+       struct ni_leakage_coeffients leakage_coefficients;
+       u32 fixed_kt;
+       u32 lkge_lut_v0_percent;
+       u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+       bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+       u32 cac_leakage;
+       s32 leakage_minimum_temperature;
+       u32 wintime;
+       u32 l2_lta_window_size;
+       u8 lts_truncate;
+       u8 shift_n;
+       u8 dc_pwr_value;
+       bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+       u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+       u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+       u32 k;
+       u32 t0;
+       u32 max_t;
+       u8 window_size;
+       u8 temp_select;
+       u8 dte_mode;
+       u8 tdep_count;
+       u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 t_threshold;
+       bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 dll_cntl;
+       u32 mclk_pwrmgt_cntl;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_func_cntl;
+       u32 mpll_func_cntl_1;
+       u32 mpll_func_cntl_2;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+       u16 voltage;
+       u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+       u16 count;
+       struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+
+struct si_ulv_param {
+       bool supported;
+       u32 cg_ulv_control;
+       u32 cg_ulv_parameter;
+       u32 volt_change_delay;
+       struct rv7xx_pl pl;
+       bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+       /* must be first! */
+       struct ni_power_info ni;
+       struct si_clock_registers clock_registers;
+       struct si_mc_reg_table mc_reg_table;
+       struct atom_voltage_table mvdd_voltage_table;
+       struct atom_voltage_table vddc_phase_shed_table;
+       struct si_leakage_voltage leakage_voltage;
+       u16 mvdd_bootup_value;
+       struct si_ulv_param ulv;
+       u32 max_cu;
+       /* pcie gen */
+       enum amdgpu_pcie_gen force_pcie_gen;
+       enum amdgpu_pcie_gen boot_pcie_gen;
+       enum amdgpu_pcie_gen acpi_pcie_gen;
+       u32 sys_pcie_mask;
+       /* flags */
+       bool enable_dte;
+       bool enable_ppm;
+       bool vddc_phase_shed_control;
+       bool pspp_notify_required;
+       bool sclk_deep_sleep_above_low;
+       bool voltage_control_svi2;
+       bool vddci_control_svi2;
+       /* smc offsets */
+       u32 sram_end;
+       u32 state_table_start;
+       u32 soft_regs_start;
+       u32 mc_reg_table_start;
+       u32 arb_table_start;
+       u32 cac_table_start;
+       u32 dte_table_start;
+       u32 spll_table_start;
+       u32 papm_cfg_table_start;
+       u32 fan_table_start;
+       /* CAC stuff */
+       const struct si_cac_config_reg *cac_weights;
+       const struct si_cac_config_reg *lcac_config;
+       const struct si_cac_config_reg *cac_override;
+       const struct si_powertune_data *powertune_data;
+       struct si_dyn_powertune_data dyn_powertune_data;
+       /* DTE stuff */
+       struct si_dte_data dte_data;
+       /* scratch structs */
+       SMC_SIslands_MCRegisters smc_mc_reg_table;
+       SISLANDS_SMC_STATETABLE smc_statetable;
+       PP_SIslands_PAPMParameters papm_parm;
+       /* SVI2 */
+       u8 svd_gpio_id;
+       u8 svc_gpio_id;
+       /* fan control */
+       bool fan_ctrl_is_in_default_mode;
+       u32 t_min;
+       u32 fan_ctrl_default_mode;
+       bool fan_is_controlled_by_smc;
+};
+
+#endif
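
The power-info structs above rely on a strict nesting discipline: each derived struct embeds its parent as the very first member (the "must be first!" comments), so a single driver-private pointer can be reinterpreted at any level of the rv7xx -> evergreen -> ni -> si hierarchy. A minimal standalone sketch of that layout trick, using stub structs rather than the driver's real types:

    #include <assert.h>
    #include <stddef.h>

    /* Stubs mirroring the rv7xx -> evergreen -> ni -> si nesting. */
    struct rv7xx_info { int flags; };
    struct eg_info    { struct rv7xx_info rv7xx; int vddci; }; /* rv7xx first */
    struct ni_info    { struct eg_info eg; int cac; };         /* eg first */
    struct si_info    { struct ni_info ni; int dte; };         /* ni first */

    int main(void)
    {
            struct si_info si = { 0 };
            void *priv = &si; /* one opaque pointer, as in adev->pm.dpm.priv */

            /* Each parent sits at offset 0, so every cast sees the same address. */
            assert(offsetof(struct si_info, ni) == 0);
            assert((void *)&si.ni.eg.rv7xx == priv);

            struct rv7xx_info *pi = priv; /* valid view of the outermost struct */
            pi->flags = 1;
            return 0;
    }
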
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644 (file)
index 0000000..8fae3d4
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_cntl = RREG32(IH_CNTL);
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+       ih_cntl |= ENABLE_INTR;
+       ih_rb_cntl |= IH_RB_ENABLE;
+       WREG32(IH_CNTL, ih_cntl);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       adev->irq.ih.enabled = true;
+}
+
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+       u32 ih_cntl = RREG32(IH_CNTL);
+
+       ih_rb_cntl &= ~IH_RB_ENABLE;
+       ih_cntl &= ~ENABLE_INTR;
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       WREG32(IH_CNTL, ih_cntl);
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+       adev->irq.ih.enabled = false;
+       adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+       u64 wptr_off;
+
+       si_ih_disable_interrupts(adev);
+       WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+       interrupt_cntl = RREG32(INTERRUPT_CNTL);
+       interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+       interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+       WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+       WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+       rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+       ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+                    IH_WPTR_OVERFLOW_CLEAR |
+                    (rb_bufsz << 1) |
+                    IH_WPTR_WRITEBACK_ENABLE;
+
+       wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+       WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+       WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+
+       ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+       if (adev->irq.msi_enabled)
+               ih_cntl |= RPTR_REARM;
+       WREG32(IH_CNTL, ih_cntl);
+
+       pci_set_master(adev->pdev);
+       si_ih_enable_interrupts(adev);
+
+       return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+       si_ih_disable_interrupts(adev);
+       mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+       u32 wptr, tmp;
+
+       wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+       if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+               wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+               dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                       wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+               adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+               tmp = RREG32(IH_RB_CNTL);
+               tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+               WREG32(IH_RB_CNTL, tmp);
+       }
+       return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+                            struct amdgpu_iv_entry *entry)
+{
+       u32 ring_index = adev->irq.ih.rptr >> 2;
+       uint32_t dw[4];
+
+       dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+       dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+       dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+       dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+       entry->src_id = dw[0] & 0xff;
+       entry->src_data = dw[1] & 0xfffffff;
+       entry->ring_id = dw[2] & 0xff;
+       entry->vm_id = (dw[2] >> 8) & 0xff;
+
+       adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+       WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_ih_set_interrupt_funcs(adev);
+
+       return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+       if (r)
+               return r;
+
+       return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev);
+
+       return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_ih_irq_disable(adev);
+
+       return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               return false;
+
+       return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (si_ih_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+       if (srbm_soft_reset) {
+               tmp = RREG32(SRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+       .name = "si_ih",
+       .early_init = si_ih_early_init,
+       .late_init = NULL,
+       .sw_init = si_ih_sw_init,
+       .sw_fini = si_ih_sw_fini,
+       .hw_init = si_ih_hw_init,
+       .hw_fini = si_ih_hw_fini,
+       .suspend = si_ih_suspend,
+       .resume = si_ih_resume,
+       .is_idle = si_ih_is_idle,
+       .wait_for_idle = si_ih_wait_for_idle,
+       .soft_reset = si_ih_soft_reset,
+       .set_clockgating_state = si_ih_set_clockgating_state,
+       .set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+       .get_wptr = si_ih_get_wptr,
+       .decode_iv = si_ih_decode_iv,
+       .set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih_funcs == NULL)
+               adev->irq.ih_funcs = &si_ih_funcs;
+}
+
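
si_ih_get_wptr(), si_ih_decode_iv() and si_ih_set_rptr() implement a single-consumer ring: each interrupt vector takes four little-endian dwords (16 bytes), ring_index is the dword index derived from the byte-granular rptr, and rptr advances by 16 per entry. A hedged userspace sketch of the consume loop assumed to sit around these hooks (the loop itself is illustrative, not the amdgpu core's literal code):

    #include <stdint.h>
    #include <stdio.h>

    #define IV_ENTRY_BYTES 16 /* four dwords per interrupt vector */

    static void drain_ring(const uint32_t *ring, uint32_t rptr, uint32_t wptr,
                           uint32_t ptr_mask)
    {
            while (rptr != wptr) {
                    uint32_t ring_index = rptr >> 2; /* byte offset -> dword index */
                    uint32_t src_id   = ring[ring_index + 0] & 0xff;
                    uint32_t src_data = ring[ring_index + 1] & 0xfffffff;

                    printf("IV: src_id=%u src_data=0x%x\n", src_id, src_data);
                    rptr = (rptr + IV_ENTRY_BYTES) & ptr_mask; /* wrap at ring end */
            }
    }

    int main(void)
    {
            uint32_t ring[16] = { 42, 0x123 }; /* one fabricated entry */

            drain_ring(ring, 0, IV_ENTRY_BYTES, sizeof(ring) - 1);
            return 0;
    }
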
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
new file mode 100644 (file)
index 0000000..f3e3a95
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
+
+extern const struct amd_ip_funcs si_ih_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644 (file)
index 0000000..668ba99
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+                                  u32 smc_address, u32 limit)
+{
+       if (smc_address & 3)
+               return -EINVAL;
+       if ((smc_address + 3) > limit)
+               return -EINVAL;
+
+       WREG32(SMC_IND_INDEX_0, smc_address);
+       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+       return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+                               u32 smc_start_address,
+                               const u8 *src, u32 byte_count, u32 limit)
+{
+       unsigned long flags;
+       int ret = 0;
+       u32 data, original_data, addr, extra_shift;
+
+       if (smc_start_address & 3)
+               return -EINVAL;
+       if ((smc_start_address + byte_count) > limit)
+               return -EINVAL;
+
+       addr = smc_start_address;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       while (byte_count >= 4) {
+               /* SMC address space is BE */
+               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               WREG32(SMC_IND_DATA_0, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       /* RMW for the final bytes */
+       if (byte_count > 0) {
+               data = 0;
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               original_data = RREG32(SMC_IND_DATA_0);
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       /* SMC address space is BE */
+                       data = (data << 8) + *src++;
+                       byte_count--;
+               }
+
+               data <<= extra_shift;
+               data |= (original_data & ~((~0UL) << extra_shift));
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               WREG32(SMC_IND_DATA_0, data);
+       }
+
+done:
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+       tmp &= ~RST_REG;
+
+       WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+
+       tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+             RST_REG;
+       WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
+       static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+       return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+       if (enable)
+               tmp &= ~CK_DISABLE;
+       else
+               tmp |= CK_DISABLE;
+
+       WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+       u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+       u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+       if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+               return true;
+
+       return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+                                      PPSMC_Msg msg)
+{
+       u32 tmp;
+       int i;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return PPSMC_Result_Failed;
+
+       WREG32(SMC_MESSAGE_0, msg);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SMC_RESP_0);
+               if (tmp != 0)
+                       break;
+               udelay(1);
+       }
+
+       return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       int i;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return PPSMC_Result_OK;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+               if ((tmp & CKEN) == 0)
+                       break;
+               udelay(1);
+       }
+
+       return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+       const struct smc_firmware_header_v1_0 *hdr;
+       unsigned long flags;
+       u32 ucode_start_address;
+       u32 ucode_size;
+       const u8 *src;
+       u32 data;
+
+       if (!adev->pm.fw)
+               return -EINVAL;
+
+       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+       amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+       src = (const u8 *)
+               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       if (ucode_size & 3)
+               return -EINVAL;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, ucode_start_address);
+       WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+       while (ucode_size >= 4) {
+               /* SMC address space is BE */
+               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+               WREG32(SMC_IND_DATA_0, data);
+
+               src += 4;
+               ucode_size -= 4;
+       }
+       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return 0;
+}
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                 u32 *value, u32 limit)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       ret = si_set_smc_sram_address(adev, smc_address, limit);
+       if (ret == 0)
+               *value = RREG32(SMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                  u32 value, u32 limit)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       ret = si_set_smc_sram_address(adev, smc_address, limit);
+       if (ret == 0)
+               WREG32(SMC_IND_DATA_0, value);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644 (file)
index 0000000..ee4b846
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+    uint8_t MaxPS;
+    uint8_t TgtAct;
+    uint8_t MaxPS_StepInc;
+    uint8_t MaxPS_StepDec;
+    uint8_t PSSamplingTime;
+    uint8_t NearTDPDec;
+    uint8_t AboveSafeInc;
+    uint8_t BelowSafeInc;
+    uint8_t PSDeltaLimit;
+    uint8_t PSDeltaWin;
+    uint16_t PwrEfficiencyRatio;
+    uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+    uint32_t    dpm2Flags;
+    uint8_t     CurrPSkip;
+    uint8_t     CurrPSkipPowerShift;
+    uint8_t     CurrPSkipTDP;
+    uint8_t     CurrPSkipOCP;
+    uint8_t     MaxSPLLIndex;
+    uint8_t     MinSPLLIndex;
+    uint8_t     CurrSPLLIndex;
+    uint8_t     InfSweepMode;
+    uint8_t     InfSweepDir;
+    uint8_t     TDPexceeded;
+    uint8_t     reserved;
+    uint8_t     SwitchDownThreshold;
+    uint32_t    SwitchDownCounter;
+    uint32_t    SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+    uint32_t    MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+    uint32_t    EstimatedDGPU_T;
+    uint32_t    EstimatedDGPU_P;
+    uint32_t    EstimatedAPU_T;
+    uint32_t    EstimatedAPU_P;
+    uint8_t     dGPU_T_Limit_Exceeded;
+    uint8_t     reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+    uint32_t    NearTDPLimitTherm;
+    uint32_t    NearTDPLimitPAPM;
+    uint32_t    PlatformPowerLimit;
+    uint32_t    dGPU_T_Limit;
+    uint32_t    dGPU_T_Warning;
+    uint32_t    dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t    vCG_SPLL_FUNC_CNTL;
+    uint32_t    vCG_SPLL_FUNC_CNTL_2;
+    uint32_t    vCG_SPLL_FUNC_CNTL_3;
+    uint32_t    vCG_SPLL_FUNC_CNTL_4;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t    sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t    vMPLL_FUNC_CNTL;
+    uint32_t    vMPLL_FUNC_CNTL_1;
+    uint32_t    vMPLL_FUNC_CNTL_2;
+    uint32_t    vMPLL_AD_FUNC_CNTL;
+    uint32_t    vMPLL_DQ_FUNC_CNTL;
+    uint32_t    vMCLK_PWRMGT_CNTL;
+    uint32_t    vDLL_CNTL;
+    uint32_t    vMPLL_SS;
+    uint32_t    vMPLL_SS2;
+    uint32_t    mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t    value;
+    uint8_t     index;
+    uint8_t     phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     UVDWatermark;
+    uint8_t                     VCEWatermark;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint8_t                     padding;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    SISLANDS_SMC_SCLK_VALUE     sclk;
+    SISLANDS_SMC_MCLK_VALUE     mclk;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    SISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint8_t                     hysteresisUp;
+    uint8_t                     hysteresisDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    MaxPoweredUpCU;
+    SISLANDS_SMC_VOLTAGE_VALUE  high_temp_vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  low_temp_vddc;
+    uint32_t                    reserved[2];
+    PP_SIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO    0x0F
+#define SISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define SISLANDS_SMC_MC_STUTTER_EN   0x08
+#define SISLANDS_SMC_MC_PG_EN        0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
+    PP_SIslands_DPM2Parameters          dpm2Params;
+    SISLANDS_SMC_SWSTATE                initialState;
+    SISLANDS_SMC_SWSTATE                ACPIState;
+    SISLANDS_SMC_SWSTATE                ULVState;
+    SISLANDS_SMC_SWSTATE                driverState;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout         0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg               0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi               0x28
+#define SI_SMC_SOFT_REGISTER_seq_index                0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time            0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim          0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold      0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay     0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay    0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay           0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us             0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index               0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width  0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen   0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio              0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type     0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
+
+struct PP_SIslands_FanTable
+{
+       uint8_t  fdo_mode;
+       uint8_t  padding;
+       int16_t  temp_min;
+       int16_t  temp_med;
+       int16_t  temp_max;
+       int16_t  slope1;
+       int16_t  slope2;
+       int16_t  fdo_min;
+       int16_t  hys_up;
+       int16_t  hys_down;
+       int16_t  hys_slope;
+       int16_t  temp_resp_lim;
+       int16_t  temp_curr;
+       int16_t  slope_curr;
+       int16_t  pwm_curr;
+       uint32_t refresh_period;
+       int16_t  fdo_max;
+       uint8_t  temp_src;
+       int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I  7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+    uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+    uint32_t   lkge_lut_V0;
+    uint32_t   lkge_lut_Vstep;
+    uint32_t   WinTime;
+    uint32_t   R_LL;
+    uint32_t   calculation_repeats;
+    uint32_t   l2numWin_TDP;
+    uint32_t   dc_cac;
+    uint8_t    lts_truncate_n;
+    uint8_t    SHIFT_N;
+    uint8_t    log2_PG_LKG_SCALE;
+    uint8_t    cac_temp;
+    uint32_t   lkge_lut_T0;
+    uint32_t   lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+    uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_SIslands_MCRegisterSet          data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  mc_arb_burst_time;
+    uint8_t  padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+    uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t K;
+    uint32_t T0;
+    uint32_t MaxT;
+    uint8_t  WindowSize;
+    uint8_t  Tdep_count;
+    uint8_t  temp_select;
+    uint8_t  DTE_mode;
+    uint8_t  T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable                0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable            0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration          0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters            0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+                               u32 smc_start_address,
+                               const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                 u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                  u32 value, u32 limit);
+
+#endif
+
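
The SISLANDS_SMC_FIRMWARE_HEADER_* values are byte offsets into a header blob at 0x10000 in SMC SRAM, so a table's start address is fetched indirectly through the header. A hedged sketch of that lookup; only amdgpu_si_read_smc_sram_dword() and the defines come from this file, the helper and its call are illustrative:

    /* Hypothetical helper: fetch one table pointer from the firmware header. */
    static int si_get_table_start(struct amdgpu_device *adev, u32 header_offset,
                                  u32 *start, u32 sram_end)
    {
            return amdgpu_si_read_smc_sram_dword(adev,
                            SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + header_offset,
                            start, sram_end);
    }

    /* e.g.:
     *   u32 state_table_start;
     *   si_get_table_start(adev, SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
     *                      &state_table_start, sram_end);
     */
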
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644 (file)
index f06f6f4..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_smum.h"
-
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int tonga_dpm_early_init(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       tonga_dpm_set_funcs(adev);
-
-       return 0;
-}
-
-static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
-{
-       char fw_name[30] = "amdgpu/tonga_smc.bin";
-       int err;
-       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-       err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-       if (err) {
-               DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-               release_firmware(adev->pm.fw);
-               adev->pm.fw = NULL;
-       }
-       return err;
-}
-
-static int tonga_dpm_sw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ret = tonga_dpm_init_microcode(adev);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int tonga_dpm_sw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       release_firmware(adev->pm.fw);
-       adev->pm.fw = NULL;
-
-       return 0;
-}
-
-static int tonga_dpm_hw_init(void *handle)
-{
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       /* smu init only needs to be called at startup, not resume.
-        * It should be in sw_init, but requires the fw info gathered
-        * in sw_init from other IP modules.
-        */
-       ret = tonga_smu_init(adev);
-       if (ret) {
-               DRM_ERROR("SMU initialization failed\n");
-               goto fail;
-       }
-
-       ret = tonga_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-
-fail:
-       adev->firmware.smu_load = false;
-       mutex_unlock(&adev->pm.mutex);
-       return -EINVAL;
-}
-
-static int tonga_dpm_hw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-       /* smu fini only needs to be called at teardown, not suspend.
-        * It should be in sw_fini, but we put it here for symmetry
-        * with smu init.
-        */
-       tonga_smu_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
-       return 0;
-}
-
-static int tonga_dpm_suspend(void *handle)
-{
-       return tonga_dpm_hw_fini(handle);
-}
-
-static int tonga_dpm_resume(void *handle)
-{
-       return tonga_dpm_hw_init(handle);
-}
-
-static int tonga_dpm_set_clockgating_state(void *handle,
-                       enum amd_clockgating_state state)
-{
-       return 0;
-}
-
-static int tonga_dpm_set_powergating_state(void *handle,
-                       enum amd_powergating_state state)
-{
-       return 0;
-}
-
-const struct amd_ip_funcs tonga_dpm_ip_funcs = {
-       .name = "tonga_dpm",
-       .early_init = tonga_dpm_early_init,
-       .late_init = NULL,
-       .sw_init = tonga_dpm_sw_init,
-       .sw_fini = tonga_dpm_sw_fini,
-       .hw_init = tonga_dpm_hw_init,
-       .hw_fini = tonga_dpm_hw_fini,
-       .suspend = tonga_dpm_suspend,
-       .resume = tonga_dpm_resume,
-       .is_idle = NULL,
-       .wait_for_idle = NULL,
-       .soft_reset = NULL,
-       .set_clockgating_state = tonga_dpm_set_clockgating_state,
-       .set_powergating_state = tonga_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
-       .get_temperature = NULL,
-       .pre_set_power_state = NULL,
-       .set_power_state = NULL,
-       .post_set_power_state = NULL,
-       .display_configuration_changed = NULL,
-       .get_sclk = NULL,
-       .get_mclk = NULL,
-       .print_power_state = NULL,
-       .debugfs_print_current_performance_level = NULL,
-       .force_performance_level = NULL,
-       .vblank_too_short = NULL,
-       .powergate_uvd = NULL,
-};
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
-{
-       if (NULL == adev->pm.funcs)
-               adev->pm.funcs = &tonga_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c92055805a451d5eecac746181383a930906fa91..d127d59f953a8ded522884fa7f9eba77e648db2d 100644 (file)
@@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_check_soft_reset(void *handle)
 {
-       u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);
 
        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle)
                                                SOFT_RESET_IH, 1);
 
        if (srbm_soft_reset) {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
+               adev->irq.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
+               adev->irq.srbm_soft_reset = 0;
+       }
+
+       return 0;
+}
+
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+               return 0;
+
+       return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+               return 0;
+
+       return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+               return 0;
+       srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+       if (srbm_soft_reset) {
+               u32 tmp;
+
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
        .resume = tonga_ih_resume,
        .is_idle = tonga_ih_is_idle,
        .wait_for_idle = tonga_ih_wait_for_idle,
+       .check_soft_reset = tonga_ih_check_soft_reset,
+       .pre_soft_reset = tonga_ih_pre_soft_reset,
        .soft_reset = tonga_ih_soft_reset,
+       .post_soft_reset = tonga_ih_post_soft_reset,
        .set_clockgating_state = tonga_ih_set_clockgating_state,
        .set_powergating_state = tonga_ih_set_powergating_state,
 };
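
The tonga_ih change above splits the monolithic reset into four stages: check_soft_reset() latches the hang state and the SRBM bits, pre_soft_reset() quiesces the block only if it is hung, soft_reset() pulses SRBM_SOFT_RESET, and post_soft_reset() brings the block back up. The core is assumed to drive them in roughly this order (illustrative sketch, not the reset path's literal code):

    /* Illustrative ordering of the new callbacks. */
    funcs->check_soft_reset(adev);        /* record hang + reset bits */
    if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) {
            funcs->pre_soft_reset(adev);  /* quiesce: tonga_ih_hw_fini() */
            funcs->soft_reset(adev);      /* pulse SRBM_SOFT_RESET */
            funcs->post_soft_reset(adev); /* reinit: tonga_ih_hw_init() */
    }
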
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644 (file)
index 940de18..0000000
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_ppsmc.h"
-#include "tonga_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#define TONGA_SMC_SIZE 0x20000
-
-static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
-       uint32_t val;
-
-       if (smc_address & 3)
-               return -EINVAL;
-
-       if ((smc_address + 3) > limit)
-               return -EINVAL;
-
-       WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       return 0;
-}
-
-static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-       uint32_t addr;
-       uint32_t data, orig_data;
-       int result = 0;
-       uint32_t extra_shift;
-       unsigned long flags;
-
-       if (smc_start_address & 3)
-               return -EINVAL;
-
-       if ((smc_start_address + byte_count) > limit)
-               return -EINVAL;
-
-       addr = smc_start_address;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       while (byte_count >= 4) {
-               /* Bytes are written into the SMC address space with the MSB first */
-               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-               result = tonga_set_smc_sram_address(adev, addr, limit);
-
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (0 != byte_count) {
-               /* Now write odd bytes left, do a read modify write cycle */
-               data = 0;
-
-               result = tonga_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               orig_data = RREG32(mmSMC_IND_DATA_0);
-               extra_shift = 8 * (4 - byte_count);
-
-               while (byte_count > 0) {
-                       data = (data << 8) + *src++;
-                       byte_count--;
-               }
-
-               data <<= extra_shift;
-               data |= (orig_data & ~((~0UL) << extra_shift));
-
-               result = tonga_set_smc_sram_address(adev, addr, limit);
-               if (result)
-                       goto out;
-
-               WREG32(mmSMC_IND_DATA_0, data);
-       }
-
-out:
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
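
The deleted copy helper packs host bytes MSB-first into each 32-bit indirect write, and finishes a tail that is not a multiple of four with a read-modify-write so the bytes past the payload keep their previous contents. A worked example for a 2-byte tail:

	/* byte_count == 2, src == {0xAA, 0xBB}, existing word == 0x11223344:
	 *   extra_shift = 8 * (4 - 2) = 16
	 *   data        = 0x0000AABB, then data <<= 16   -> 0xAABB0000
	 *   orig_data & ~((~0UL) << 16)                  -> 0x00003344
	 *   word written back                             = 0xAABB3344
	 */
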
-
-static int tonga_program_jump_on_start(struct amdgpu_device *adev)
-{
-       static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-       tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-       return 0;
-}
-
-static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
-       return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32(mmSMC_RESP_0);
-               if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
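
wait_smu_response() is the busy-wait idiom this file uses throughout: poll a status field once per microsecond, up to adev->usec_timeout iterations. The generic shape of the pattern is sketched below; poll_fn is a hypothetical predicate, and -ETIMEDOUT would arguably be a more descriptive errno than the -EINVAL returned above:

	static int poll_until(struct amdgpu_device *adev,
			      bool (*poll_fn)(struct amdgpu_device *adev))
	{
		int i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (poll_fn(adev))
				return 0;
			udelay(1);	/* one tick per iteration */
		}
		return -ETIMEDOUT;
	}
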
-
-static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, 0x20000);
-       WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send message\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-       if (!tonga_is_smc_ram_running(adev))
-       {
-               return -EINVAL;
-       }
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send message\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-                                               PPSMC_Msg msg)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-                                               PPSMC_Msg msg,
-                                               uint32_t parameter)
-{
-       if (!tonga_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return tonga_send_msg_to_smc(adev, msg);
-}
-
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
-                                       struct amdgpu_device *adev,
-                                       PPSMC_Msg msg, uint32_t parameter)
-{
-       if (wait_smu_response(adev)) {
-               DRM_ERROR("Failed to send previous message\n");
-               return -EINVAL;
-       }
-
-       WREG32(mmSMC_MSG_ARG_0, parameter);
-
-       return tonga_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-       int i;
-       uint32_t val;
-
-       if (!tonga_is_smc_ram_running(adev))
-               return -EINVAL;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-               if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout)
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
-static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-       const struct smc_firmware_header_v1_0 *hdr;
-       uint32_t ucode_size;
-       uint32_t ucode_start_address;
-       const uint8_t *src;
-       uint32_t val;
-       uint32_t byte_count;
-       uint32_t *data;
-       unsigned long flags;
-
-       if (!adev->pm.fw)
-               return -EINVAL;
-
-       /* Skip SMC ucode loading on SR-IOV capable boards.
-        * vbios does this for us in asic_init in that case.
-        */
-       if (adev->virtualization.supports_sr_iov)
-               return 0;
-
-       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-       amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-       src = (const uint8_t *)
-               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-       if (ucode_size & 3) {
-               DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-               return -EINVAL;
-       }
-
-       if (ucode_size > TONGA_SMC_SIZE) {
-               DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-               return -EINVAL;
-       }
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-       byte_count = ucode_size;
-       data = (uint32_t *)src;
-       for (; byte_count >= 4; data++, byte_count -= 4)
-               WREG32(mmSMC_IND_DATA_0, data[0]);
-
-       val = RREG32(mmSMC_IND_ACCESS_CNTL);
-       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       WREG32(mmSMC_IND_ACCESS_CNTL, val);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-       return 0;
-}
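
Note the AUTO_INCREMENT_IND_0 toggle: with auto-increment enabled, the index register is programmed once and every subsequent data write advances it in hardware, which is why the bulk loop above never re-calls tonga_set_smc_sram_address(). The core of the idiom (a sketch; assumes the caller holds smc_idx_lock and has already set the auto-increment bit):

	static void smc_bulk_write(struct amdgpu_device *adev, uint32_t start,
				   const uint32_t *buf, uint32_t bytes)
	{
		WREG32(mmSMC_IND_INDEX_0, start);	/* program the index once */
		for (; bytes >= 4; buf++, bytes -= 4)
			WREG32(mmSMC_IND_DATA_0, *buf);	/* index auto-advances */
	}
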
-
-#if 0 /* not used yet */
-static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
-                               uint32_t smc_address,
-                               uint32_t *value,
-                               uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = tonga_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               *value = RREG32(mmSMC_IND_DATA_0);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
-                               uint32_t smc_address,
-                               uint32_t value,
-                               uint32_t limit)
-{
-       int result;
-       unsigned long flags;
-
-       spin_lock_irqsave(&adev->smc_idx_lock, flags);
-       result = tonga_set_smc_sram_address(adev, smc_address, limit);
-       if (result == 0)
-               WREG32(mmSMC_IND_DATA_0, value);
-       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-       return result;
-}
-
-static int tonga_smu_stop_smc(struct amdgpu_device *adev)
-{
-       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case UCODE_ID_SDMA0:
-                       return AMDGPU_UCODE_ID_SDMA0;
-               case UCODE_ID_SDMA1:
-                       return AMDGPU_UCODE_ID_SDMA1;
-               case UCODE_ID_CP_CE:
-                       return AMDGPU_UCODE_ID_CP_CE;
-               case UCODE_ID_CP_PFP:
-                       return AMDGPU_UCODE_ID_CP_PFP;
-               case UCODE_ID_CP_ME:
-                       return AMDGPU_UCODE_ID_CP_ME;
-               case UCODE_ID_CP_MEC:
-               case UCODE_ID_CP_MEC_JT1:
-                       return AMDGPU_UCODE_ID_CP_MEC1;
-               case UCODE_ID_CP_MEC_JT2:
-                       return AMDGPU_UCODE_ID_CP_MEC2;
-               case UCODE_ID_RLC_G:
-                       return AMDGPU_UCODE_ID_RLC_G;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return AMDGPU_UCODE_ID_MAXIMUM;
-       }
-}
-
-static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-                                               uint32_t fw_type,
-                                               struct SMU_Entry *entry)
-{
-       enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
-       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-       const struct gfx_firmware_header_v1_0 *header = NULL;
-       uint64_t gpu_addr;
-       uint32_t data_size;
-
-       if (ucode->fw == NULL)
-               return -EINVAL;
-
-       gpu_addr  = ucode->mc_addr;
-       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-       data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-       if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
-               (fw_type == UCODE_ID_CP_MEC_JT2)) {
-               gpu_addr += le32_to_cpu(header->jt_offset) << 2;
-               data_size = le32_to_cpu(header->jt_size) << 2;
-       }
-
-       entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-       entry->id = (uint16_t)fw_type;
-       entry->image_addr_high = upper_32_bits(gpu_addr);
-       entry->image_addr_low = lower_32_bits(gpu_addr);
-       entry->meta_data_addr_high = 0;
-       entry->meta_data_addr_low = 0;
-       entry->data_size_byte = data_size;
-       entry->num_register_entries = 0;
-
-       if (fw_type == UCODE_ID_RLC_G)
-               entry->flags = 1;
-       else
-               entry->flags = 0;
-
-       return 0;
-}
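
For the two MEC jump tables the entry does not cover the whole ucode image: it is rebased by jt_offset dwords and clamped to jt_size dwords, with the << 2 converting dwords to bytes. A worked example with hypothetical header values:

	/* header->jt_offset = 0x100 dwords, header->jt_size = 0x40 dwords,
	 * image at gpu_addr = 0x8000:
	 *   entry address = 0x8000 + (0x100 << 2) = 0x8400
	 *   entry size    = 0x40 << 2             = 0x100 bytes
	 */
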
-
-static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
-{
-       struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
-       struct SMU_DRAMData_TOC *toc;
-       uint32_t fw_to_load;
-
-       WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
-       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
-       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
-       toc = (struct SMU_DRAMData_TOC *)private->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       if (!adev->firmware.smu_load)
-               return 0;
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for RLC\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for CE\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for PFP\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for ME\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-               return -EINVAL;
-       }
-
-       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-               return -EINVAL;
-       }
-
-       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK |
-                       UCODE_ID_SDMA0_MASK |
-                       UCODE_ID_SDMA1_MASK |
-                       UCODE_ID_CP_CE_MASK |
-                       UCODE_ID_CP_ME_MASK |
-                       UCODE_ID_CP_PFP_MASK |
-                       UCODE_ID_CP_MEC_MASK;
-
-       if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-               DRM_ERROR("Fail to request SMU load ucode\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
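
Each populate call above differs only in the UCODE_ID and the error string, so the sequence could equally be table-driven. A behavior-equivalent sketch of the fragment (i, toc, and adev as in tonga_smu_request_load_fw()):

	static const uint32_t tonga_fw_list[] = {
		UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP, UCODE_ID_CP_ME,
		UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1, UCODE_ID_CP_MEC_JT2,
		UCODE_ID_SDMA0, UCODE_ID_SDMA1,
	};

	for (i = 0; i < ARRAY_SIZE(tonga_fw_list); i++) {
		if (tonga_smu_populate_single_firmware_entry(adev, tonga_fw_list[i],
				&toc->entry[toc->num_entries++])) {
			DRM_ERROR("Failed to get firmware entry %u\n", tonga_fw_list[i]);
			return -EINVAL;
		}
	}
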
-
-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-       switch (fw_type) {
-               case AMDGPU_UCODE_ID_SDMA0:
-                       return UCODE_ID_SDMA0_MASK;
-               case AMDGPU_UCODE_ID_SDMA1:
-                       return UCODE_ID_SDMA1_MASK;
-               case AMDGPU_UCODE_ID_CP_CE:
-                       return UCODE_ID_CP_CE_MASK;
-               case AMDGPU_UCODE_ID_CP_PFP:
-                       return UCODE_ID_CP_PFP_MASK;
-               case AMDGPU_UCODE_ID_CP_ME:
-                       return UCODE_ID_CP_ME_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK;
-               case AMDGPU_UCODE_ID_CP_MEC2:
-                       return UCODE_ID_CP_MEC_MASK;
-               case AMDGPU_UCODE_ID_RLC_G:
-                       return UCODE_ID_RLC_G_MASK;
-               default:
-                       DRM_ERROR("ucode type is out of range!\n");
-                       return 0;
-       }
-}
-
-static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
-                                       uint32_t fw_type)
-{
-       uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
-       int i;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("check firmware loading failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
-       int result;
-       uint32_t val;
-       int i;
-
-       /* Assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       result = tonga_smu_upload_firmware_image(adev);
-       if (result)
-               return result;
-
-       /* Clear status */
-       WREG32_SMC(ixSMU_STATUS, 0);
-
-       /* Enable clock */
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       /* De-assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       /* Set SMU Auto Start */
-       val = RREG32_SMC(ixSMU_INPUT_DATA);
-       val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
-       WREG32_SMC(ixSMU_INPUT_DATA, val);
-
-       /* Clear firmware interrupt enable flag */
-       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixRCU_UC_EVENTS);
-               if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Interrupt is not enabled by firmware\n");
-               return -EINVAL;
-       }
-
-       /* Call Test SMU message with 0x20000 offset
-        * to trigger SMU start
-        */
-       tonga_send_msg_to_smc_offset(adev);
-
-       /* Wait for done bit to be set */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixSMU_STATUS);
-               if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Timeout for SMU start\n");
-               return -EINVAL;
-       }
-
-       /* Check pass/failed indicator */
-       val = RREG32_SMC(ixSMU_STATUS);
-       if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
-               DRM_ERROR("SMU Firmware start failed\n");
-               return -EINVAL;
-       }
-
-       /* Wait for firmware to initialize */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixFIRMWARE_FLAGS);
-               if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("SMU firmware initialization failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
-       int i, result;
-       uint32_t val;
-
-       /* wait for smc boot up */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixRCU_UC_EVENTS);
-               val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
-               if (val)
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("SMC boot sequence is not completed\n");
-               return -EINVAL;
-       }
-
-       /* Clear firmware interrupt enable flag */
-       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-       /* Assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       result = tonga_smu_upload_firmware_image(adev);
-       if (result)
-               return result;
-
-       /* Set smc instruct start point at 0x0 */
-       tonga_program_jump_on_start(adev);
-
-       /* Enable clock */
-       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-       /* De-assert reset */
-       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-       /* Wait for firmware to initialize */
-       for (i = 0; i < adev->usec_timeout; i++) {
-               val = RREG32_SMC(ixFIRMWARE_FLAGS);
-               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-                       break;
-               udelay(1);
-       }
-
-       if (i == adev->usec_timeout) {
-               DRM_ERROR("Timeout for SMC firmware initialization\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int tonga_smu_start(struct amdgpu_device *adev)
-{
-       int result;
-       uint32_t val;
-
-       if (!tonga_is_smc_ram_running(adev)) {
-               val = RREG32_SMC(ixSMU_FIRMWARE);
-               if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
-                       result = tonga_smu_start_in_non_protection_mode(adev);
-                       if (result)
-                               return result;
-               } else {
-                       result = tonga_smu_start_in_protection_mode(adev);
-                       if (result)
-                               return result;
-               }
-       }
-
-       return tonga_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
-       .check_fw_load_finish = tonga_smu_check_fw_load_finish,
-       .request_smu_load_fw = NULL,
-       .request_smu_specific_fw = NULL,
-};
-
-int tonga_smu_init(struct amdgpu_device *adev)
-{
-       struct tonga_smu_private_data *private;
-       uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       uint32_t smu_internal_buffer_size = 200*4096;
-       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-       struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
-       uint64_t mc_addr;
-       void *toc_buf_ptr;
-       void *smu_buf_ptr;
-       int ret;
-
-       private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
-       if (NULL == private)
-               return -ENOMEM;
-
-       /* allocate firmware buffers */
-       if (adev->firmware.smu_load)
-               amdgpu_ucode_init_bo(adev);
-
-       adev->smu.priv = private;
-       adev->smu.fw_flags = 0;
-
-       /* Allocate FW image data structure and header buffer */
-       ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM,
-                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                              NULL, NULL, toc_buf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Allocate buffer for SMU internal buffer */
-       ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM,
-                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                              NULL, NULL, smu_buf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Retrieve GPU address for header buffer and internal buffer */
-       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-       if (ret) {
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to reserve the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to pin the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.toc_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to map the TOC buffer\n");
-               return -EINVAL;
-       }
-
-       amdgpu_bo_unreserve(adev->smu.toc_buf);
-       private->header_addr_low = lower_32_bits(mc_addr);
-       private->header_addr_high = upper_32_bits(mc_addr);
-       private->header = toc_buf_ptr;
-
-       ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
-       if (ret) {
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to reserve the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to pin the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
-       if (ret) {
-               amdgpu_bo_unreserve(adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.smu_buf);
-               amdgpu_bo_unref(&adev->smu.toc_buf);
-               DRM_ERROR("Failed to map the SMU internal buffer\n");
-               return -EINVAL;
-       }
-
-       amdgpu_bo_unreserve(adev->smu.smu_buf);
-       private->smu_buffer_addr_low = lower_32_bits(mc_addr);
-       private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
-       adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
-
-       return 0;
-}
-
-int tonga_smu_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_unref(&adev->smu.toc_buf);
-       amdgpu_bo_unref(&adev->smu.smu_buf);
-       kfree(adev->smu.priv);
-       adev->smu.priv = NULL;
-       if (adev->firmware.fw_buf)
-               amdgpu_ucode_fini_bo(adev);
-
-       return 0;
-}
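
tonga_smu_init() above also illustrates the recurring amdgpu buffer-object dance: create, reserve, pin (which yields the GPU-visible MC address), kmap (which yields a CPU pointer), then unreserve, with each failure path unwinding the references taken so far. Reduced to a single buffer, the skeleton looks like this (a sketch mirroring the calls used above):

	struct amdgpu_bo *bo;
	uint64_t mc_addr;	/* GPU-visible address, valid while pinned */
	void *cpu_ptr;		/* kernel mapping */
	int r;

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, NULL, NULL, &bo);
	if (r)
		return r;
	r = amdgpu_bo_reserve(bo, false);
	if (r)
		goto err_unref;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
	if (r)
		goto err_unreserve;
	r = amdgpu_bo_kmap(bo, &cpu_ptr);
	if (r)
		goto err_unreserve;
	amdgpu_bo_unreserve(bo);
	/* ... mc_addr and cpu_ptr are now usable ... */
	return 0;

	err_unreserve:
		amdgpu_bo_unreserve(bo);
	err_unref:
		amdgpu_bo_unref(&bo);
		return r;
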
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
deleted file mode 100644 (file)
index c031ff9..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_SMUMGR_H
-#define TONGA_SMUMGR_H
-
-#include "tonga_ppsmc.h"
-
-int tonga_smu_init(struct amdgpu_device *adev);
-int tonga_smu_fini(struct amdgpu_device *adev);
-int tonga_smu_start(struct amdgpu_device *adev);
-
-struct tonga_smu_private_data
-{
-       uint8_t *header;
-       uint32_t smu_buffer_addr_high;
-       uint32_t smu_buffer_addr_low;
-       uint32_t header_addr_high;
-       uint32_t header_addr_low;
-};
-
-#endif
index 132e613ed67425132ecf8a770ccb4888d8525b81..f6c941550b8ffd292524dc42cd2dc8504824d56a 100644 (file)
@@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle)
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
        return r;
@@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v4_2_ring_emit_hdp_flush */
+               2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+               14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
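
The two new callbacks let common ring code compute a worst-case dword budget before emitting a frame, instead of hard-coding per-IP sizes. A plausible consumer (sketch; ring_begin_frame() is made up, amdgpu_ring_alloc() is the existing reservation helper):

	static int ring_begin_frame(struct amdgpu_ring *ring, unsigned num_ibs)
	{
		unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
			       num_ibs * ring->funcs->get_emit_ib_size(ring);

		return amdgpu_ring_alloc(ring, ndw);	/* fails if it cannot fit */
	}
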
index 101de136ba63aacdf1034e6fd5237bf0cbe23011..400c16fe579e0ac970110b9106172aba518a56a3 100644 (file)
@@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle)
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
        return r;
@@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v5_0_ring_emit_hdp_flush */
+               2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+               14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
+}
+
 static bool uvd_v5_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
index 7f21102bfb99d9145ee118d6800e8abb48d1a934..e0fd9f21ed9585ce37c310605f2ea56524fa60bc 100644 (file)
@@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
        return r;
@@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 
        uvd_v6_0_mc_resume(adev);
 
-       /* Set dynamic clock gating in S/W control mode */
-       if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
-               uvd_v6_0_set_sw_clock_gating(adev);
-       } else {
-               /* disable clock gating */
-               uint32_t data = RREG32(mmUVD_CGC_CTRL);
-               data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-               WREG32(mmUVD_CGC_CTRL, data);
-       }
+       /* disable clock gating */
+       WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
 
        /* disable interrupt */
-       WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+       WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
 
        /* stall UMC and register bus before resetting VCPU */
-       WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+       WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);
 
        /* put LMI, VCPU, RBC etc... into reset */
@@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        mdelay(5);
 
        /* take UVD block out of reset */
-       WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+       WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);
 
        /* initialize UVD memory controller */
@@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 
        /* enable UMC */
-       WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+       WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
 
        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
@@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
                        break;
 
                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
-               WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-                               ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
-               WREG32_P(mmUVD_SOFT_RESET, 0,
-                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }
@@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
+       /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
-       tmp = 0;
-       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+       tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-       /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);
 
        /* set the write pointer delay */
@@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
 
-       WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+       WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 
        return 0;
 }
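
The conversions in this file replace open-coded WREG32_P() read-modify-write sequences with WREG32_FIELD(), which derives the mask and shift from the generated register headers. Roughly what WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1) performs (a sketch of the effect, not the exact macro body):

	u32 tmp = RREG32(mmUVD_LMI_CTRL2);
	tmp &= ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	tmp |= (1 << UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT) &
	       UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32(mmUVD_LMI_CTRL2, tmp);
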
@@ -735,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0xE);
 }
 
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v6_0_ring_emit_hdp_flush */
+               2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+               10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+               14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v6_0_ring_emit_hdp_flush */
+               2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+               10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+               20 + /* uvd_v6_0_ring_emit_vm_flush */
+               14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -748,20 +763,82 @@ static int uvd_v6_0_wait_for_idle(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+               if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
 }
 
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
+static int uvd_v6_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+           REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+           (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+       if (srbm_soft_reset) {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
+               adev->uvd.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
+               adev->uvd.srbm_soft_reset = 0;
+       }
+       return 0;
+}
+
+static int uvd_v6_0_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+               return 0;
 
        uvd_v6_0_stop(adev);
+       return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+               return 0;
+       srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+       if (srbm_soft_reset) {
+               u32 tmp;
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+               return 0;
 
-       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
-                       ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);
 
        return uvd_v6_0_start(adev);
@@ -902,21 +979,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-       static int curstate = -1;
 
        if (adev->asic_type == CHIP_FIJI ||
-                       adev->asic_type == CHIP_POLARIS10)
-               uvd_v6_set_bypass_mode(adev, enable);
+           adev->asic_type == CHIP_POLARIS10)
+               uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE);
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;
 
-       if (curstate == state)
-               return 0;
-
-       curstate = state;
-       if (enable) {
+       if (state == AMD_CG_STATE_GATE) {
                /* disable HW gating and enable Sw gating */
                uvd_v6_0_set_sw_clock_gating(adev);
        } else {
@@ -946,6 +1017,8 @@ static int uvd_v6_0_set_powergating_state(void *handle,
        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;
 
+       WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
                return 0;
@@ -966,7 +1039,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
+       .check_soft_reset = uvd_v6_0_check_soft_reset,
+       .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
+       .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
 };
@@ -986,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1005,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
index 80a37a60218136b5912ec7db9be5e8d21fb9ebd0..76e64ad04a53e2f71453c301e4ec0bef022dd9ce 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
 #include "cikd.h"
-
 #include "vce/vce_2_0_d.h"
 #include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
 #define VCE_V2_0_FW_SIZE       (256 * 1024)
 #define VCE_V2_0_STACK_SIZE    (64 * 1024)
 #define VCE_V2_0_DATA_SIZE     (23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
 }
 
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+                       if (status & 0x337f)
+                               return 0;
+                       mdelay(10);
+               }
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       uint32_t status = RREG32(mmVCE_STATUS);
+
+                       if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+                               return 0;
+                       mdelay(10);
+               }
+
+               DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+               WREG32_P(mmVCE_SOFT_RESET,
+                       VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmVCE_SOFT_RESET, 0,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
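
As written, each of these helpers polls for at most 10 * 100 * 10 ms = 10 seconds; vce_v2_0_firmware_loaded() additionally pulses ECPU_SOFT_RESET between the ten outer attempts, preserving the retry behavior of the loop it replaces in vce_v2_0_start() below.
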
+
 /**
  * vce_v2_0_start - start VCE block
  *
@@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
 static int vce_v2_0_start(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
-       int i, j, r;
+       int r;
 
        vce_v2_0_mc_resume(adev);
 
@@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
-       WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
-       WREG32_P(mmVCE_SOFT_RESET,
-                VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+       WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+       WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
+       WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 
-       WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-       for (i = 0; i < 10; ++i) {
-               uint32_t status;
-               for (j = 0; j < 100; ++j) {
-                       status = RREG32(mmVCE_STATUS);
-                       if (status & 2)
-                               break;
-                       mdelay(10);
-               }
-               r = 0;
-               if (status & 2)
-                       break;
-
-               DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-               WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-               mdelay(10);
-               WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-               mdelay(10);
-               r = -1;
-       }
+       r = vce_v2_0_firmware_loaded(adev);
 
        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);
@@ -173,6 +193,8 @@ static int vce_v2_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vce.num_rings = 2;
+
        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);
 
@@ -182,7 +204,7 @@ static int vce_v2_0_early_init(void *handle)
 static int vce_v2_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCE */
@@ -199,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vce.ring[0];
-       sprintf(ring->name, "vce0");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
-
-       ring = &adev->vce.ring[1];
-       sprintf(ring->name, "vce1");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               ring = &adev->vce.ring[i];
+               sprintf(ring->name, "vce%d", i);
+               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+                                    &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -234,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle)
 
 static int vce_v2_0_hw_init(void *handle)
 {
-       struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        r = vce_v2_0_start(adev);
+       /* this error means the VCPU is not in a running state; just skip the ring test rather than failing driver init */
        if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
                return 0;
 
-       ring = &adev->vce.ring[0];
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
-               return r;
-       }
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].ready = false;
 
-       ring = &adev->vce.ring[1];
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               if (r)
+                       return r;
+               else
+                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
@@ -338,47 +349,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
 
 static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
 {
-       u32 orig, tmp;
+       if (vce_v2_0_wait_for_idle(adev)) {
+               DRM_INFO("VCE is busy, can't set clock gating");
+               return;
+       }
 
-       if (gated) {
-               if (vce_v2_0_wait_for_idle(adev)) {
-                       DRM_INFO("VCE is busy, Can't set clock gateing");
-                       return;
-               }
-               WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-               WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-               mdelay(100);
-               WREG32(mmVCE_STATUS, 0);
-       } else {
-               WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-               WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-               mdelay(100);
+       WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+       if (vce_v2_0_lmi_clean(adev)) {
+               DRM_INFO("LMI is busy, can't set clock gating");
+               return;
        }
 
-       tmp = RREG32(mmVCE_CLOCK_GATING_B);
-       tmp &= ~0x00060006;
+       WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+       WREG32_P(mmVCE_SOFT_RESET,
+                VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+       WREG32(mmVCE_STATUS, 0);
+
+       if (gated)
+               WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+       /* LMI_MC/LMI_UMC always stay in dynamic mode: {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
        if (gated) {
-               tmp |= 0xe10000;
+               /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+               WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
        } else {
-               tmp |= 0xe1;
-               tmp &= ~0xe10000;
+               /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+               WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
        }
-       WREG32(mmVCE_CLOCK_GATING_B, tmp);
 
-       orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-       tmp &= ~0x1fe000;
-       tmp &= ~0xff000000;
-       if (tmp != orig)
-               WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+       /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
+       WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
 
-       orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-       tmp &= ~0x3fc;
-       if (tmp != orig)
-               WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+       /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+       WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
 
-       if (gated)
-               WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-       WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+       WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+       if (!gated) {
+               WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+               mdelay(100);
+               WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+               vce_v2_0_firmware_loaded(adev);
+               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+       }
 }
 
 static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +472,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
 
        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-       WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-                ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+       WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 
        vce_v2_0_init_cg(adev);
 }
@@ -474,11 +486,11 @@ static bool vce_v2_0_is_idle(void *handle)
 
 static int vce_v2_0_wait_for_idle(void *handle)
 {
-       unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       unsigned i;
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+               if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
@@ -488,8 +500,7 @@ static int vce_v2_0_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-                       ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+       WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);
 
        return vce_v2_0_start(adev);
@@ -516,10 +527,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
-               amdgpu_fence_process(&adev->vce.ring[0]);
-               break;
        case 1:
-               amdgpu_fence_process(&adev->vce.ring[1]);
+               amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -530,11 +539,28 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+       if (enable)
+               tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+       else
+               tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+       WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool enable = (state == AMD_CG_STATE_GATE);
+
+       vce_v2_0_set_bypass_mode(adev, enable);
 
        if (state == AMD_CG_STATE_GATE)
                gate = true;
@@ -596,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+       .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
-       adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
index c271abffd8dd7475ea0b25101a86d1b32b53696c..a6b4e27bee899544340f68aac3a0593ca30a24d7 100644 (file)
@@ -37,6 +37,9 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
@@ -67,8 +70,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
-       else
+       else if (ring == &adev->vce.ring[1])
                return RREG32(mmVCE_RB_RPTR2);
+       else
+               return RREG32(mmVCE_RB_RPTR3);
 }
 
 /**
@@ -84,8 +89,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
-       else
+       else if (ring == &adev->vce.ring[1])
                return RREG32(mmVCE_RB_WPTR2);
+       else
+               return RREG32(mmVCE_RB_WPTR3);
 }
 
 /**
@@ -101,108 +108,80 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
-       else
+       else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
+       else
+               WREG32(mmVCE_RB_WPTR3, ring->wptr);
 }
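
The wptr setter now dispatches across three ring-buffer registers, mirroring the rptr/wptr getters above. A hypothetical table-driven alternative (illustration only; the patch keeps the explicit branches):

	/* Hypothetical: index the per-ring registers via pointer
	 * arithmetic, relying on ring always pointing into
	 * adev->vce.ring[].
	 */
	static const u32 vce_v3_0_wptr_regs[] = {
		mmVCE_RB_WPTR, mmVCE_RB_WPTR2, mmVCE_RB_WPTR3,
	};

	static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;

		WREG32(vce_v3_0_wptr_regs[ring - adev->vce.ring], ring->wptr);
	}

The if/else form keeps each register name greppable, which is arguably why it was kept.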
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
 {
-       u32 tmp, data;
-
-       tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
-       if (override)
-               data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-       else
-               data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
-       if (tmp != data)
-               WREG32(mmVCE_RB_ARB_CTRL, data);
+       WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
 }
 
 static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
                                             bool gated)
 {
-       u32 tmp, data;
+       u32 data;
+
        /* Set Override to disable Clock Gating */
        vce_v3_0_override_vce_clock_gating(adev, true);
 
-       if (!gated) {
-               /* Force CLOCK ON for VCE_CLOCK_GATING_B,
-                * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-                * VREG can be FORCE ON or set to Dynamic, but can't be OFF
-                */
-               tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+       /* This function enables MGCG which is controlled by firmware.
+        * With the clocks in the gated state the core is still
+        * accessible but the firmware will throttle the clocks on the
+        * fly as necessary.
+        */
+       if (gated) {
+               data = RREG32(mmVCE_CLOCK_GATING_B);
                data |= 0x1ff;
                data &= ~0xef0000;
-               if (tmp != data)
-                       WREG32(mmVCE_CLOCK_GATING_B, data);
+               WREG32(mmVCE_CLOCK_GATING_B, data);
 
-               /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
-                * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-                */
-               tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+               data = RREG32(mmVCE_UENC_CLOCK_GATING);
                data |= 0x3ff000;
                data &= ~0xffc00000;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_CLOCK_GATING, data);
+               WREG32(mmVCE_UENC_CLOCK_GATING, data);
 
-               /* set VCE_UENC_CLOCK_GATING_2 */
-               tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+               data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
                data |= 0x2;
-               data &= ~0x2;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+               data &= ~0x00010000;
+               WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
-               /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
-               tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+               data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                data |= 0x37f;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+               WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
-               /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
-               tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+               data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
                data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-                               VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-                               VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-                               0x8;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+                       VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+                       VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+                       0x8;
+               WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
        } else {
-               /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
-                * {*, *_FORCE_OFF} = {*, 1}
-                * set VREG to Dynamic, as it can't be OFF
-                */
-               tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+               data = RREG32(mmVCE_CLOCK_GATING_B);
                data &= ~0x80010;
                data |= 0xe70008;
-               if (tmp != data)
-                       WREG32(mmVCE_CLOCK_GATING_B, data);
-               /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
-                * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
-                * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
-                */
-               tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+               WREG32(mmVCE_CLOCK_GATING_B, data);
+
+               data = RREG32(mmVCE_UENC_CLOCK_GATING);
                data |= 0xffc00000;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_CLOCK_GATING, data);
-               /* Set VCE_UENC_CLOCK_GATING_2 */
-               tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+               WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+               data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
                data |= 0x10000;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
-               /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
-               tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+               WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+               data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                data &= ~0xffc00000;
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
-               /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
-               tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+               WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+               data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
                data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-                               VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-                               VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-                               0x8);
-               if (tmp != data)
-                       WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+                         VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+                         VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+                         0x8);
+               WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
        }
        vce_v3_0_override_vce_clock_gating(adev, false);
 }
@@ -221,12 +200,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
                }
 
                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-               WREG32_P(mmVCE_SOFT_RESET,
-                       VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
                mdelay(10);
-               WREG32_P(mmVCE_SOFT_RESET, 0,
-                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
                mdelay(10);
        }
 
@@ -259,43 +235,34 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
+       ring = &adev->vce.ring[2];
+       WREG32(mmVCE_RB_RPTR3, ring->wptr);
+       WREG32(mmVCE_RB_WPTR3, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               if (idx == 0)
-                       WREG32_P(mmGRBM_GFX_INDEX, 0,
-                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-               else
-                       WREG32_P(mmGRBM_GFX_INDEX,
-                               GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
                vce_v3_0_mc_resume(adev, idx);
-
-               WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
-                        ~VCE_STATUS__JOB_BUSY_MASK);
+               WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
                else
-                       WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-                               ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
-               WREG32_P(mmVCE_SOFT_RESET, 0,
-                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+                       WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
 
+               WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
                mdelay(100);
 
                r = vce_v3_0_firmware_loaded(adev);
 
                /* clear BUSY flag */
-               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-
-               /* Set Clock-Gating off */
-               if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
-                       vce_v3_0_set_vce_sw_clock_gating(adev, false);
+               WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
                if (r) {
                        DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -304,7 +271,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
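
The per-instance programming in vce_v3_0_start() follows a fixed skeleton: select an instance through GRBM_GFX_INDEX while holding grbm_idx_mutex, program it, and always restore instance 0 before unlocking. Condensed from the hunk above:

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;
		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
		/* ... per-instance register writes ... */
	}
	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

vce_v3_0_stop() and vce_v3_0_set_clockgating_state() below use the same shape.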
@@ -319,33 +286,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               if (idx == 0)
-                       WREG32_P(mmGRBM_GFX_INDEX, 0,
-                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-               else
-                       WREG32_P(mmGRBM_GFX_INDEX,
-                               GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
                else
-                       WREG32_P(mmVCE_VCPU_CNTL, 0,
-                               ~VCE_VCPU_CNTL__CLK_EN_MASK);
+                       WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
                /* hold on ECPU */
-               WREG32_P(mmVCE_SOFT_RESET,
-                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 
                /* clear BUSY flag */
-               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+               WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
                /* Set Clock-Gating off */
                if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
                        vce_v3_0_set_vce_sw_clock_gating(adev, false);
        }
 
-       WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -399,6 +358,8 @@ static int vce_v3_0_early_init(void *handle)
            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
                return -ENOENT;
 
+       adev->vce.num_rings = 3;
+
        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);
 
@@ -409,7 +370,7 @@ static int vce_v3_0_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
 
        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -425,19 +386,14 @@ static int vce_v3_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vce.ring[0];
-       sprintf(ring->name, "vce0");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
-
-       ring = &adev->vce.ring[1];
-       sprintf(ring->name, "vce1");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               ring = &adev->vce.ring[i];
+               sprintf(ring->name, "vce%d", i);
+               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+                                    &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -467,10 +423,10 @@ static int vce_v3_0_hw_init(void *handle)
        if (r)
                return r;
 
-       adev->vce.ring[0].ready = false;
-       adev->vce.ring[1].ready = false;
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].ready = false;
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
                if (r)
                        return r;
@@ -534,7 +490,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-       WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+       WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
 
        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -573,9 +529,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
        }
 
        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-       WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-                ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+       WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 }
 
 static bool vce_v3_0_is_idle(void *handle)
@@ -601,20 +555,108 @@ static int vce_v3_0_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
+#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
+#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+                                     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static int vce_v3_0_check_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 srbm_soft_reset = 0;
+
+       /* According to the VCE team, we should use VCE_STATUS instead of
+        * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
+        * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+        * instance's registers are accessed
+        * (0 for the 1st instance, 0x10 for the 2nd instance).
+        *
+        * VCE_STATUS
+        * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+        * |----+----+-----------+----+----+----+----------+---------+----|
+        * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
+        *
+        * The VCE team suggests using bits 3 to 6 for the busy status check.
+        */
+       mutex_lock(&adev->grbm_idx_mutex);
+       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+       }
+       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+       if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+       }
+       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+
+       if (srbm_soft_reset) {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
+               adev->vce.srbm_soft_reset = srbm_soft_reset;
+       } else {
+               adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
+               adev->vce.srbm_soft_reset = 0;
+       }
+       mutex_unlock(&adev->grbm_idx_mutex);
+       return 0;
+}
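
When either instance reports busy, both VCE soft-reset bits are latched, so the accumulated value reduces to a constant; in terms of the mask macros used elsewhere in this file it is equivalent to:

	u32 srbm_soft_reset = SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
			      SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;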
+
 static int vce_v3_0_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       u32 mask = 0;
+       u32 srbm_soft_reset;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+               return 0;
+       srbm_soft_reset = adev->vce.srbm_soft_reset;
+
+       if (srbm_soft_reset) {
+               u32 tmp;
 
-       mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
-       mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+               return 0;
 
-       WREG32_P(mmSRBM_SOFT_RESET, mask,
-                ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
-                  SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
        mdelay(5);
 
-       return vce_v3_0_start(adev);
+       return vce_v3_0_suspend(adev);
+}
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+               return 0;
+
+       mdelay(5);
+
+       return vce_v3_0_resume(adev);
 }
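
Together with check_soft_reset above, these two hooks split the old monolithic soft reset into stages. The sequencing the amdgpu core is expected to apply, inferred from the hook names and the .hang flag handling (a sketch, not core code):

	funcs->check_soft_reset(handle);        /* latches .hang and the reset mask */
	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) {
		funcs->pre_soft_reset(handle);  /* suspend the block */
		funcs->soft_reset(handle);      /* pulse SRBM_SOFT_RESET */
		funcs->post_soft_reset(handle); /* resume the block */
	}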
 
 static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -637,13 +679,12 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
 {
        DRM_DEBUG("IH: VCE\n");
 
-       WREG32_P(mmVCE_SYS_INT_STATUS,
-               VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-               ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+       WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
 
        switch (entry->src_data) {
        case 0:
        case 1:
+       case 2:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
                break;
        default:
@@ -655,7 +696,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
        u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -674,8 +715,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        int i;
 
-       if (adev->asic_type == CHIP_POLARIS10)
-               vce_v3_set_bypass_mode(adev, enable);
+       if ((adev->asic_type == CHIP_POLARIS10) ||
+           (adev->asic_type == CHIP_TONGA))
+               vce_v3_0_set_bypass_mode(adev, enable);
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
                return 0;
@@ -686,13 +728,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                if (adev->vce.harvest_config & (1 << i))
                        continue;
 
-               if (i == 0)
-                       WREG32_P(mmGRBM_GFX_INDEX, 0,
-                                       ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-               else
-                       WREG32_P(mmGRBM_GFX_INDEX,
-                                       GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-                                       ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -711,7 +747,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                vce_v3_0_set_vce_sw_clock_gating(adev, enable);
        }
 
-       WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -739,6 +775,60 @@ static int vce_v3_0_set_powergating_state(void *handle,
                return vce_v3_0_start(adev);
 }
 
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+       amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+                        unsigned int vm_id, uint64_t pd_addr)
+{
+       amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return 5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               4 + /* vce_v3_0_emit_pipeline_sync */
+               6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* vce_v3_0_emit_vm_flush */
+               4 + /* vce_v3_0_emit_pipeline_sync */
+               6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
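
These sizes can be cross-checked against the emit functions above, since each amdgpu_ring_write() call emits exactly one dword:

	/* vce_v3_0_ring_emit_ib       : 5 writes -> 5 dwords
	 * vce_v3_0_emit_pipeline_sync : 4 writes -> 4 dwords
	 * vce_v3_0_emit_vm_flush      : 6 writes -> 6 dwords
	 * fence                       : 6 dwords per emission, per the
	 *                               comments in the size helpers
	 */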
+
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
        .name = "vce_v3_0",
        .early_init = vce_v3_0_early_init,
@@ -751,12 +841,15 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
        .resume = vce_v3_0_resume,
        .is_idle = vce_v3_0_is_idle,
        .wait_for_idle = vce_v3_0_wait_for_idle,
+       .check_soft_reset = vce_v3_0_check_soft_reset,
+       .pre_soft_reset = vce_v3_0_pre_soft_reset,
        .soft_reset = vce_v3_0_soft_reset,
+       .post_soft_reset = vce_v3_0_post_soft_reset,
        .set_clockgating_state = vce_v3_0_set_clockgating_state,
        .set_powergating_state = vce_v3_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
@@ -769,12 +862,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+       .get_rptr = vce_v3_0_ring_get_rptr,
+       .get_wptr = vce_v3_0_ring_get_wptr,
+       .set_wptr = vce_v3_0_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = vce_v3_0_ring_emit_ib,
+       .emit_vm_flush = vce_v3_0_emit_vm_flush,
+       .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+       .emit_fence = amdgpu_vce_ring_emit_fence,
+       .test_ring = amdgpu_vce_ring_test_ring,
+       .test_ib = amdgpu_vce_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vce_ring_begin_use,
+       .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
-       adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+       int i;
+
+       if (adev->asic_type >= CHIP_STONEY) {
+               for (i = 0; i < adev->vce.num_rings; i++)
+                       adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+               DRM_INFO("VCE enabled in VM mode\n");
+       } else {
+               for (i = 0; i < adev->vce.num_rings; i++)
+                       adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+               DRM_INFO("VCE enabled in physical mode\n");
+       }
 }
 
 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
index 03a31c53aec3d18e67b42cbadb4ab30f9e82c766..c0d9aad7126f4a16e067e19e76067d0a3248e8d9 100644 (file)
 #if defined(CONFIG_DRM_AMD_ACP)
 #include "amdgpu_acp.h"
 #endif
+#include "dce_virtual.h"
 
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -444,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
 {
-       u32 caps = 0;
-       u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-
-       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-               caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
-
-       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-               caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
-       return caps;
+       uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+       /* bit 0: 0 means PF, 1 means VF */
+       /* bit 31: 0 means IOV disabled, 1 means enabled */
+       if (reg & 1)
+               adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+       if (reg & 0x80000000)
+               adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+       if (reg == 0) {
+               if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
+                       adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+       }
 }
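
The open-coded bit tests match the fields the removed helper extracted with REG_GET_FIELD(), which is how the magic constants can be read (correspondence inferred from the deleted code above):

	/* reg & 1          ~ BIF_IOV_FUNC_IDENTIFIER.FUNC_IDENTIFIER (VF)
	 * reg & 0x80000000 ~ BIF_IOV_FUNC_IDENTIFIER.IOV_ENABLE
	 */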
 
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -822,6 +829,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 7,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &gmc_v7_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &iceland_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &sdma_v2_4_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -890,6 +951,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &tonga_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 10,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 5,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &uvd_v5_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -958,6 +1087,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 5,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &tonga_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 10,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &uvd_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1026,6 +1223,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
        },
 };
 
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &tonga_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 11,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 6,
+               .minor = 3,
+               .rev = 0,
+               .funcs = &uvd_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
        /* ORDER MATTERS! */
@@ -1103,34 +1368,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 #endif
 };
 
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cz_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 11,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_virtual_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &uvd_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+#if defined(CONFIG_DRM_AMD_ACP)
+       {
+               .type = AMD_IP_BLOCK_TYPE_ACP,
+               .major = 2,
+               .minor = 2,
+               .rev = 0,
+               .funcs = &acp_ip_funcs,
+       },
+#endif
+};
+
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-       switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               adev->ip_blocks = topaz_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
-               break;
-       case CHIP_FIJI:
-               adev->ip_blocks = fiji_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
-               break;
-       case CHIP_TONGA:
-               adev->ip_blocks = tonga_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
-               break;
-       case CHIP_POLARIS11:
-       case CHIP_POLARIS10:
-               adev->ip_blocks = polaris11_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
-               break;
-       case CHIP_CARRIZO:
-       case CHIP_STONEY:
-               adev->ip_blocks = cz_ip_blocks;
-               adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
-               break;
-       default:
-               /* FIXME: not supported yet */
-               return -EINVAL;
+       if (adev->enable_virtual_display) {
+               switch (adev->asic_type) {
+               case CHIP_TOPAZ:
+                       adev->ip_blocks = topaz_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+                       break;
+               case CHIP_FIJI:
+                       adev->ip_blocks = fiji_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+                       break;
+               case CHIP_TONGA:
+                       adev->ip_blocks = tonga_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+                       break;
+               case CHIP_POLARIS11:
+               case CHIP_POLARIS10:
+                       adev->ip_blocks = polaris11_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+                       break;
+               case CHIP_CARRIZO:
+               case CHIP_STONEY:
+                       adev->ip_blocks = cz_ip_blocks_vd;
+                       adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+                       break;
+               default:
+                       /* FIXME: not supported yet */
+                       return -EINVAL;
+               }
+       } else {
+               switch (adev->asic_type) {
+               case CHIP_TOPAZ:
+                       adev->ip_blocks = topaz_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+                       break;
+               case CHIP_FIJI:
+                       adev->ip_blocks = fiji_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+                       break;
+               case CHIP_TONGA:
+                       adev->ip_blocks = tonga_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+                       break;
+               case CHIP_POLARIS11:
+               case CHIP_POLARIS10:
+                       adev->ip_blocks = polaris11_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+                       break;
+               case CHIP_CARRIZO:
+               case CHIP_STONEY:
+                       adev->ip_blocks = cz_ip_blocks;
+                       adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+                       break;
+               default:
+                       /* FIXME: not supported yet */
+                       return -EINVAL;
+               }
        }
 
        return 0;
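
The two switch statements differ only in which table each case assigns. A hypothetical consolidation (illustration only; PICK() is an invented helper, not part of the patch):

	#define PICK(chip)						\
		do {							\
			if (adev->enable_virtual_display) {		\
				adev->ip_blocks = chip##_ip_blocks_vd;	\
				adev->num_ip_blocks =			\
					ARRAY_SIZE(chip##_ip_blocks_vd);\
			} else {					\
				adev->ip_blocks = chip##_ip_blocks;	\
				adev->num_ip_blocks =			\
					ARRAY_SIZE(chip##_ip_blocks);	\
			}						\
		} while (0)

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		PICK(topaz);
		break;
	/* ... fiji, tonga, polaris11, cz ... */
	default:
		return -EINVAL;
	}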
@@ -1154,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 {
        .read_disabled_bios = &vi_read_disabled_bios,
        .read_bios_from_rom = &vi_read_bios_from_rom,
+       .detect_hw_virtualization = vi_detect_hw_virtualization,
        .read_register = &vi_read_register,
        .reset = &vi_asic_reset,
        .set_vga_state = &vi_vga_set_state,
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
-       .get_virtual_caps = &vi_get_virtual_caps,
 };
 
 static int vi_common_early_init(void *handle)
@@ -1248,8 +1621,17 @@ static int vi_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
-                       AMD_CG_SUPPORT_SDMA_LS;
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG;
+               /* rev0 hardware requires workarounds to support PG */
                adev->pg_flags = 0;
+               if (adev->rev_id != 0x00) {
+                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                               AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_GFX_PIPELINE |
+                               AMD_PG_SUPPORT_UVD |
+                               AMD_PG_SUPPORT_VCE;
+               }
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case CHIP_STONEY:
@@ -1267,14 +1649,24 @@ static int vi_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
-                       AMD_CG_SUPPORT_SDMA_LS;
-               adev->external_rev_id = adev->rev_id + 0x1;
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG;
+               adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                       AMD_PG_SUPPORT_GFX_SMG |
+                       AMD_PG_SUPPORT_GFX_PIPELINE |
+                       AMD_PG_SUPPORT_UVD |
+                       AMD_PG_SUPPORT_VCE;
+               adev->external_rev_id = adev->rev_id + 0x61;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }
 
+       /* in the early init stage, vbios code won't work */
+       if (adev->asic_funcs->detect_hw_virtualization)
+               amdgpu_asic_detect_hw_virtualization(adev);
+
        if (amdgpu_smc_load_fw && smc_enabled)
                adev->firmware.smu_load = true;
 
@@ -1418,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
                WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
 }
 
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+                                          enum amd_clockgating_state state)
+{
+       uint32_t msg_id, pp_state;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       void *pp_handle = adev->powerplay.pp_handle;
+
+       if (state == AMD_CG_STATE_UNGATE)
+               pp_state = 0;
+       else
+               pp_state = PP_STATE_CG | PP_STATE_LS;
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_MC,
+                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_SDMA,
+                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_HDP,
+                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_BIF,
+                      PP_STATE_SUPPORT_LS,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_BIF,
+                      PP_STATE_SUPPORT_CG,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_DRM,
+                      PP_STATE_SUPPORT_LS,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                      PP_BLOCK_SYS_ROM,
+                      PP_STATE_SUPPORT_CG,
+                      pp_state);
+       amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+       return 0;
+}
+
 static int vi_common_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
 {
@@ -1443,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
                vi_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
+       case CHIP_TONGA:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+               vi_common_set_clockgating_state_by_smu(adev, state);
+               break;
        default:
                break;
        }
index 062ee16764800908aca8003b7a0cda6631da212a..11746f22d0c5f9e0e2b3becf23af09b8b472f4cf 100644 (file)
 #define VCE_CMD_IB_AUTO        0x00000005
 #define VCE_CMD_SEMAPHORE      0x00000006
 
+#define VCE_CMD_IB_VM           0x00000102
+#define VCE_CMD_WAIT_GE         0x00000106
+#define VCE_CMD_UPDATE_PTB      0x00000107
+#define VCE_CMD_FLUSH_TLB       0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)                         ((x) << 0)
+#define RB_MAP_PKR0_MASK                       (0x3 << 0)
+#define RB_MAP_PKR1(x)                         ((x) << 2)
+#define RB_MAP_PKR1_MASK                       (0x3 << 2)
+#define RB_XSEL2(x)                            ((x) << 4)
+#define RB_XSEL2_MASK                          (0x3 << 4)
+#define RB_XSEL                                        (1 << 6)
+#define RB_YSEL                                        (1 << 7)
+#define PKR_MAP(x)                             ((x) << 8)
+#define PKR_MAP_MASK                           (0x3 << 8)
+#define PKR_XSEL(x)                            ((x) << 10)
+#define PKR_XSEL_MASK                          (0x3 << 10)
+#define PKR_YSEL(x)                            ((x) << 12)
+#define PKR_YSEL_MASK                          (0x3 << 12)
+#define SC_MAP(x)                              ((x) << 16)
+#define SC_MAP_MASK                            (0x3 << 16)
+#define SC_XSEL(x)                             ((x) << 18)
+#define SC_XSEL_MASK                           (0x3 << 18)
+#define SC_YSEL(x)                             ((x) << 20)
+#define SC_YSEL_MASK                           (0x3 << 20)
+#define SE_MAP(x)                              ((x) << 24)
+#define SE_MAP_MASK                            (0x3 << 24)
+#define SE_XSEL(x)                             ((x) << 26)
+#define SE_XSEL_MASK                           (0x3 << 26)
+#define SE_YSEL(x)                             ((x) << 28)
+#define SE_YSEL_MASK                           (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)                         ((x) << 0)
+#define SE_PAIR_MAP_MASK                       (0x3 << 0)
+#define SE_PAIR_XSEL(x)                                ((x) << 2)
+#define SE_PAIR_XSEL_MASK                      (0x3 << 2)
+#define SE_PAIR_YSEL(x)                                ((x) << 4)
+#define SE_PAIR_YSEL_MASK                      (0x3 << 4)
+
 #endif
index e621eba63126a09dc65f33ee59dd6bf47785e06d..453c5d66e5c34abf0e6e3f24bbebcacdf2fcca49 100644 (file)
@@ -142,13 +142,15 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       pr_debug("mapping doorbell page:\n");
-       pr_debug("     target user address == 0x%08llX\n",
-                       (unsigned long long) vma->vm_start);
-       pr_debug("     physical address    == 0x%08llX\n", address);
-       pr_debug("     vm_flags            == 0x%04lX\n", vma->vm_flags);
-       pr_debug("     size                == 0x%04lX\n",
-                        doorbell_process_allocation());
+       pr_debug("kfd: mapping doorbell page in %s\n"
+                "     target user address == 0x%08llX\n"
+                "     physical address    == 0x%08llX\n"
+                "     vm_flags            == 0x%04lX\n"
+                "     size                == 0x%04lX\n",
+                __func__,
+                (unsigned long long) vma->vm_start, address, vma->vm_flags,
+                doorbell_process_allocation());
 
        return io_remap_pfn_range(vma,
                                vma->vm_start,
@@ -184,7 +186,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
                                                        sizeof(u32)) + inx;
 
        pr_debug("kfd: get kernel queue doorbell\n"
-                        "     doorbell offset   == 0x%08d\n"
+                        "     doorbell offset   == 0x%08X\n"
                         "     kernel address    == 0x%08lX\n",
                *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 
index 9beae87aadd54a418ebf5033b86eff2665d7ea5f..d135cd002a951edc873cfaab44be77fa4db69aec 100644 (file)
@@ -47,6 +47,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
                        __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
 
+       memset(&prop, 0, sizeof(prop));
+       memset(&nop, 0, sizeof(nop));
+
        nop.opcode = IT_NOP;
        nop.type = PM4_TYPE_3;
        nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        prop.eop_ring_buffer_address = kq->eop_gpu_addr;
        prop.eop_ring_buffer_size = PAGE_SIZE;
 
-       if (init_queue(&kq->queue, prop) != 0)
+       if (init_queue(&kq->queue, &prop) != 0)
                goto err_init_queue;
 
        kq->queue->device = dev;
index 80113c335966889afbe6c9a8c58f7e219b18e418..4750cabe42521c29338ff54f0de559c9321589d9 100644 (file)
@@ -619,7 +619,7 @@ int kfd_init_apertures(struct kfd_process *process);
 /* Queue Context Management */
 struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
 
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
 void print_queue_properties(struct queue_properties *q);
 void print_queue(struct queue *q);
index 4f3849ac8c0770d1dbde09f9d6e371fb24c01e94..ef7c8de7060e2dab64b7f7739b8c8286ea741711 100644 (file)
@@ -404,58 +404,47 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 {
        struct kfd_process *p;
        struct kfd_process_device *pdd;
-       int idx, i;
 
        BUG_ON(dev == NULL);
 
-       idx = srcu_read_lock(&kfd_processes_srcu);
-
        /*
         * Look for the process that matches the pasid. If there is no such
         * process, we either released it in amdkfd's own notifier, or there
         * is a bug. Unfortunately, there is no way to tell...
         */
-       hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-               if (p->pasid == pasid) {
-
-                       srcu_read_unlock(&kfd_processes_srcu, idx);
-
-                       pr_debug("Unbinding process %d from IOMMU\n", pasid);
+       p = kfd_lookup_process_by_pasid(pasid);
+       if (!p)
+               return;
 
-                       mutex_lock(&p->mutex);
-
-                       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-                               kfd_dbgmgr_destroy(dev->dbgmgr);
-
-                       pqm_uninit(&p->pqm);
+       pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-                       pdd = kfd_get_process_device_data(dev, p);
+       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+               kfd_dbgmgr_destroy(dev->dbgmgr);
 
-                       if (!pdd) {
-                               mutex_unlock(&p->mutex);
-                               return;
-                       }
+       pqm_uninit(&p->pqm);
 
-                       if (pdd->reset_wavefronts) {
-                               dbgdev_wave_reset_wavefronts(pdd->dev, p);
-                               pdd->reset_wavefronts = false;
-                       }
+       pdd = kfd_get_process_device_data(dev, p);
 
-                       /*
-                        * Just mark pdd as unbound, because we still need it
-                        * to call amd_iommu_unbind_pasid() in when the
-                        * process exits.
-                        * We don't call amd_iommu_unbind_pasid() here
-                        * because the IOMMU called us.
-                        */
-                       pdd->bound = false;
+       if (!pdd) {
+               mutex_unlock(&p->mutex);
+               return;
+       }
 
-                       mutex_unlock(&p->mutex);
+       if (pdd->reset_wavefronts) {
+               dbgdev_wave_reset_wavefronts(pdd->dev, p);
+               pdd->reset_wavefronts = false;
+       }
 
-                       return;
-               }
+       /*
+        * Just mark pdd as unbound, because we still need it
+        * to call amd_iommu_unbind_pasid() when the process
+        * exits.
+        * We don't call amd_iommu_unbind_pasid() here
+        * because the IOMMU called us.
+        */
+       pdd->bound = false;
 
-       srcu_read_unlock(&kfd_processes_srcu, idx);
+       mutex_unlock(&p->mutex);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
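
The rewrite above replaces the open-coded SRCU hash walk with
kfd_lookup_process_by_pasid(). The mutex_unlock(&p->mutex) calls with no
visible mutex_lock() only balance if the helper returns with the process
mutex already held; that contract is assumed here, since the helper body is
not part of this hunk. A minimal sketch of the assumed contract, built from
the identifiers in the removed lines:

    /*
     * Assumed behaviour of kfd_lookup_process_by_pasid() (sketch, not
     * the actual body): find the process under the SRCU read lock,
     * take its mutex, then drop the SRCU lock, so the caller owns
     * p->mutex on return.
     */
    struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
    {
            struct kfd_process *p, *ret = NULL;
            unsigned int i;
            int idx = srcu_read_lock(&kfd_processes_srcu);

            hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
                    if (p->pasid == pasid) {
                            mutex_lock(&p->mutex);
                            ret = p;
                            break;
                    }

            srcu_read_unlock(&kfd_processes_srcu, idx);
            return ret;     /* caller must mutex_unlock(&ret->mutex) */
    }
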
index 7b69070f7ecc5eb58833f0ff828f7a934f7bd1d6..e1fb40b84c72f84f48a13be1e2b467d6dff80bb0 100644 (file)
@@ -129,7 +129,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
        q_properties->vmid = 0;
        q_properties->queue_id = qid;
 
-       retval = init_queue(q, *q_properties);
+       retval = init_queue(q, q_properties);
        if (retval != 0)
                goto err_init_queue;
 
index 9a0c90b0702eef4ced8ff7cc9ec2ab2cb23b57c8..0ab197077f2dc03e18a6e96ea5ddc179c8d0d498 100644 (file)
@@ -63,7 +63,7 @@ void print_queue(struct queue *q)
        pr_debug("Queue Device Address: 0x%p\n", q->device);
 }
 
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
 {
        struct queue *tmp;
 
@@ -73,7 +73,7 @@ int init_queue(struct queue **q, struct queue_properties properties)
        if (!tmp)
                return -ENOMEM;
 
-       memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+       memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
 
        *q = tmp;
        return 0;
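
The init_queue() changes in the hunks above switch struct queue_properties
from pass-by-value to a const pointer: call sites no longer copy the whole
properties struct onto the stack, and the callee keeps a single memcpy()
into its own storage. A standalone sketch of the pattern, with hypothetical
names, in plain C:

    #include <stdlib.h>
    #include <string.h>

    struct big_props { unsigned long words[32]; };
    struct obj { struct big_props props; };

    /* Pass-by-const-pointer: only a pointer crosses the call, and the
     * callee copies the fields once, as init_queue() now does. */
    static int obj_init(struct obj **out, const struct big_props *props)
    {
            struct obj *tmp = malloc(sizeof(*tmp));

            if (!tmp)
                    return -1;      /* -ENOMEM in the kernel version */

            memcpy(&tmp->props, props, sizeof(tmp->props));
            *out = tmp;
            return 0;
    }
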
index 884c96f50c3d9b62ccc4c9f81e20570d41d72f46..1e50647499593927f6238602ec7ba0efce216eb4 100644 (file)
@@ -1090,19 +1090,21 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 {
        uint32_t hashout;
        uint32_t buf[7];
+       uint64_t local_mem_size;
        int i;
 
        if (!gpu)
                return 0;
 
+       local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+
        buf[0] = gpu->pdev->devfn;
        buf[1] = gpu->pdev->subsystem_vendor;
        buf[2] = gpu->pdev->subsystem_device;
        buf[3] = gpu->pdev->device;
        buf[4] = gpu->pdev->bus->number;
-       buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
-                       & 0xffffffff);
-       buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+       buf[5] = lower_32_bits(local_mem_size);
+       buf[6] = upper_32_bits(local_mem_size);
 
        for (i = 0, hashout = 0; i < 7; i++)
                hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
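
Caching get_vmem_size() in local_mem_size avoids calling the hook twice,
and the open-coded mask and shift become the standard lower_32_bits() and
upper_32_bits() helpers from <linux/kernel.h>. Outside the kernel the same
split looks like this (hypothetical helper names):

    #include <stdint.h>

    /* Split a 64-bit value into its low and high 32-bit words,
     * mirroring the kernel's lower_32_bits()/upper_32_bits(). */
    static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    /* e.g. v = 0x100000000 (4 GiB): lo32(v) == 0x0, hi32(v) == 0x1 */
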
index a74a0d2ff1cac6008be2b634184f9edc42185b10..c934b78c9e2f056b9ba20ec9f2b182cfa1910e74 100644 (file)
  * Supported ASIC types
  */
 enum amd_asic_type {
-       CHIP_BONAIRE = 0,
+       CHIP_TAHITI = 0,
+       CHIP_PITCAIRN,
+       CHIP_VERDE,
+       CHIP_OLAND,
+       CHIP_HAINAN,
+       CHIP_BONAIRE,
        CHIP_KAVERI,
        CHIP_KABINI,
        CHIP_HAWAII,
@@ -159,8 +164,14 @@ struct amd_ip_funcs {
        bool (*is_idle)(void *handle);
        /* poll for idle */
        int (*wait_for_idle)(void *handle);
+       /* check whether the IP block needs a soft reset */
+       int (*check_soft_reset)(void *handle);
+       /* prepare the IP block for soft reset */
+       int (*pre_soft_reset)(void *handle);
        /* soft reset the IP block */
        int (*soft_reset)(void *handle);
+       /* clean up the IP block after soft reset */
+       int (*post_soft_reset)(void *handle);
        /* enable/disable cg for the IP block */
        int (*set_clockgating_state)(void *handle,
                                     enum amd_clockgating_state state);
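
The new hooks split soft reset into detect, prepare, reset, and restore
phases. How the core sequences them is not part of this hunk; the sketch
below is one plausible ordering, assuming a non-zero check_soft_reset()
return means the block hung:

    static int do_ip_soft_reset(const struct amd_ip_funcs *funcs,
                                void *handle)
    {
            int r;

            /* detect: skip blocks that did not hang (assumed semantics) */
            if (funcs->check_soft_reset && !funcs->check_soft_reset(handle))
                    return 0;

            if (funcs->pre_soft_reset) {
                    r = funcs->pre_soft_reset(handle);  /* quiesce */
                    if (r)
                            return r;
            }

            r = funcs->soft_reset(handle);              /* hit the reset bits */
            if (r)
                    return r;

            if (funcs->post_soft_reset)
                    r = funcs->post_soft_reset(handle); /* reinitialize */

            return r;
    }
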
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644 (file)
index 0000000..66e39cd
--- /dev/null
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
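
Each cs_extent_def row above pairs a defaults table with the register
offset it starts at and a dword count (si_SECT_CONTEXT_def_1 covers 212
registers from 0xa000, the HOLE entries padding unassigned offsets), and
both arrays are NULL-terminated. A sketch of how a consumer would walk
them, assuming the usual cs_extent_def/cs_section_def field names
(extent/reg_index/reg_count and section/id):

    /* Count the register dwords a clear-state buffer must carry. */
    static unsigned int si_clearstate_reg_dwords(void)
    {
            const struct cs_section_def *sect;
            const struct cs_extent_def *ext;
            unsigned int count = 0;

            for (sect = si_cs_data; sect->section; sect++)
                    for (ext = sect->section; ext->extent; ext++)
                            count += ext->reg_count;    /* 212 + 272 + ... */

            return count;
    }
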
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644 (file)
index 0000000..895c8e2
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x196c
+#define SI_DC_GPIO_HPD_A                         0x196d
+#define SI_DC_GPIO_HPD_EN                        0x196e
+#define SI_DC_GPIO_HPD_Y                         0x196f
+
+#define SI_GRPH_CONTROL                          0x1a01
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)           (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2                  0
+#       define SI_ADDR_SURF_P4_8x16             4
+#       define SI_ADDR_SURF_P4_16x16            5
+#       define SI_ADDR_SURF_P4_16x32            6
+#       define SI_ADDR_SURF_P4_32x32            7
+#       define SI_ADDR_SURF_P8_16x16_8x16       8
+#       define SI_ADDR_SURF_P8_16x32_8x16       9
+#       define SI_ADDR_SURF_P8_32x32_8x16       10
+#       define SI_ADDR_SURF_P8_16x32_16x16      11
+#       define SI_ADDR_SURF_P8_32x32_16x16      12
+#       define SI_ADDR_SURF_P8_32x32_16x32      13
+#       define SI_ADDR_SURF_P8_32x64_32x32      14
+
+#endif
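
The SI_GRPH_* function-like macros above mask each field and shift it to
its bit position in SI_GRPH_CONTROL, so a full register value is built by
OR'ing the fields together. For example, an untiled 32bpp ARGB8888 surface
(illustrative value only; real code also programs the bank and tile-split
fields):

    u32 grph_control =
            SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |              /* bits 1:0 */
            SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |         /* bits 10:8 */
            SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_LINEAR_ALIGNED); /* bits 22:20 */
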
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644 (file)
index 0000000..c57eff1
--- /dev/null
@@ -0,0 +1,2461 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+#define SI_MAX_SH_GPRS                 256
+#define SI_MAX_TEMP_GPRS               16
+#define SI_MAX_SH_THREADS              256
+#define SI_MAX_SH_STACK_ENTRIES        4096
+#define SI_MAX_FRC_EOV_CNT             16384
+#define SI_MAX_BACKENDS                8
+#define SI_MAX_BACKENDS_MASK           0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS                   12
+#define SI_MAX_SIMDS_MASK              0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES                   8
+#define SI_MAX_PIPES_MASK              0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM                 0xFFFF
+#define SI_MAX_TCC                     16
+#define SI_MAX_TCC_MASK                0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS            8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0                              0x80
+#define SMC_IND_DATA_0                               0x81
+
+#define SMC_IND_ACCESS_CNTL                          0x8A
+#       define AUTO_INCREMENT_IND_0                  (1 << 0)
+#define SMC_MESSAGE_0                                0x8B
+#define SMC_RESP_0                                   0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START                    0xc0030000
+#define SMC_CG_IND_END                      0xc0040000
+
+#define        CG_CGTT_LOCAL_0                         0x400
+#define        CG_CGTT_LOCAL_1                         0x401
+
+/* SMC IND registers */
+#define        SMC_SYSCON_RESET_CNTL                           0x80000000
+#       define RST_REG                                  (1 << 0)
+#define        SMC_SYSCON_CLOCK_CNTL_0                         0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define VGA_HDP_CONTROL                                0xCA
+#define                VGA_MEMORY_DISABLE                              (1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x13F
+#define                DCCG_DISP1_SLOW_SELECT(x)               ((x) << 0)
+#define                DCCG_DISP1_SLOW_SELECT_MASK             (7 << 0)
+#define                DCCG_DISP1_SLOW_SELECT_SHIFT            0
+#define                DCCG_DISP2_SLOW_SELECT(x)               ((x) << 4)
+#define                DCCG_DISP2_SLOW_SELECT_MASK             (7 << 4)
+#define                DCCG_DISP2_SLOW_SELECT_SHIFT            4
+
+#define        CG_SPLL_FUNC_CNTL                               0x180
+#define                SPLL_RESET                              (1 << 0)
+#define                SPLL_SLEEP                              (1 << 1)
+#define                SPLL_BYPASS_EN                          (1 << 3)
+#define                SPLL_REF_DIV(x)                         ((x) << 4)
+#define                SPLL_REF_DIV_MASK                       (0x3f << 4)
+#define                SPLL_PDIV_A(x)                          ((x) << 20)
+#define                SPLL_PDIV_A_MASK                        (0x7f << 20)
+#define                SPLL_PDIV_A_SHIFT                       20
+#define        CG_SPLL_FUNC_CNTL_2                             0x181
+#define                SCLK_MUX_SEL(x)                         ((x) << 0)
+#define                SCLK_MUX_SEL_MASK                       (0x1ff << 0)
+#define                SPLL_CTLREQ_CHG                         (1 << 23)
+#define                SCLK_MUX_UPDATE                         (1 << 26)
+#define        CG_SPLL_FUNC_CNTL_3                             0x182
+#define                SPLL_FB_DIV(x)                          ((x) << 0)
+#define                SPLL_FB_DIV_MASK                        (0x3ffffff << 0)
+#define                SPLL_FB_DIV_SHIFT                       0
+#define                SPLL_DITHEN                             (1 << 28)
+#define        CG_SPLL_FUNC_CNTL_4                             0x183
+
+#define        SPLL_STATUS                                     0x185
+#define                SPLL_CHG_STATUS                         (1 << 1)
+#define        SPLL_CNTL_MODE                                  0x186
+#define                SPLL_SW_DIR_CONTROL                     (1 << 0)
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
+
+#define        CG_SPLL_SPREAD_SPECTRUM                         0x188
+#define                SSEN                                    (1 << 0)
+#define                CLK_S(x)                                ((x) << 4)
+#define                CLK_S_MASK                              (0xfff << 4)
+#define                CLK_S_SHIFT                             4
+#define        CG_SPLL_SPREAD_SPECTRUM_2                       0x189
+#define                CLK_V(x)                                ((x) << 0)
+#define                CLK_V_MASK                              (0x3ffffff << 0)
+#define                CLK_V_SHIFT                             0
+
+#define        CG_SPLL_AUTOSCALE_CNTL                          0x18b
+#       define AUTOSCALE_ON_SS_CLEAR                    (1 << 9)
+
+/* discrete uvd clocks */
+#define        CG_UPLL_FUNC_CNTL                               0x18d
+#      define UPLL_RESET_MASK                          0x00000001
+#      define UPLL_SLEEP_MASK                          0x00000002
+#      define UPLL_BYPASS_EN_MASK                      0x00000004
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_VCO_MODE_MASK                       0x00000600
+#      define UPLL_REF_DIV_MASK                        0x003F0000
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+#define        CG_UPLL_FUNC_CNTL_2                             0x18e
+#      define UPLL_PDIV_A(x)                           ((x) << 0)
+#      define UPLL_PDIV_A_MASK                         0x0000007F
+#      define UPLL_PDIV_B(x)                           ((x) << 8)
+#      define UPLL_PDIV_B_MASK                         0x00007F00
+#      define VCLK_SRC_SEL(x)                          ((x) << 20)
+#      define VCLK_SRC_SEL_MASK                        0x01F00000
+#      define DCLK_SRC_SEL(x)                          ((x) << 25)
+#      define DCLK_SRC_SEL_MASK                        0x3E000000
+#define        CG_UPLL_FUNC_CNTL_3                             0x18f
+#      define UPLL_FB_DIV(x)                           ((x) << 0)
+#      define UPLL_FB_DIV_MASK                         0x01FFFFFF
+#define        CG_UPLL_FUNC_CNTL_4                             0x191
+#      define UPLL_SPARE_ISPARE9                       0x00020000
+#define        CG_UPLL_FUNC_CNTL_5                             0x192
+#      define RESET_ANTI_MUX_MASK                      0x00000200
+#define        CG_UPLL_SPREAD_SPECTRUM                         0x194
+#      define SSEN_MASK                                0x00000001
+
+#define        MPLL_BYPASSCLK_SEL                              0x197
+#      define MPLL_CLKOUT_SEL(x)                       ((x) << 8)
+#      define MPLL_CLKOUT_SEL_MASK                     0xFF00
+
+#define CG_CLKPIN_CNTL                                    0x198
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0x199
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define        THM_CLK_CNTL                                    0x19b
+#      define CMON_CLK_SEL(x)                          ((x) << 0)
+#      define CMON_CLK_SEL_MASK                        0xFF
+#      define TMON_CLK_SEL(x)                          ((x) << 8)
+#      define TMON_CLK_SEL_MASK                        0xFF00
+#define        MISC_CLK_CNTL                                   0x19c
+#      define DEEP_SLEEP_CLK_SEL(x)                    ((x) << 0)
+#      define DEEP_SLEEP_CLK_SEL_MASK                  0xFF
+#      define ZCLK_SEL(x)                              ((x) << 8)
+#      define ZCLK_SEL_MASK                            0xFF00
+
+#define        CG_THERMAL_CTRL                                 0x1c0
+#define        DPM_EVENT_SRC(x)                        ((x) << 0)
+#define        DPM_EVENT_SRC_MASK                      (7 << 0)
+#define                DIG_THERM_DPM(x)                        ((x) << 14)
+#define                DIG_THERM_DPM_MASK                      0x003FC000
+#define                DIG_THERM_DPM_SHIFT                     14
+#define        CG_THERMAL_STATUS                               0x1c1
+#define                FDO_PWM_DUTY(x)                         ((x) << 9)
+#define                FDO_PWM_DUTY_MASK                       (0xff << 9)
+#define                FDO_PWM_DUTY_SHIFT                      9
+#define        CG_THERMAL_INT                                  0x1c2
+#define                DIG_THERM_INTH(x)                       ((x) << 8)
+#define                DIG_THERM_INTH_MASK                     0x0000FF00
+#define                DIG_THERM_INTH_SHIFT                    8
+#define                DIG_THERM_INTL(x)                       ((x) << 16)
+#define                DIG_THERM_INTL_MASK                     0x00FF0000
+#define                DIG_THERM_INTL_SHIFT                    16
+#define        THERM_INT_MASK_HIGH                     (1 << 24)
+#define        THERM_INT_MASK_LOW                      (1 << 25)
+
+#define        CG_MULT_THERMAL_CTRL                                    0x1c4
+#define                TEMP_SEL(x)                                     ((x) << 20)
+#define                TEMP_SEL_MASK                                   (0xff << 20)
+#define                TEMP_SEL_SHIFT                                  20
+#define        CG_MULT_THERMAL_STATUS                                  0x1c5
+#define                ASIC_MAX_TEMP(x)                                ((x) << 0)
+#define                ASIC_MAX_TEMP_MASK                              0x000001ff
+#define                ASIC_MAX_TEMP_SHIFT                             0
+#define                CTF_TEMP(x)                                     ((x) << 9)
+#define                CTF_TEMP_MASK                                   0x0003fe00
+#define                CTF_TEMP_SHIFT                                  9
+
+#define        CG_FDO_CTRL0                                    0x1d5
+#define                FDO_STATIC_DUTY(x)                      ((x) << 0)
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
+#define                FDO_STATIC_DUTY_SHIFT                   0
+#define        CG_FDO_CTRL1                                    0x1d6
+#define                FMAX_DUTY100(x)                         ((x) << 0)
+#define                FMAX_DUTY100_MASK                       0x000000FF
+#define                FMAX_DUTY100_SHIFT                      0
+#define        CG_FDO_CTRL2                                    0x1d7
+#define                TMIN(x)                                 ((x) << 0)
+#define                TMIN_MASK                               0x000000FF
+#define                TMIN_SHIFT                              0
+#define                FDO_PWM_MODE(x)                         ((x) << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
+#define                FDO_PWM_MODE_SHIFT                      11
+#define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
+#define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
+#define                TACH_PWM_RESP_RATE_SHIFT                25
+
+#define CG_TACH_CTRL                                    0x1dc
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x1dd
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define GENERAL_PWRMGT                                  0x1e0
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#define CG_TPC                                            0x1e1
+#define SCLK_PWRMGT_CNTL                                  0x1e2
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x1e6
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_FTV                                            0x1ef
+
+#define CG_FFCT_0                                         0x1f0
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x1ff
+#       define BSP(x)                                  ((x) << 0)
+#       define BSP_MASK                                        (0xffff << 0)
+#       define BSU(x)                                  ((x) << 16)
+#       define BSU_MASK                                        (0xf << 16)
+#define CG_AT                                           0x200
+#       define CG_R(x)                                 ((x) << 0)
+#       define CG_R_MASK                               (0xffff << 0)
+#       define CG_L(x)                                 ((x) << 16)
+#       define CG_L_MASK                               (0xffff << 16)
+
+#define CG_GIT                                          0x201
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x203
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x20a
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define        CG_ULV_CONTROL                                  0x21e
+#define        CG_ULV_PARAMETER                                0x21f
+
+#define        SMC_SCRATCH0                                    0x221
+
+#define        CG_CAC_CTRL                                     0x22e
+#      define CAC_WINDOW(x)                            ((x) << 0)
+#      define CAC_WINDOW_MASK                          0x00ffffff
+
+#define DMIF_ADDR_CONFIG                               0x2F5
+
+#define DMIF_ADDR_CALC                                 0x300
+
+#define        PIPE0_DMIF_BUFFER_CONTROL                         0x0328
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define        SRBM_STATUS                                     0x394
+#define                GRBM_RQ_PENDING                         (1 << 5)
+#define                VMC_BUSY                                (1 << 8)
+#define                MCB_BUSY                                (1 << 9)
+#define                MCB_NON_DISPLAY_BUSY                    (1 << 10)
+#define                MCC_BUSY                                (1 << 11)
+#define                MCD_BUSY                                (1 << 12)
+#define                SEM_BUSY                                (1 << 14)
+#define                IH_BUSY                                 (1 << 17)
+
+#define        SRBM_SOFT_RESET                                 0x398
+#define                SOFT_RESET_BIF                          (1 << 1)
+#define                SOFT_RESET_DC                           (1 << 5)
+#define                SOFT_RESET_DMA1                         (1 << 6)
+#define                SOFT_RESET_GRBM                         (1 << 8)
+#define                SOFT_RESET_HDP                          (1 << 9)
+#define                SOFT_RESET_IH                           (1 << 10)
+#define                SOFT_RESET_MC                           (1 << 11)
+#define                SOFT_RESET_ROM                          (1 << 14)
+#define                SOFT_RESET_SEM                          (1 << 15)
+#define                SOFT_RESET_VMC                          (1 << 17)
+#define                SOFT_RESET_DMA                          (1 << 20)
+#define                SOFT_RESET_TST                          (1 << 21)
+#define                SOFT_RESET_REGBB                        (1 << 22)
+#define                SOFT_RESET_ORB                          (1 << 23)
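+
+/* Illustrative sketch, not part of the original header: SRBM soft resets
+ * are typically pulsed - set the block bits, wait, then clear them.  This
+ * assumes the driver's usual RREG32()/WREG32() dword-offset accessors with
+ * an amdgpu_device in scope; the 50us settle time is an assumption too.
+ */
+static inline void srbm_soft_reset_sketch(struct amdgpu_device *adev,
+					  u32 reset_mask)
+{
+	u32 tmp = RREG32(SRBM_SOFT_RESET);
+
+	tmp |= reset_mask;		/* assert reset for the selected blocks */
+	WREG32(SRBM_SOFT_RESET, tmp);
+	tmp = RREG32(SRBM_SOFT_RESET);	/* read back to post the write */
+	udelay(50);
+	tmp &= ~reset_mask;		/* deassert */
+	WREG32(SRBM_SOFT_RESET, tmp);
+	tmp = RREG32(SRBM_SOFT_RESET);
+}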
+
+#define        CC_SYS_RB_BACKEND_DISABLE                       0x3A0
+#define        GC_USER_SYS_RB_BACKEND_DISABLE                  0x3A1
+
+#define SRBM_READ_ERROR                                        0x3A6
+#define SRBM_INT_CNTL                                  0x3A8
+#define SRBM_INT_ACK                                   0x3AA
+
+#define        SRBM_STATUS2                                    0x3B1
+#define                DMA_BUSY                                (1 << 5)
+#define                DMA1_BUSY                               (1 << 6)
+
+#define VM_L2_CNTL                                     0x500
+#define                ENABLE_L2_CACHE                                 (1 << 0)
+#define                ENABLE_L2_FRAGMENT_PROCESSING                   (1 << 1)
+#define                L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)                ((x) << 2)
+#define                L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)                ((x) << 4)
+#define                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE         (1 << 9)
+#define                ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE        (1 << 10)
+#define                EFFECTIVE_L2_QUEUE_SIZE(x)                      (((x) & 7) << 15)
+#define                CONTEXT1_IDENTITY_ACCESS_MODE(x)                (((x) & 3) << 19)
+#define VM_L2_CNTL2                                    0x501
+#define                INVALIDATE_ALL_L1_TLBS                          (1 << 0)
+#define                INVALIDATE_L2_CACHE                             (1 << 1)
+#define                INVALIDATE_CACHE_MODE(x)                        ((x) << 26)
+#define                        INVALIDATE_PTE_AND_PDE_CACHES           0
+#define                        INVALIDATE_ONLY_PTE_CACHES              1
+#define                        INVALIDATE_ONLY_PDE_CACHES              2
+#define VM_L2_CNTL3                                    0x502
+#define                BANK_SELECT(x)                                  ((x) << 0)
+#define                L2_CACHE_UPDATE_MODE(x)                         ((x) << 6)
+#define                L2_CACHE_BIGK_FRAGMENT_SIZE(x)                  ((x) << 15)
+#define                L2_CACHE_BIGK_ASSOCIATIVITY                     (1 << 20)
+#define        VM_L2_STATUS                                    0x503
+#define                L2_BUSY                                         (1 << 0)
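+
+/* Illustrative sketch, not part of the original header: a TLB/L2 flush
+ * request followed by a poll on L2_BUSY.  The accessors and the 100000
+ * iteration timeout are assumptions for the example.
+ */
+static inline void vm_l2_flush_sketch(struct amdgpu_device *adev)
+{
+	unsigned int i;
+
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE |
+	       INVALIDATE_CACHE_MODE(INVALIDATE_PTE_AND_PDE_CACHES));
+	for (i = 0; i < 100000; i++) {
+		if (!(RREG32(VM_L2_STATUS) & L2_BUSY))
+			break;
+		udelay(1);
+	}
+}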
+#define VM_CONTEXT0_CNTL                               0x504
+#define                ENABLE_CONTEXT                                  (1 << 0)
+#define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
+#define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
+#define                PAGE_TABLE_BLOCK_SIZE(x)                        (((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL                               0x505
+#define VM_CONTEXT0_CNTL2                              0x50C
+#define VM_CONTEXT1_CNTL2                              0x50D
+#define        VM_CONTEXT8_PAGE_TABLE_BASE_ADDR                0x50E
+#define        VM_CONTEXT9_PAGE_TABLE_BASE_ADDR                0x50F
+#define        VM_CONTEXT10_PAGE_TABLE_BASE_ADDR               0x510
+#define        VM_CONTEXT11_PAGE_TABLE_BASE_ADDR               0x511
+#define        VM_CONTEXT12_PAGE_TABLE_BASE_ADDR               0x512
+#define        VM_CONTEXT13_PAGE_TABLE_BASE_ADDR               0x513
+#define        VM_CONTEXT14_PAGE_TABLE_BASE_ADDR               0x514
+#define        VM_CONTEXT15_PAGE_TABLE_BASE_ADDR               0x515
+
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x53f
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x537
+#define                PROTECTIONS_MASK                        (0xf << 0)
+#define                PROTECTIONS_SHIFT                       0
+               /* bit 0: range
+                * bit 1: pde0
+                * bit 2: valid
+                * bit 3: read
+                * bit 4: write
+                */
+#define                MEMORY_CLIENT_ID_MASK                   (0xff << 12)
+#define                MEMORY_CLIENT_ID_SHIFT                  12
+#define                MEMORY_CLIENT_RW_MASK                   (1 << 24)
+#define                MEMORY_CLIENT_RW_SHIFT                  24
+#define                FAULT_VMID_MASK                         (0xf << 25)
+#define                FAULT_VMID_SHIFT                        25
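+
+/* Illustrative sketch, not part of the original header: decoding a raw
+ * VM_CONTEXT1_PROTECTION_FAULT_STATUS value with the masks above.  The
+ * helper name and message wording are assumptions.
+ */
+static inline void vm_fault_decode_sketch(struct amdgpu_device *adev)
+{
+	u32 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+	u32 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+
+	dev_err(adev->dev,
+		"VM fault at 0x%08x: vmid %u, client 0x%02x (%s), protections 0x%x\n",
+		addr,
+		(status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT,
+		(status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT,
+		(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+		(status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT);
+}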
+
+#define VM_INVALIDATE_REQUEST                          0x51E
+#define VM_INVALIDATE_RESPONSE                         0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR      0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR      0x547
+
+#define        VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                0x54F
+#define        VM_CONTEXT1_PAGE_TABLE_BASE_ADDR                0x550
+#define        VM_CONTEXT2_PAGE_TABLE_BASE_ADDR                0x551
+#define        VM_CONTEXT3_PAGE_TABLE_BASE_ADDR                0x552
+#define        VM_CONTEXT4_PAGE_TABLE_BASE_ADDR                0x553
+#define        VM_CONTEXT5_PAGE_TABLE_BASE_ADDR                0x554
+#define        VM_CONTEXT6_PAGE_TABLE_BASE_ADDR                0x555
+#define        VM_CONTEXT7_PAGE_TABLE_BASE_ADDR                0x556
+#define        VM_CONTEXT0_PAGE_TABLE_START_ADDR               0x557
+#define        VM_CONTEXT1_PAGE_TABLE_START_ADDR               0x558
+
+#define        VM_CONTEXT0_PAGE_TABLE_END_ADDR                 0x55F
+#define        VM_CONTEXT1_PAGE_TABLE_END_ADDR                 0x560
+
+#define VM_L2_CG                                       0x570
+#define                MC_CG_ENABLE                            (1 << 18)
+#define                MC_LS_ENABLE                            (1 << 19)
+
+#define MC_SHARED_CHMAP                                                0x801
+#define                NOOFCHAN_SHIFT                                  12
+#define                NOOFCHAN_MASK                                   0x0000f000
+#define MC_SHARED_CHREMAP                                      0x802
+
+#define        MC_VM_FB_LOCATION                               0x809
+#define        MC_VM_AGP_TOP                                   0x80A
+#define        MC_VM_AGP_BOT                                   0x80B
+#define        MC_VM_AGP_BASE                                  0x80C
+#define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x80D
+#define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x80E
+#define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x80F
+
+#define        MC_VM_MX_L1_TLB_CNTL                            0x819
+#define                ENABLE_L1_TLB                                   (1 << 0)
+#define                ENABLE_L1_FRAGMENT_PROCESSING                   (1 << 1)
+#define                SYSTEM_ACCESS_MODE_PA_ONLY                      (0 << 3)
+#define                SYSTEM_ACCESS_MODE_USE_SYS_MAP                  (1 << 3)
+#define                SYSTEM_ACCESS_MODE_IN_SYS                       (2 << 3)
+#define                SYSTEM_ACCESS_MODE_NOT_IN_SYS                   (3 << 3)
+#define                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU       (0 << 5)
+#define                ENABLE_ADVANCED_DRIVER_MODEL                    (1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL                        0x82B
+
+#define MC_HUB_MISC_HUB_CG                             0x82E
+#define MC_HUB_MISC_VM_CG                              0x82F
+
+#define MC_HUB_MISC_SIP_CG                             0x830
+
+#define MC_XPB_CLK_GAT                                 0x91E
+
+#define MC_CITF_MISC_RD_CG                             0x992
+#define MC_CITF_MISC_WR_CG                             0x993
+#define MC_CITF_MISC_VM_CG                             0x994
+
+#define        MC_ARB_RAMCFG                                   0x9D8
+#define                NOOFBANK_SHIFT                                  0
+#define                NOOFBANK_MASK                                   0x00000003
+#define                NOOFRANK_SHIFT                                  2
+#define                NOOFRANK_MASK                                   0x00000004
+#define                NOOFROWS_SHIFT                                  3
+#define                NOOFROWS_MASK                                   0x00000038
+#define                NOOFCOLS_SHIFT                                  6
+#define                NOOFCOLS_MASK                                   0x000000C0
+#define                CHANSIZE_SHIFT                                  8
+#define                CHANSIZE_MASK                                   0x00000100
+#define                CHANSIZE_OVERRIDE                               (1 << 11)
+#define                NOOFGROUPS_SHIFT                                12
+#define                NOOFGROUPS_MASK                                 0x00001000
+
+#define        MC_ARB_DRAM_TIMING                              0x9DD
+#define        MC_ARB_DRAM_TIMING2                             0x9DE
+
+#define MC_ARB_BURST_TIME                               0xA02
+#define                STATE0(x)                               ((x) << 0)
+#define                STATE0_MASK                             (0x1f << 0)
+#define                STATE0_SHIFT                            0
+#define                STATE1(x)                               ((x) << 5)
+#define                STATE1_MASK                             (0x1f << 5)
+#define                STATE1_SHIFT                            5
+#define                STATE2(x)                               ((x) << 10)
+#define                STATE2_MASK                             (0x1f << 10)
+#define                STATE2_SHIFT                            10
+#define                STATE3(x)                               ((x) << 15)
+#define                STATE3_MASK                             (0x1f << 15)
+#define                STATE3_SHIFT                            15
+
+#define        MC_SEQ_TRAIN_WAKEUP_CNTL                        0xA3A
+#define                TRAIN_DONE_D0                           (1 << 30)
+#define                TRAIN_DONE_D1                           (1 << 31)
+
+#define MC_SEQ_SUP_CNTL                                0xA32
+#define                RUN_MASK                                (1 << 0)
+#define MC_SEQ_SUP_PGM                                 0xA33
+#define MC_PMG_AUTO_CMD                                0xA34
+
+#define MC_IO_PAD_CNTL_D0                              0xA74
+#define                MEM_FALL_OUT_CMD                        (1 << 8)
+
+#define MC_SEQ_RAS_TIMING                               0xA28
+#define MC_SEQ_CAS_TIMING                               0xA29
+#define MC_SEQ_MISC_TIMING                              0xA2A
+#define MC_SEQ_MISC_TIMING2                             0xA2B
+#define MC_SEQ_PMG_TIMING                               0xA2C
+#define MC_SEQ_RD_CTL_D0                                0xA2D
+#define MC_SEQ_RD_CTL_D1                                0xA2E
+#define MC_SEQ_WR_CTL_D0                                0xA2F
+#define MC_SEQ_WR_CTL_D1                                0xA30
+
+#define MC_SEQ_MISC0                                           0xA80
+#define        MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define        MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define        MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define        MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define        MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define        MC_SEQ_MISC0_REV_ID_VALUE               1
+#define        MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define        MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define        MC_SEQ_MISC0_GDDR5_VALUE                5
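+
+/* Illustrative sketch, not part of the original header: MC_SEQ_MISC0
+ * encodes the memory type in bits 31:28, and GDDR5 reads back as 5 there.
+ * The helper is an assumption for the example.
+ */
+static inline bool mem_is_gddr5_sketch(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(MC_SEQ_MISC0);
+
+	return ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+		MC_SEQ_MISC0_GDDR5_VALUE;
+}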
+#define MC_SEQ_MISC1                                    0xA81
+#define MC_SEQ_RESERVE_M                                0xA82
+#define MC_PMG_CMD_EMRS                                 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX                          0xA91
+#define MC_SEQ_IO_DEBUG_DATA                                   0xA92
+
+#define MC_SEQ_MISC5                                    0xA95
+#define MC_SEQ_MISC6                                    0xA96
+
+#define MC_SEQ_MISC7                                    0xA99
+
+#define MC_SEQ_RAS_TIMING_LP                            0xA9B
+#define MC_SEQ_CAS_TIMING_LP                            0xA9C
+#define MC_SEQ_MISC_TIMING_LP                           0xA9D
+#define MC_SEQ_MISC_TIMING2_LP                          0xA9E
+#define MC_SEQ_WR_CTL_D0_LP                             0xA9F
+#define MC_SEQ_WR_CTL_D1_LP                             0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP                           0xAA2
+
+#define MC_PMG_CMD_MRS                                  0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP                             0xAC7
+#define MC_SEQ_RD_CTL_D1_LP                             0xAC8
+
+#define MC_PMG_CMD_MRS1                                 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0xAD2
+#define MC_SEQ_PMG_TIMING_LP                            0xAD3
+
+#define MC_SEQ_WR_CTL_2                                 0xAD5
+#define MC_SEQ_WR_CTL_2_LP                              0xAD6
+#define MC_PMG_CMD_MRS2                                 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0xAD8
+
+#define        MCLK_PWRMGT_CNTL                                0xAE8
+#       define DLL_SPEED(x)                            ((x) << 0)
+#       define DLL_SPEED_MASK                          (0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define        DLL_CNTL                                        0xAE9
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define        MPLL_CNTL_MODE                                  0xAEC
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#define        MPLL_FUNC_CNTL                                  0xAED
+#define                BWCTRL(x)                               ((x) << 20)
+#define                BWCTRL_MASK                             (0xff << 20)
+#define        MPLL_FUNC_CNTL_1                                0xAEE
+#define                VCO_MODE(x)                             ((x) << 0)
+#define                VCO_MODE_MASK                           (3 << 0)
+#define                CLKFRAC(x)                              ((x) << 4)
+#define                CLKFRAC_MASK                            (0xfff << 4)
+#define                CLKF(x)                                 ((x) << 16)
+#define                CLKF_MASK                               (0xfff << 16)
+#define        MPLL_FUNC_CNTL_2                                0xAEF
+#define        MPLL_AD_FUNC_CNTL                               0xAF0
+#define                YCLK_POST_DIV(x)                        ((x) << 0)
+#define                YCLK_POST_DIV_MASK                      (7 << 0)
+#define        MPLL_DQ_FUNC_CNTL                               0xAF1
+#define                YCLK_SEL(x)                             ((x) << 4)
+#define                YCLK_SEL_MASK                           (1 << 4)
+
+#define        MPLL_SS1                                        0xAF3
+#define                CLKV(x)                                 ((x) << 0)
+#define                CLKV_MASK                               (0x3ffffff << 0)
+#define        MPLL_SS2                                        0xAF4
+#define                CLKS(x)                                 ((x) << 0)
+#define                CLKS_MASK                               (0xfff << 0)
+
+#define        HDP_HOST_PATH_CNTL                              0xB00
+#define        CLOCK_GATING_DIS                        (1 << 23)
+#define        HDP_NONSURFACE_BASE                             0xB01
+#define        HDP_NONSURFACE_INFO                             0xB02
+#define        HDP_NONSURFACE_SIZE                             0xB03
+
+#define HDP_DEBUG0                                     0xBCC
+
+#define HDP_ADDR_CONFIG                                0xBD2
+#define HDP_MISC_CNTL                                  0xBD3
+#define        HDP_FLUSH_INVALIDATE_CACHE                      (1 << 0)
+#define HDP_MEM_POWER_LS                               0xBD4
+#define        HDP_LS_ENABLE                           (1 << 0)
+
+#define ATC_MISC_CG                                    0xCD4
+
+#define IH_RB_CNTL                                        0xF80
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0xF81
+#define IH_RB_RPTR                                        0xF82
+#define IH_RB_WPTR                                        0xF83
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0xF84
+#define IH_RB_WPTR_ADDR_LO                                0xF85
+#define IH_CNTL                                           0xF86
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
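+
+/* Illustrative sketch, not part of the original header: composing an
+ * IH_RB_CNTL value.  IH_IB_SIZE() takes a log2 encoding per the comment
+ * above, so a 256KB ring of 4-byte entries encodes as
+ * order_base_2(256*1024/4) = 16.  The sizes chosen are assumptions.
+ */
+static inline u32 ih_rb_cntl_sketch(void)
+{
+	return IH_WPTR_OVERFLOW_ENABLE |
+	       IH_WPTR_OVERFLOW_CLEAR |
+	       IH_WPTR_WRITEBACK_ENABLE |
+	       IH_IB_SIZE(order_base_2(256 * 1024 / 4));
+}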
+
+#define        CONFIG_MEMSIZE                                  0x150A
+
+#define INTERRUPT_CNTL                                    0x151A
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL                   0x1520
+
+#define        BIF_FB_EN                                               0x1524
+#define                FB_READ_EN                                      (1 << 0)
+#define                FB_WRITE_EN                                     (1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL                   0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                       0x1780
+#       define AZ_ENDPOINT_REG_INDEX(x)                  (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                  (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                        0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER          0x25
+#define                SPEAKER_ALLOCATION(x)                   (((x) & 0x7f) << 0)
+#define                SPEAKER_ALLOCATION_MASK                 (0x7f << 0)
+#define                SPEAKER_ALLOCATION_SHIFT                0
+#define                HDMI_CONNECTION                         (1 << 16)
+#define                DP_CONNECTION                           (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0        0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1        0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2        0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3        0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4        0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5        0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6        0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7        0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8        0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9        0x31 /* Dolby Digital Plus */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10       0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11       0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12       0x34 /* DST */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13       0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
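+
+/* Illustrative sketch, not part of the original header: packing one LPCM
+ * audio descriptor.  An 8-channel sink supporting 32/44.1/48 kHz uses
+ * MAX_CHANNELS(7) ("max channels minus one", per the comment above) and
+ * frequency bits 0-2.  The helper and values are assumptions.
+ */
+static inline u32 lpcm_descriptor_sketch(void)
+{
+	return MAX_CHANNELS(7) |			/* 8 channels */
+	       SUPPORTED_FREQUENCIES(0x07) |		/* 32/44.1/48 kHz */
+	       SUPPORTED_FREQUENCIES_STEREO(0x07);	/* same for stereo */
+}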
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                             (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define                PORT_CONNECTIVITY_MASK                          (3 << 30)
+#define                PORT_CONNECTIVITY_SHIFT                         30
+
+#define        DC_LB_MEMORY_SPLIT                                      0x1AC3
+#define                DC_LB_MEMORY_CONFIG(x)                          ((x) << 20)
+
+#define        PRIORITY_A_CNT                                          0x1AC6
+#define                PRIORITY_MARK_MASK                              0x7fff
+#define                PRIORITY_OFF                                    (1 << 16)
+#define                PRIORITY_ALWAYS_ON                              (1 << 20)
+#define        PRIORITY_B_CNT                                          0x1AC7
+
+#define        DPG_PIPE_ARBITRATION_CONTROL3                           0x1B32
+#       define LATENCY_WATERMARK_MASK(x)                       ((x) << 16)
+#define        DPG_PIPE_LATENCY_CONTROL                                0x1B33
+#       define LATENCY_LOW_WATERMARK(x)                                ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                       ((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x1AEE
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x1AEF
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x1AD0
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x183D
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x183E
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x183F
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x1840
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x1853
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x1854
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x1A16
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define        GRPH_INT_CONTROL                                0x1A17
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
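+
+/* Illustrative sketch, not part of the original header: the usual pageflip
+ * interrupt handshake - test the latched status bit, then write the clear
+ * bit back.  Shown for crtc0 only; the other crtcs live at the byte
+ * offsets listed in the comments above.
+ */
+static inline void pflip_ack_sketch(struct amdgpu_device *adev)
+{
+	if (RREG32(GRPH_INT_STATUS) & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS, GRPH_PFLIP_INT_CLEAR);
+}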
+
+#define        DAC_AUTODETECT_INT_CONTROL                      0x19F2
+
+#define DC_HPD1_INT_STATUS                              0x1807
+#define DC_HPD2_INT_STATUS                              0x180A
+#define DC_HPD3_INT_STATUS                              0x180D
+#define DC_HPD4_INT_STATUS                              0x1810
+#define DC_HPD5_INT_STATUS                              0x1813
+#define DC_HPD6_INT_STATUS                              0x1816
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x1808
+#define DC_HPD2_INT_CONTROL                             0x180B
+#define DC_HPD3_INT_CONTROL                             0x180E
+#define DC_HPD4_INT_CONTROL                             0x1811
+#define DC_HPD5_INT_CONTROL                             0x1814
+#define DC_HPD6_INT_CONTROL                             0x1817
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x1809
+#define DC_HPD2_CONTROL                                   0x180C
+#define DC_HPD3_CONTROL                                   0x180F
+#define DC_HPD4_CONTROL                                   0x1812
+#define DC_HPD5_CONTROL                                   0x1815
+#define DC_HPD6_CONTROL                                   0x1818
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
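+
+/* Illustrative sketch, not part of the original header: enabling hotplug
+ * detection on pad 1 and acknowledging a pending interrupt.  The
+ * connection timer value is an assumption for the example.
+ */
+static inline void hpd1_setup_sketch(struct amdgpu_device *adev)
+{
+	WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_EN);
+	WREG32(DC_HPD1_INT_CONTROL, DC_HPDx_INT_EN);
+	if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_INT_STATUS)
+		WREG32(DC_HPD1_INT_CONTROL, DC_HPDx_INT_ACK | DC_HPDx_INT_EN);
+}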
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x1B35
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE                           0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL            (1 << 4)   /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE                           0x05b0
+#define DCCG_AUDIO_DTO0_MODULE                          0x05b4
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+
+#define AFMT_AUDIO_SRC_CONTROL                          0x1c4f
+#define                AFMT_AUDIO_SRC_SELECT(x)                (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
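+
+/* Illustrative sketch, not part of the original header: routing stream2 to
+ * this AFMT block via the select field above, as a read-modify-write so
+ * any other bits in the register are preserved.
+ */
+static inline void afmt_src_select_sketch(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(AFMT_AUDIO_SRC_CONTROL);
+
+	tmp &= ~AFMT_AUDIO_SRC_SELECT(7);	/* clear the 3-bit field */
+	tmp |= AFMT_AUDIO_SRC_SELECT(2);	/* stream2 */
+	WREG32(AFMT_AUDIO_SRC_CONTROL, tmp);
+}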
+
+#define        GRBM_CNTL                                       0x2000
+#define                GRBM_READ_TIMEOUT(x)                            ((x) << 0)
+
+#define        GRBM_STATUS2                                    0x2002
+#define                RLC_RQ_PENDING                                  (1 << 0)
+#define                RLC_BUSY                                        (1 << 8)
+#define                TC_BUSY                                         (1 << 9)
+
+#define        GRBM_STATUS                                     0x2004
+#define                CMDFIFO_AVAIL_MASK                              0x0000000F
+#define                RING2_RQ_PENDING                                (1 << 4)
+#define                SRBM_RQ_PENDING                                 (1 << 5)
+#define                RING1_RQ_PENDING                                (1 << 6)
+#define                CF_RQ_PENDING                                   (1 << 7)
+#define                PF_RQ_PENDING                                   (1 << 8)
+#define                GDS_DMA_RQ_PENDING                              (1 << 9)
+#define                GRBM_EE_BUSY                                    (1 << 10)
+#define                DB_CLEAN                                        (1 << 12)
+#define                CB_CLEAN                                        (1 << 13)
+#define                TA_BUSY                                         (1 << 14)
+#define                GDS_BUSY                                        (1 << 15)
+#define                VGT_BUSY                                        (1 << 17)
+#define                IA_BUSY_NO_DMA                                  (1 << 18)
+#define                IA_BUSY                                         (1 << 19)
+#define                SX_BUSY                                         (1 << 20)
+#define                SPI_BUSY                                        (1 << 22)
+#define                BCI_BUSY                                        (1 << 23)
+#define                SC_BUSY                                         (1 << 24)
+#define                PA_BUSY                                         (1 << 25)
+#define                DB_BUSY                                         (1 << 26)
+#define                CP_COHERENCY_BUSY                               (1 << 28)
+#define                CP_BUSY                                         (1 << 29)
+#define                CB_BUSY                                         (1 << 30)
+#define                GUI_ACTIVE                                      (1 << 31)
+#define        GRBM_STATUS_SE0                                 0x2005
+#define        GRBM_STATUS_SE1                                 0x2006
+#define                SE_DB_CLEAN                                     (1 << 1)
+#define                SE_CB_CLEAN                                     (1 << 2)
+#define                SE_BCI_BUSY                                     (1 << 22)
+#define                SE_VGT_BUSY                                     (1 << 23)
+#define                SE_PA_BUSY                                      (1 << 24)
+#define                SE_TA_BUSY                                      (1 << 25)
+#define                SE_SX_BUSY                                      (1 << 26)
+#define                SE_SPI_BUSY                                     (1 << 27)
+#define                SE_SC_BUSY                                      (1 << 29)
+#define                SE_DB_BUSY                                      (1 << 30)
+#define                SE_CB_BUSY                                      (1 << 31)
+
+#define        GRBM_SOFT_RESET                                 0x2008
+#define                SOFT_RESET_CP                                   (1 << 0)
+#define                SOFT_RESET_CB                                   (1 << 1)
+#define                SOFT_RESET_RLC                                  (1 << 2)
+#define                SOFT_RESET_DB                                   (1 << 3)
+#define                SOFT_RESET_GDS                                  (1 << 4)
+#define                SOFT_RESET_PA                                   (1 << 5)
+#define                SOFT_RESET_SC                                   (1 << 6)
+#define                SOFT_RESET_BCI                                  (1 << 7)
+#define                SOFT_RESET_SPI                                  (1 << 8)
+#define                SOFT_RESET_SX                                   (1 << 10)
+#define                SOFT_RESET_TC                                   (1 << 11)
+#define                SOFT_RESET_TA                                   (1 << 12)
+#define                SOFT_RESET_VGT                                  (1 << 14)
+#define                SOFT_RESET_IA                                   (1 << 15)
+
+#define GRBM_GFX_INDEX                                 0x200B
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SH_INDEX(x)                             ((x) << 8)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                SH_BROADCAST_WRITES                     (1 << 29)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
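+
+/* Illustrative sketch, not part of the original header: GRBM_GFX_INDEX
+ * steers per-SE/SH register accesses.  Select one shader engine/array for
+ * indexed accesses, then restore broadcast so later writes reach all
+ * instances again.
+ */
+static inline void select_se_sh_sketch(struct amdgpu_device *adev,
+				       u32 se, u32 sh)
+{
+	WREG32(GRBM_GFX_INDEX,
+	       SE_INDEX(se) | SH_INDEX(sh) | INSTANCE_BROADCAST_WRITES);
+	/* ... per-SE/SH register accesses go here ... */
+	WREG32(GRBM_GFX_INDEX, SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
+	       INSTANCE_BROADCAST_WRITES);
+}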
+
+#define GRBM_INT_CNTL                                   0x2018
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define        CP_STRMOUT_CNTL                                 0x213F
+#define        SCRATCH_REG0                                    0x2140
+#define        SCRATCH_REG1                                    0x2141
+#define        SCRATCH_REG2                                    0x2142
+#define        SCRATCH_REG3                                    0x2143
+#define        SCRATCH_REG4                                    0x2144
+#define        SCRATCH_REG5                                    0x2145
+#define        SCRATCH_REG6                                    0x2146
+#define        SCRATCH_REG7                                    0x2147
+
+#define        SCRATCH_UMSK                                    0x2150
+#define        SCRATCH_ADDR                                    0x2151
+
+#define        CP_SEM_WAIT_TIMER                               0x216F
+
+#define        CP_SEM_INCOMPLETE_TIMER_CNTL                    0x2172
+
+#define CP_ME_CNTL                                     0x21B6
+#define                CP_CE_HALT                                      (1 << 24)
+#define                CP_PFP_HALT                                     (1 << 26)
+#define                CP_ME_HALT                                      (1 << 28)
+
+#define        CP_COHER_CNTL2                                  0x217A
+
+#define        CP_RB2_RPTR                                     0x21BE
+#define        CP_RB1_RPTR                                     0x21BF
+#define        CP_RB0_RPTR                                     0x21C0
+#define        CP_RB_WPTR_DELAY                                0x21C1
+
+#define        CP_QUEUE_THRESHOLDS                             0x21D8
+#define                ROQ_IB1_START(x)                                ((x) << 0)
+#define                ROQ_IB2_START(x)                                ((x) << 8)
+#define CP_MEQ_THRESHOLDS                              0x21D9
+#define                MEQ1_START(x)                           ((x) << 0)
+#define                MEQ2_START(x)                           ((x) << 8)
+
+#define        CP_PERFMON_CNTL                                 0x21FF
+
+#define        VGT_VTX_VECT_EJECT_REG                          0x222C
+
+#define        VGT_CACHE_INVALIDATION                          0x2231
+#define                CACHE_INVALIDATION(x)                           ((x) << 0)
+#define                        VC_ONLY                                         0
+#define                        TC_ONLY                                         1
+#define                        VC_AND_TC                                       2
+#define                AUTO_INVLD_EN(x)                                ((x) << 6)
+#define                        NO_AUTO                                         0
+#define                        ES_AUTO                                         1
+#define                        GS_AUTO                                         2
+#define                        ES_AND_GS_AUTO                                  3
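+
+/* Illustrative sketch, not part of the original header: the invalidation
+ * policy is the OR of one value from each enumeration above, e.g. flush
+ * both VC and TC with auto-invalidation around ES and GS work.
+ */
+static inline u32 vgt_cache_invalidation_sketch(void)
+{
+	return CACHE_INVALIDATION(VC_AND_TC) | AUTO_INVLD_EN(ES_AND_GS_AUTO);
+}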
+#define        VGT_ESGS_RING_SIZE                              0x2232
+#define        VGT_GSVS_RING_SIZE                              0x2233
+
+#define        VGT_GS_VERTEX_REUSE                             0x2235
+
+#define        VGT_PRIMITIVE_TYPE                              0x2256
+#define        VGT_INDEX_TYPE                                  0x2257
+
+#define        VGT_NUM_INDICES                                 0x225C
+#define        VGT_NUM_INSTANCES                               0x225D
+
+#define        VGT_TF_RING_SIZE                                0x2262
+
+#define        VGT_HS_OFFCHIP_PARAM                            0x226C
+
+#define        VGT_TF_MEMORY_BASE                              0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG                      0x226F
+#define                INACTIVE_CUS_MASK                       0xFFFF0000
+#define                INACTIVE_CUS_SHIFT                      16
+#define GC_USER_SHADER_ARRAY_CONFIG                    0x2270
+
+#define        PA_CL_ENHANCE                                   0x2285
+#define                CLIP_VTX_REORDER_ENA                            (1 << 0)
+#define                NUM_CLIP_SEQ(x)                                 ((x) << 1)
+
+#define        PA_SU_LINE_STIPPLE_VALUE                        0x2298
+
+#define        PA_SC_LINE_STIPPLE_STATE                        0x22C4
+
+#define        PA_SC_FORCE_EOV_MAX_CNTS                        0x22C9
+#define                FORCE_EOV_MAX_CLK_CNT(x)                        ((x) << 0)
+#define                FORCE_EOV_MAX_REZ_CNT(x)                        ((x) << 16)
+
+#define        PA_SC_FIFO_SIZE                                 0x22F3
+#define                SC_FRONTEND_PRIM_FIFO_SIZE(x)                   ((x) << 0)
+#define                SC_BACKEND_PRIM_FIFO_SIZE(x)                    ((x) << 6)
+#define                SC_HIZ_TILE_FIFO_SIZE(x)                        ((x) << 15)
+#define                SC_EARLYZ_TILE_FIFO_SIZE(x)                     ((x) << 23)
+
+#define        PA_SC_ENHANCE                                   0x22FC
+
+#define        SQ_CONFIG                                       0x2300
+
+#define        SQC_CACHES                                      0x2302
+
+#define SQ_POWER_THROTTLE                               0x2396
+#define                MIN_POWER(x)                            ((x) << 0)
+#define                MIN_POWER_MASK                          (0x3fff << 0)
+#define                MIN_POWER_SHIFT                         0
+#define                MAX_POWER(x)                            ((x) << 16)
+#define                MAX_POWER_MASK                          (0x3fff << 16)
+#define                MAX_POWER_SHIFT                         16
+#define SQ_POWER_THROTTLE2                              0x2397
+#define                MAX_POWER_DELTA(x)                      ((x) << 0)
+#define                MAX_POWER_DELTA_MASK                    (0x3fff << 0)
+#define                MAX_POWER_DELTA_SHIFT                   0
+#define                STI_SIZE(x)                             ((x) << 16)
+#define                STI_SIZE_MASK                           (0x3ff << 16)
+#define                STI_SIZE_SHIFT                          16
+#define                LTI_RATIO(x)                            ((x) << 27)
+#define                LTI_RATIO_MASK                          (0xf << 27)
+#define                LTI_RATIO_SHIFT                         27
+
+#define        SX_DEBUG_1                                      0x2418
+
+#define        SPI_STATIC_THREAD_MGMT_1                        0x2438
+#define        SPI_STATIC_THREAD_MGMT_2                        0x2439
+#define        SPI_STATIC_THREAD_MGMT_3                        0x243A
+#define        SPI_PS_MAX_WAVE_ID                              0x243B
+
+#define        SPI_CONFIG_CNTL                                 0x2440
+
+#define        SPI_CONFIG_CNTL_1                               0x244F
+#define                VTX_DONE_DELAY(x)                               ((x) << 0)
+#define                INTERP_ONE_PRIM_PER_ROW                         (1 << 4)
+
+#define        CGTS_TCC_DISABLE                                0x2452
+#define        CGTS_USER_TCC_DISABLE                           0x2453
+#define                TCC_DISABLE_MASK                                0xFFFF0000
+#define                TCC_DISABLE_SHIFT                               16
+#define        CGTS_SM_CTRL_REG                                0x2454
+#define                OVERRIDE                                (1 << 21)
+#define                LS_OVERRIDE                             (1 << 22)
+
+#define        SPI_LB_CU_MASK                                  0x24D5
+
+#define        TA_CNTL_AUX                                     0x2542
+
+#define CC_RB_BACKEND_DISABLE                          0x263D
+#define                BACKEND_DISABLE(x)                      ((x) << 16)
+#define GB_ADDR_CONFIG                                 0x263E
+#define                NUM_PIPES(x)                            ((x) << 0)
+#define                NUM_PIPES_MASK                          0x00000007
+#define                NUM_PIPES_SHIFT                         0
+#define                PIPE_INTERLEAVE_SIZE(x)                 ((x) << 4)
+#define                PIPE_INTERLEAVE_SIZE_MASK               0x00000070
+#define                PIPE_INTERLEAVE_SIZE_SHIFT              4
+#define                NUM_SHADER_ENGINES(x)                   ((x) << 12)
+#define                NUM_SHADER_ENGINES_MASK                 0x00003000
+#define                NUM_SHADER_ENGINES_SHIFT                12
+#define                SHADER_ENGINE_TILE_SIZE(x)              ((x) << 16)
+#define                SHADER_ENGINE_TILE_SIZE_MASK            0x00070000
+#define                SHADER_ENGINE_TILE_SIZE_SHIFT           16
+#define                NUM_GPUS(x)                             ((x) << 20)
+#define                NUM_GPUS_MASK                           0x00700000
+#define                NUM_GPUS_SHIFT                          20
+#define                MULTI_GPU_TILE_SIZE(x)                  ((x) << 24)
+#define                MULTI_GPU_TILE_SIZE_MASK                0x03000000
+#define                MULTI_GPU_TILE_SIZE_SHIFT               24
+#define                ROW_SIZE(x)                             ((x) << 28)
+#define                ROW_SIZE_MASK                           0x30000000
+#define                ROW_SIZE_SHIFT                          28
+
+#define        GB_TILE_MODE0                                   0x2644
+#       define MICRO_TILE_MODE(x)                              ((x) << 0)
+#              define  ADDR_SURF_DISPLAY_MICRO_TILING          0
+#              define  ADDR_SURF_THIN_MICRO_TILING             1
+#              define  ADDR_SURF_DEPTH_MICRO_TILING            2
+#       define ARRAY_MODE(x)                                   ((x) << 2)
+#              define  ARRAY_LINEAR_GENERAL                    0
+#              define  ARRAY_LINEAR_ALIGNED                    1
+#              define  ARRAY_1D_TILED_THIN1                    2
+#              define  ARRAY_2D_TILED_THIN1                    4
+#       define PIPE_CONFIG(x)                                  ((x) << 6)
+#              define  ADDR_SURF_P2                            0
+#              define  ADDR_SURF_P4_8x16                       4
+#              define  ADDR_SURF_P4_16x16                      5
+#              define  ADDR_SURF_P4_16x32                      6
+#              define  ADDR_SURF_P4_32x32                      7
+#              define  ADDR_SURF_P8_16x16_8x16                 8
+#              define  ADDR_SURF_P8_16x32_8x16                 9
+#              define  ADDR_SURF_P8_32x32_8x16                 10
+#              define  ADDR_SURF_P8_16x32_16x16                11
+#              define  ADDR_SURF_P8_32x32_16x16                12
+#              define  ADDR_SURF_P8_32x32_16x32                13
+#              define  ADDR_SURF_P8_32x64_32x32                14
+#       define TILE_SPLIT(x)                                   ((x) << 11)
+#              define  ADDR_SURF_TILE_SPLIT_64B                0
+#              define  ADDR_SURF_TILE_SPLIT_128B               1
+#              define  ADDR_SURF_TILE_SPLIT_256B               2
+#              define  ADDR_SURF_TILE_SPLIT_512B               3
+#              define  ADDR_SURF_TILE_SPLIT_1KB                4
+#              define  ADDR_SURF_TILE_SPLIT_2KB                5
+#              define  ADDR_SURF_TILE_SPLIT_4KB                6
+#       define BANK_WIDTH(x)                                   ((x) << 14)
+#              define  ADDR_SURF_BANK_WIDTH_1                  0
+#              define  ADDR_SURF_BANK_WIDTH_2                  1
+#              define  ADDR_SURF_BANK_WIDTH_4                  2
+#              define  ADDR_SURF_BANK_WIDTH_8                  3
+#       define BANK_HEIGHT(x)                                  ((x) << 16)
+#              define  ADDR_SURF_BANK_HEIGHT_1                 0
+#              define  ADDR_SURF_BANK_HEIGHT_2                 1
+#              define  ADDR_SURF_BANK_HEIGHT_4                 2
+#              define  ADDR_SURF_BANK_HEIGHT_8                 3
+#       define MACRO_TILE_ASPECT(x)                            ((x) << 18)
+#              define  ADDR_SURF_MACRO_ASPECT_1                0
+#              define  ADDR_SURF_MACRO_ASPECT_2                1
+#              define  ADDR_SURF_MACRO_ASPECT_4                2
+#              define  ADDR_SURF_MACRO_ASPECT_8                3
+#       define NUM_BANKS(x)                                    ((x) << 20)
+#              define  ADDR_SURF_2_BANK                        0
+#              define  ADDR_SURF_4_BANK                        1
+#              define  ADDR_SURF_8_BANK                        2
+#              define  ADDR_SURF_16_BANK                       3
+#define        GB_TILE_MODE1                                   0x2645
+#define        GB_TILE_MODE2                                   0x2646
+#define        GB_TILE_MODE3                                   0x2647
+#define        GB_TILE_MODE4                                   0x2648
+#define        GB_TILE_MODE5                                   0x2649
+#define        GB_TILE_MODE6                                   0x264a
+#define        GB_TILE_MODE7                                   0x264b
+#define        GB_TILE_MODE8                                   0x264c
+#define        GB_TILE_MODE9                                   0x264d
+#define        GB_TILE_MODE10                                  0x264e
+#define        GB_TILE_MODE11                                  0x264f
+#define        GB_TILE_MODE12                                  0x2650
+#define        GB_TILE_MODE13                                  0x2651
+#define        GB_TILE_MODE14                                  0x2652
+#define        GB_TILE_MODE15                                  0x2653
+#define        GB_TILE_MODE16                                  0x2654
+#define        GB_TILE_MODE17                                  0x2655
+#define        GB_TILE_MODE18                                  0x2656
+#define        GB_TILE_MODE19                                  0x2657
+#define        GB_TILE_MODE20                                  0x2658
+#define        GB_TILE_MODE21                                  0x2659
+#define        GB_TILE_MODE22                                  0x265a
+#define        GB_TILE_MODE23                                  0x265b
+#define        GB_TILE_MODE24                                  0x265c
+#define        GB_TILE_MODE25                                  0x265d
+#define        GB_TILE_MODE26                                  0x265e
+#define        GB_TILE_MODE27                                  0x265f
+#define        GB_TILE_MODE28                                  0x2660
+#define        GB_TILE_MODE29                                  0x2661
+#define        GB_TILE_MODE30                                  0x2662
+#define        GB_TILE_MODE31                                  0x2663
+
+#define        CB_PERFCOUNTER0_SELECT0                         0x2688
+#define        CB_PERFCOUNTER0_SELECT1                         0x2689
+#define        CB_PERFCOUNTER1_SELECT0                         0x268A
+#define        CB_PERFCOUNTER1_SELECT1                         0x268B
+#define        CB_PERFCOUNTER2_SELECT0                         0x268C
+#define        CB_PERFCOUNTER2_SELECT1                         0x268D
+#define        CB_PERFCOUNTER3_SELECT0                         0x268E
+#define        CB_PERFCOUNTER3_SELECT1                         0x268F
+
+#define        CB_CGTT_SCLK_CTRL                               0x2698
+
+#define        GC_USER_RB_BACKEND_DISABLE                      0x26DF
+#define                BACKEND_DISABLE_MASK                    0x00FF0000
+#define                BACKEND_DISABLE_SHIFT                   16
+
+#define        TCP_CHAN_STEER_LO                               0x2B03
+#define        TCP_CHAN_STEER_HI                               0x2B94
+
+#define        CP_RB0_BASE                                     0x3040
+#define        CP_RB0_CNTL                                     0x3041
+#define                RB_BUFSZ(x)                                     ((x) << 0)
+#define                RB_BLKSZ(x)                                     ((x) << 8)
+#define                BUF_SWAP_32BIT                                  (2 << 16)
+#define                RB_NO_UPDATE                                    (1 << 27)
+#define                RB_RPTR_WR_ENA                                  (1 << 31)
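+
+/*
+ * Editor's sketch, not part of the original patch: RB_BUFSZ and
+ * RB_BLKSZ take log2 dword counts, so a ring of ring_size bytes is
+ * typically programmed along the lines of:
+ *
+ *      rb_bufsz = order_base_2(ring_size / 8);
+ *      WREG32(CP_RB0_CNTL, RB_NO_UPDATE | RB_BLKSZ(rb_bufsz - 2) |
+ *             RB_BUFSZ(rb_bufsz));
+ *
+ * (order_base_2() from <linux/log2.h>; WREG32 is the driver's MMIO
+ * write accessor.)
+ */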
+
+#define        CP_RB0_RPTR_ADDR                                0x3043
+#define        CP_RB0_RPTR_ADDR_HI                             0x3044
+#define        CP_RB0_WPTR                                     0x3045
+
+#define        CP_PFP_UCODE_ADDR                               0x3054
+#define        CP_PFP_UCODE_DATA                               0x3055
+#define        CP_ME_RAM_RADDR                                 0x3056
+#define        CP_ME_RAM_WADDR                                 0x3057
+#define        CP_ME_RAM_DATA                                  0x3058
+
+#define        CP_CE_UCODE_ADDR                                0x305A
+#define        CP_CE_UCODE_DATA                                0x305B
+
+#define        CP_RB1_BASE                                     0x3060
+#define        CP_RB1_CNTL                                     0x3061
+#define        CP_RB1_RPTR_ADDR                                0x3062
+#define        CP_RB1_RPTR_ADDR_HI                             0x3063
+#define        CP_RB1_WPTR                                     0x3064
+#define        CP_RB2_BASE                                     0x3065
+#define        CP_RB2_CNTL                                     0x3066
+#define        CP_RB2_RPTR_ADDR                                0x3067
+#define        CP_RB2_RPTR_ADDR_HI                             0x3068
+#define        CP_RB2_WPTR                                     0x3069
+#define CP_INT_CNTL_RING0                               0x306A
+#define CP_INT_CNTL_RING1                               0x306B
+#define CP_INT_CNTL_RING2                               0x306C
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0x306D
+#define CP_INT_STATUS_RING1                             0x306E
+#define CP_INT_STATUS_RING2                             0x306F
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define        CP_MEM_SLP_CNTL                                 0x3079
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define        CP_DEBUG                                        0x307F
+
+#define RLC_CNTL                                          0x30C0
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0x30C1
+#define RLC_RL_SIZE                                       0x30C2
+#define RLC_LB_CNTL                                       0x30C3
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE                         0x30C4
+#define RLC_LB_CNTR_MAX                                   0x30C5
+#define RLC_LB_CNTR_INIT                                  0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0x30C8
+
+#define RLC_UCODE_ADDR                                    0x30CB
+#define RLC_UCODE_DATA                                    0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB                           0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0x30D0
+#define RLC_MC_CNTL                                       0x30D1
+#define RLC_UCODE_CNTL                                    0x30D2
+#define RLC_STAT                                          0x30D3
+#       define RLC_BUSY_STATUS                            (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+#       define GFX_LS_STATUS                              (1 << 3)
+
+#define        RLC_PG_CNTL                                     0x30D7
+#      define GFX_PG_ENABLE                            (1 << 0)
+#      define GFX_PG_SRC                               (1 << 1)
+
+#define        RLC_CGTT_MGCG_OVERRIDE                          0x3100
+#define        RLC_CGCG_CGLS_CTRL                              0x3101
+#      define CGCG_EN                                  (1 << 0)
+#      define CGLS_EN                                  (1 << 1)
+
+#define        RLC_TTOP_D                                      0x3105
+#      define RLC_PUD(x)                               ((x) << 0)
+#      define RLC_PUD_MASK                             (0xff << 0)
+#      define RLC_PDD(x)                               ((x) << 8)
+#      define RLC_PDD_MASK                             (0xff << 8)
+#      define RLC_TTPD(x)                              ((x) << 16)
+#      define RLC_TTPD_MASK                            (0xff << 16)
+#      define RLC_MSD(x)                               ((x) << 24)
+#      define RLC_MSD_MASK                             (0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK                               0x3107
+
+#define        RLC_PG_AO_CU_MASK                               0x310B
+#define        RLC_MAX_PG_CU                                   0x310C
+#      define MAX_PU_CU(x)                             ((x) << 0)
+#      define MAX_PU_CU_MASK                           (0xff << 0)
+#define        RLC_AUTO_PG_CTRL                                0x310C
+#      define AUTO_PG_EN                               (1 << 0)
+#      define GRBM_REG_SGIT(x)                         ((x) << 3)
+#      define GRBM_REG_SGIT_MASK                       (0xffff << 3)
+#      define PG_AFTER_GRBM_REG_ST(x)                  ((x) << 19)
+#      define PG_AFTER_GRBM_REG_ST_MASK                (0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0                       0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1                       0x3116
+#define RLC_SERDES_WR_CTRL                                0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0                          0x3119
+#define RLC_SERDES_MASTER_BUSY_1                          0x311A
+
+#define RLC_GCPM_GENERAL_3                                0x311E
+
+#define        DB_RENDER_CONTROL                               0xA000
+
+#define DB_DEPTH_INFO                                   0xA00F
+
+#define PA_SC_RASTER_CONFIG                             0xA0D4
+#      define RB_MAP_PKR0(x)                           ((x) << 0)
+#      define RB_MAP_PKR0_MASK                         (0x3 << 0)
+#      define RB_MAP_PKR1(x)                           ((x) << 2)
+#      define RB_MAP_PKR1_MASK                         (0x3 << 2)
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+#      define RB_XSEL2(x)                              ((x) << 4)
+#      define RB_XSEL2_MASK                            (0x3 << 4)
+#      define RB_XSEL                                  (1 << 6)
+#      define RB_YSEL                                  (1 << 7)
+#      define PKR_MAP(x)                               ((x) << 8)
+#      define PKR_MAP_MASK                             (0x3 << 8)
+#       define RASTER_CONFIG_PKR_MAP_0                 0
+#       define RASTER_CONFIG_PKR_MAP_1                 1
+#       define RASTER_CONFIG_PKR_MAP_2                 2
+#       define RASTER_CONFIG_PKR_MAP_3                 3
+#      define PKR_XSEL(x)                              ((x) << 10)
+#      define PKR_XSEL_MASK                            (0x3 << 10)
+#      define PKR_YSEL(x)                              ((x) << 12)
+#      define PKR_YSEL_MASK                            (0x3 << 12)
+#      define SC_MAP(x)                                ((x) << 16)
+#      define SC_MAP_MASK                              (0x3 << 16)
+#      define SC_XSEL(x)                               ((x) << 18)
+#      define SC_XSEL_MASK                             (0x3 << 18)
+#      define SC_YSEL(x)                               ((x) << 20)
+#      define SC_YSEL_MASK                             (0x3 << 20)
+#      define SE_MAP(x)                                ((x) << 24)
+#      define SE_MAP_MASK                              (0x3 << 24)
+#       define RASTER_CONFIG_SE_MAP_0                  0
+#       define RASTER_CONFIG_SE_MAP_1                  1
+#       define RASTER_CONFIG_SE_MAP_2                  2
+#       define RASTER_CONFIG_SE_MAP_3                  3
+#      define SE_XSEL(x)                               ((x) << 26)
+#      define SE_XSEL_MASK                             (0x3 << 26)
+#      define SE_YSEL(x)                               ((x) << 28)
+#      define SE_YSEL_MASK                             (0x3 << 28)
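+
+/*
+ * Editor's sketch, not part of the original patch: PA_SC_RASTER_CONFIG
+ * is likewise assembled from the field macros above; an illustrative
+ * (not hardware-validated) combination:
+ *
+ *      RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_2) |
+ *      PKR_MAP(RASTER_CONFIG_PKR_MAP_2) |
+ *      SE_MAP(RASTER_CONFIG_SE_MAP_2) |
+ *      SE_XSEL(1) | SE_YSEL(1)
+ */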
+
+
+#define VGT_EVENT_INITIATOR                             0xA2A4
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PB0_PIF_PWRDOWN_2                                 0x17
+#       define PLL_POWER_STATE_IN_TXS2_2(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_2(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_SHIFT             10
+#       define PLL_RAMP_UP_TIME_2(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_2_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_2_SHIFT                   24
+#define PB0_PIF_PWRDOWN_3                                 0x18
+#       define PLL_POWER_STATE_IN_TXS2_3(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_3(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_SHIFT             10
+#       define PLL_RAMP_UP_TIME_3(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_3_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_3_SHIFT                   24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+
+#define PB1_PIF_PWRDOWN_2                                 0x17
+#define PB1_PIF_PWRDOWN_3                                 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2                                        0x1c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+#define PCIE_LC_STATUS1                                   0x28 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x40 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
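+
+/*
+ * Editor's sketch, not part of the original patch: the negotiated
+ * width is read back from the RD field, where 0/1/2/3/4/6 encode
+ * x0/x1/x2/x4/x8/x16 as listed above:
+ *
+ *      width = (RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) &
+ *               LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ *
+ * (RREG32_PCIE_PORT being the driver's indirect PCIE_P read accessor.)
+ */
+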
+#define PCIE_LC_N_FTS_CNTL                                0xa3 /* PCIE_P */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
+
+#define PCIE_LC_CNTL2                                     0xb1
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0xb5 /* PCIE_P */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0xb6 /* PCIE_P */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG                           0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG                                0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG                       0x3bd5
+#define UVD_RBC_RB_RPTR                                        0x3da4
+#define UVD_RBC_RB_WPTR                                        0x3da5
+#define UVD_STATUS                                     0x3daf
+
+#define        UVD_CGC_CTRL                                    0x3dc2
+#      define DCM                                      (1 << 0)
+#      define CG_DT(x)                                 ((x) << 2)
+#      define CG_DT_MASK                               (0xf << 2)
+#      define CLK_OD(x)                                ((x) << 6)
+#      define CLK_OD_MASK                              (0x1f << 6)
+
+/* UVD CTX indirect */
+#define        UVD_CGC_MEM_CTRL                                0xC0
+#define        UVD_CGC_CTRL2                                   0xC1
+#      define DYN_OR_EN                                (1 << 0)
+#      define DYN_RR_EN                                (1 << 1)
+#      define G_DIV_ID(x)                              ((x) << 2)
+#      define G_DIV_ID_MASK                            (0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)        ((RADEON_PACKET_TYPE0 << 30) |                  \
+                        (((reg) >> 2) & 0xFFFF) |                      \
+                        ((n) & 0x3FFF) << 16)
+#define CP_PACKET2                     0x80000000
+#define                PACKET2_PAD_SHIFT               0
+#define                PACKET2_PAD_MASK                (0x3fffffff << 0)
+
+#define PACKET2(v)     (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) |                  \
+                        (((op) & 0xFF) << 8) |                         \
+                        ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
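+
+/*
+ * Editor's worked example, not part of the original patch: for a
+ * type-3 packet the count field holds the payload length minus one,
+ * so a NOP carrying a single data dword is
+ * PACKET3(0x10, 0) == (3 << 30) | (0x10 << 8) == 0xC0001000.
+ */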
+
+/* Packet 3 types */
+#define        PACKET3_NOP                                     0x10
+#define        PACKET3_SET_BASE                                0x11
+#define                PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define                        GDS_PARTITION_BASE              2
+#define                        CE_PARTITION_BASE               3
+#define        PACKET3_CLEAR_STATE                             0x12
+#define        PACKET3_INDEX_BUFFER_SIZE                       0x13
+#define        PACKET3_DISPATCH_DIRECT                         0x15
+#define        PACKET3_DISPATCH_INDIRECT                       0x16
+#define        PACKET3_ALLOC_GDS                               0x1B
+#define        PACKET3_WRITE_GDS_RAM                           0x1C
+#define        PACKET3_ATOMIC_GDS                              0x1D
+#define        PACKET3_ATOMIC                                  0x1E
+#define        PACKET3_OCCLUSION_QUERY                         0x1F
+#define        PACKET3_SET_PREDICATION                         0x20
+#define        PACKET3_REG_RMW                                 0x21
+#define        PACKET3_COND_EXEC                               0x22
+#define        PACKET3_PRED_EXEC                               0x23
+#define        PACKET3_DRAW_INDIRECT                           0x24
+#define        PACKET3_DRAW_INDEX_INDIRECT                     0x25
+#define        PACKET3_INDEX_BASE                              0x26
+#define        PACKET3_DRAW_INDEX_2                            0x27
+#define        PACKET3_CONTEXT_CONTROL                         0x28
+#define        PACKET3_INDEX_TYPE                              0x2A
+#define        PACKET3_DRAW_INDIRECT_MULTI                     0x2C
+#define        PACKET3_DRAW_INDEX_AUTO                         0x2D
+#define        PACKET3_DRAW_INDEX_IMMD                         0x2E
+#define        PACKET3_NUM_INSTANCES                           0x2F
+#define        PACKET3_DRAW_INDEX_MULTI_AUTO                   0x30
+#define        PACKET3_INDIRECT_BUFFER_CONST                   0x31
+#define        PACKET3_INDIRECT_BUFFER                         0x3F
+#define        PACKET3_STRMOUT_BUFFER_UPDATE                   0x34
+#define        PACKET3_DRAW_INDEX_OFFSET_2                     0x35
+#define        PACKET3_DRAW_INDEX_MULTI_ELEMENT                0x36
+#define        PACKET3_WRITE_DATA                              0x37
+#define                WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+                * 1 - memory (sync - via GRBM)
+                * 2 - tc/l2
+                * 3 - gds
+                * 4 - reserved
+                * 5 - memory (async - direct)
+                */
+#define                WR_ONE_ADDR                             (1 << 16)
+#define                WR_CONFIRM                              (1 << 20)
+#define                WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+                * 1 - pfp
+                * 2 - ce
+                */
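+
+/*
+ * Editor's sketch, not part of the original patch: a confirmed write
+ * of one dword to memory (DST_SEL 5, async) would be emitted as the
+ * five dwords
+ *
+ *      PACKET3(PACKET3_WRITE_DATA, 3)
+ *      WRITE_DATA_DST_SEL(5) | WR_CONFIRM
+ *      lower_32_bits(dst_addr)
+ *      upper_32_bits(dst_addr)
+ *      data
+ */
+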
+#define        PACKET3_DRAW_INDEX_INDIRECT_MULTI               0x38
+#define        PACKET3_MEM_SEMAPHORE                           0x39
+#define        PACKET3_MPEG_INDEX                              0x3A
+#define        PACKET3_COPY_DW                                 0x3B
+#define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+                * 1 - pfp
+                */
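+
+/*
+ * Editor's sketch, not part of the original patch: polling memory for
+ * equality from the ME takes six payload dwords:
+ *
+ *      PACKET3(PACKET3_WAIT_REG_MEM, 5)
+ *      WAIT_REG_MEM_FUNCTION(3) | WAIT_REG_MEM_MEM_SPACE(1)
+ *      lower_32_bits(addr)
+ *      upper_32_bits(addr)
+ *      reference value
+ *      mask
+ *      poll interval
+ */
+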
+#define        PACKET3_MEM_WRITE                               0x3D
+#define        PACKET3_COPY_DATA                               0x40
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
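+
+/*
+ * Editor's sketch, not part of the original patch: following the
+ * six-dword layout above, a plain mem-to-mem copy is
+ *
+ *      PACKET3(PACKET3_CP_DMA, 4)
+ *      lower_32_bits(src)
+ *      PACKET3_CP_DMA_SRC_SEL(0) | (upper_32_bits(src) & 0xff)
+ *      lower_32_bits(dst)
+ *      upper_32_bits(dst) & 0xff
+ *      byte_count & 0x1fffff
+ */
+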
+#define        PACKET3_PFP_SYNC_ME                             0x42
+#define        PACKET3_SURFACE_SYNC                            0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define        PACKET3_ME_INITIALIZE                           0x44
+#define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define        PACKET3_COND_WRITE                              0x45
+#define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                * 1 - ZPASS_DONE
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - EOP events
+                * 6 - EOS events
+                * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+                */
+#define                INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
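+
+/*
+ * Editor's sketch, not part of the original patch: a simple
+ * partial-flush event (EVENT_INDEX 4 per the table above) is two
+ * dwords:
+ *
+ *      PACKET3(PACKET3_EVENT_WRITE, 0)
+ *      EVENT_TYPE(VS_PARTIAL_FLUSH) | EVENT_INDEX(4)
+ */
+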
+#define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
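+
+/*
+ * Editor's sketch, not part of the original patch: an end-of-pipe
+ * fence writing a 64-bit sequence number and interrupting once the
+ * write lands (DATA_SEL 2, INT_SEL 2, EVENT_INDEX 5):
+ *
+ *      PACKET3(PACKET3_EVENT_WRITE_EOP, 4)
+ *      EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)
+ *      lower_32_bits(addr)
+ *      (upper_32_bits(addr) & 0xff) | DATA_SEL(2) | INT_SEL(2)
+ *      lower_32_bits(seq)
+ *      upper_32_bits(seq)
+ */
+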
+#define        PACKET3_EVENT_WRITE_EOS                         0x48
+#define        PACKET3_PREAMBLE_CNTL                           0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define        PACKET3_ONE_REG_WRITE                           0x57
+#define        PACKET3_LOAD_CONFIG_REG                         0x5F
+#define        PACKET3_LOAD_CONTEXT_REG                        0x60
+#define        PACKET3_LOAD_SH_REG                             0x61
+#define        PACKET3_SET_CONFIG_REG                          0x68
+#define                PACKET3_SET_CONFIG_REG_START                    0x00002000
+#define                PACKET3_SET_CONFIG_REG_END                      0x00002c00
+#define        PACKET3_SET_CONTEXT_REG                         0x69
+#define                PACKET3_SET_CONTEXT_REG_START                   0x0000a000
+#define                PACKET3_SET_CONTEXT_REG_END                     0x000a400
+#define        PACKET3_SET_CONTEXT_REG_INDIRECT                0x73
+#define        PACKET3_SET_RESOURCE_INDIRECT                   0x74
+#define        PACKET3_SET_SH_REG                              0x76
+#define                PACKET3_SET_SH_REG_START                        0x00002c00
+#define                PACKET3_SET_SH_REG_END                          0x00003000
+#define        PACKET3_SET_SH_REG_OFFSET                       0x77
+#define        PACKET3_ME_WRITE                                0x7A
+#define        PACKET3_SCRATCH_RAM_WRITE                       0x7D
+#define        PACKET3_SCRATCH_RAM_READ                        0x7E
+#define        PACKET3_CE_WRITE                                0x7F
+#define        PACKET3_LOAD_CONST_RAM                          0x80
+#define        PACKET3_WRITE_CONST_RAM                         0x81
+#define        PACKET3_WRITE_CONST_RAM_OFFSET                  0x82
+#define        PACKET3_DUMP_CONST_RAM                          0x83
+#define        PACKET3_INCREMENT_CE_COUNTER                    0x84
+#define        PACKET3_INCREMENT_DE_COUNTER                    0x85
+#define        PACKET3_WAIT_ON_CE_COUNTER                      0x86
+#define        PACKET3_WAIT_ON_DE_COUNTER                      0x87
+#define        PACKET3_WAIT_ON_DE_COUNTER_DIFF                 0x88
+#define        PACKET3_SET_CE_DE_COUNTERS                      0x89
+#define        PACKET3_WAIT_ON_AVAIL_BUFFER                    0x8A
+#define        PACKET3_SWITCH_BUFFER                           0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x200 /* not a register */
+
+#define DMA_RB_CNTL                                       0x3400
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
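+
+/*
+ * Editor's sketch, not part of the original patch: like the CP ring,
+ * the size field is a log2 dword count, e.g. for a 64 KiB ring on
+ * instance 0:
+ *
+ *      WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET,
+ *             DMA_RB_ENABLE | DMA_RB_SIZE(order_base_2(65536 / 4)));
+ */
+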
+#define DMA_RB_BASE                                       0x3401
+#define DMA_RB_RPTR                                       0x3402
+#define DMA_RB_WPTR                                       0x3403
+
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+
+#define DMA_IB_CNTL                                       0x3409
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0x340a
+#define DMA_CNTL                                          0x340b
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0x340d
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG                                0x342e
+
+#define        DMA_POWER_CNTL                                  0x342f
+#       define MEM_POWER_OVERRIDE                       (1 << 8)
+#define        DMA_CLK_CTRL                                    0x3430
+
+#define        DMA_PG                                          0x3435
+#      define PG_CNTL_ENABLE                           (1 << 0)
+#define        DMA_PGFSM_CONFIG                                0x3436
+#define        DMA_PGFSM_WRITE                                 0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((b) & 0x1) << 26) |          \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)          ((2 << 28) |                    \
+                                        (1 << 26) |                    \
+                                        (1 << 21) |                    \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_POLL_REG_MEM                           0xe
+#define        DMA_PACKET_NOP                                    0xf
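+
+/*
+ * Editor's sketch, not part of the original patch: a DMA fence is the
+ * header plus address and value, e.g.
+ *
+ *      DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)
+ *      lower_32_bits(addr)
+ *      upper_32_bits(addr) & 0xff
+ *      seq
+ */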
+
+#define VCE_STATUS                                     0x20004
+#define VCE_VCPU_CNTL                                  0x20014
+#define                VCE_CLK_EN                              (1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0                         0x20024
+#define VCE_VCPU_CACHE_SIZE0                           0x20028
+#define VCE_VCPU_CACHE_OFFSET1                         0x2002c
+#define VCE_VCPU_CACHE_SIZE1                           0x20030
+#define VCE_VCPU_CACHE_OFFSET2                         0x20034
+#define VCE_VCPU_CACHE_SIZE2                           0x20038
+#define VCE_SOFT_RESET                                 0x20120
+#define        VCE_ECPU_SOFT_RESET                     (1 << 0)
+#define        VCE_FME_SOFT_RESET                      (1 << 2)
+#define VCE_RB_BASE_LO2                                        0x2016c
+#define VCE_RB_BASE_HI2                                        0x20170
+#define VCE_RB_SIZE2                                   0x20174
+#define VCE_RB_RPTR2                                   0x20178
+#define VCE_RB_WPTR2                                   0x2017c
+#define VCE_RB_BASE_LO                                 0x20180
+#define VCE_RB_BASE_HI                                 0x20184
+#define VCE_RB_SIZE                                    0x20188
+#define VCE_RB_RPTR                                    0x2018c
+#define VCE_RB_WPTR                                    0x20190
+#define VCE_CLOCK_GATING_A                             0x202f8
+#define VCE_CLOCK_GATING_B                             0x202fc
+#define VCE_UENC_CLOCK_GATING                          0x205bc
+#define VCE_UENC_REG_CLOCK_GATING                      0x205c0
+#define VCE_FW_REG_STATUS                              0x20e10
+#      define VCE_FW_REG_STATUS_BUSY                   (1 << 0)
+#      define VCE_FW_REG_STATUS_PASS                   (1 << 3)
+#      define VCE_FW_REG_STATUS_DONE                   (1 << 11)
+#define VCE_LMI_FW_START_KEYSEL                                0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL                       0x20e20
+#define VCE_LMI_CTRL2                                  0x20e74
+#define VCE_LMI_CTRL                                   0x20e98
+#define VCE_LMI_VM_CTRL                                        0x20ea0
+#define VCE_LMI_SWAP_CNTL                              0x20eb4
+#define VCE_LMI_SWAP_CNTL1                             0x20eb8
+#define VCE_LMI_CACHE_CTRL                             0x20ef4
+
+#define VCE_CMD_NO_OP                                  0x00000000
+#define VCE_CMD_END                                    0x00000001
+#define VCE_CMD_IB                                     0x00000002
+#define VCE_CMD_FENCE                                  0x00000003
+#define VCE_CMD_TRAP                                   0x00000004
+#define VCE_CMD_IB_AUTO                                        0x00000005
+#define VCE_CMD_SEMAPHORE                              0x00000006
+
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET                0 //(0x6df0 - 0x6df0)/4
+#define SI_CRTC1_REGISTER_OFFSET                0x300 //(0x79f0 - 0x6df0)/4
+#define SI_CRTC2_REGISTER_OFFSET                0x2600 //(0x105f0 - 0x6df0)/4
+#define SI_CRTC3_REGISTER_OFFSET                0x2900 //(0x111f0 - 0x6df0)/4
+#define SI_CRTC4_REGISTER_OFFSET                0x2c00 //(0x11df0 - 0x6df0)/4
+#define SI_CRTC5_REGISTER_OFFSET                0x2f00 //(0x129f0 - 0x6df0)/4
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX                        0x0000
+#define AMDGPU_MM_DATA                 0x0001
+
+#define VERDE_NUM_CRTC 6
+#define        BLACKOUT_MODE_MASK                      0x00000007
+#define        VGA_RENDER_CONTROL                      0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT                           0x1ac0
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
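+
+/*
+ * Editor's sketch, not part of the original patch (register offsets
+ * for DISP_INTERRUPT_STATUS and DC_HPD1_INT_CONTROL are defined
+ * elsewhere in the driver): CRTC0 vblank is tested and HPD1
+ * acknowledged with, e.g.:
+ *
+ *      if (RREG32(DISP_INTERRUPT_STATUS) &
+ *          DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK)
+ *              ...
+ *      WREG32(DC_HPD1_INT_CONTROL, RREG32(DC_HPD1_INT_CONTROL) |
+ *             DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK);
+ */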
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL                                    0x00cc
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE            (1 << 0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT          (1 << 8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT   (1 << 9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN      (1 << 16)
+#       define AVIVO_DVGA_CONTROL_ROTATE                 (1 << 24)
+#define AVIVO_D2VGA_CONTROL                                    0x00ce
+
+#define R600_BUS_CNTL                                           0x1508
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+
+#define R600_ROM_CNTL                              0x580
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL                0x1bf2
+#define FMT_TRUNCATE_EN               (1 << 0)
+#define FMT_TRUNCATE_DEPTH            (1 << 4)
+#define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL            (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#define FMT_25FRC_SEL(x)              ((x) << 26)
+#define FMT_50FRC_SEL(x)              ((x) << 28)
+#define FMT_75FRC_SEL(x)              ((x) << 30)
+
+#define EVERGREEN_DC_LUT_CONTROL                        0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR                       0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE                        0x1a78
+
+#define EVERGREEN_GRPH_ENABLE                           0x1a00
+#define EVERGREEN_GRPH_CONTROL                          0x1a01
+#define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP                0
+#define EVERGREEN_GRPH_DEPTH_16BPP               1
+#define EVERGREEN_GRPH_DEPTH_32BPP               2
+#define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK               0
+#define EVERGREEN_ADDR_SURF_4_BANK               1
+#define EVERGREEN_ADDR_SURF_8_BANK               2
+#define EVERGREEN_ADDR_SURF_16_BANK              3
+#define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+
+#define EVERGREEN_GRPH_FORMAT_INDEXED            0
+#define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#define EVERGREEN_GRPH_FORMAT_AI88               3
+#define EVERGREEN_GRPH_FORMAT_MONO16             4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
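+
+/*
+ * Editor's sketch, not part of the original patch: a linear 32bpp
+ * ARGB8888 scanout surface programs GRPH_CONTROL roughly as
+ *
+ *      EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ *      EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888) |
+ *      EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED)
+ */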
+
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+
+#define EVERGREEN_D3VGA_CONTROL                         0xf8
+#define EVERGREEN_D4VGA_CONTROL                         0xf9
+#define EVERGREEN_D5VGA_CONTROL                         0xfa
+#define EVERGREEN_D6VGA_CONTROL                         0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH                            0x1a06
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x1a0a
+#define EVERGREEN_GRPH_X_START                          0x1a0b
+#define EVERGREEN_GRPH_Y_START                          0x1a0c
+#define EVERGREEN_GRPH_X_END                            0x1a0d
+#define EVERGREEN_GRPH_Y_END                            0x1a0e
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START                        0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE                         0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT                        0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x1a66
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x1a67
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x1a69
+#define EVERGREEN_CUR_POSITION                          0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT                          0x1a6b
+#define EVERGREEN_CUR_COLOR1                            0x1a6c
+#define EVERGREEN_CUR_COLOR2                            0x1a6d
+#define EVERGREEN_CUR_UPDATE                            0x1a6e
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+
+#define NI_INPUT_CSC_CONTROL                           0x1a35
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x1a3c
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x1a58
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x1a59
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x1aa0
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x1a2d
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x1a31
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL                         0x1a10
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK   0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK      0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK    0x400
+
+#define        BLACKOUT_MODE_MASK                      0x00000007
+#define        VGA_RENDER_CONTROL                      0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL     (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT       (0x7 << 29)
+
+#define CONFIG_CNTL    0x1509
+#define CC_DRM_ID_STRAPS       0x1559
+#define AMDGPU_PCIE_INDEX      0xc
+#define AMDGPU_PCIE_DATA       0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0x3412
+#define DMA_MODE                                          0x342f
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0x40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK    10000
+#define TCLK            (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK              0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define        PCIE_PORT_INDEX                                 0xe
+#define        PCIE_PORT_DATA                                  0xf
+#define EVERGREEN_PIF_PHY0_INDEX                        0x8
+#define EVERGREEN_PIF_PHY0_DATA                         0xc
+#define EVERGREEN_PIF_PHY1_INDEX                        0x10
+#define EVERGREEN_PIF_PHY1_DATA                                0x14
+
+#define        MC_VM_FB_OFFSET                                 0x81a
+
+#endif
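
The paired _MASK/__SHIFT defines above follow the usual register-bitfield convention: mask the field out, then shift it down. A minimal decoding sketch, assuming only a kernel-style u32 and a fault-status word already read from the register by the caller:

    /* Extract the VMID field from a VM_CONTEXT1_PROTECTION_FAULT_STATUS
     * value using the mask/shift pair defined above.
     */
    static inline u32 fault_status_vmid(u32 fault_status)
    {
            return (fault_status &
                    mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK) >>
                    mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT;
    }

The same pattern applies to every mask/shift pair in this header, e.g. the MEMORY_CLIENT_ID field or the SRBM_STATUS busy bits.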
index f3e53b118361dd13dc7f1558450db8f5158e6257..19802e96417e0e3212b7c4458bd4ab03cee0f9f9 100644 (file)
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_SEMA_CNTL                                                         0x3d00
 #define mmUVD_LMI_EXT40_ADDR                                                    0x3d26
 #define mmUVD_CTX_INDEX                                                         0x3d28
index eb4cf53427da31d10c8241f966382a28f74841dc..cc972d237a7e47a617610bab93fd69a1118f271e 100644 (file)
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
index ec69869c55ff530da2b00a1dae3833896ce07b5b..378f4b6b43da6821d080d07b8587e89d15457e24 100644 (file)
@@ -35,6 +35,7 @@
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
 #define mmUVD_POWER_STATUS_U                                                    0x3bfd
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
index 3493da5c8f0efcaef97f2b07145d91097665b703..4a4d3797a6d31e4fbac2283d857a0f21c965698d 100644 (file)
@@ -494,6 +494,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
@@ -526,6 +527,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
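
The new ulClockParams member lets big-endian hosts fill the 32-bit parameter union with a single endian-converted store instead of writing the ATOM_COMPUTE_CLOCK_FREQ bitfields piecewise. A hedged sketch of the intended access pattern (args, clock_type and clock_freq are illustrative names, not taken from this change):

    /* Pack the whole parameter word once, in little-endian layout,
     * so the in-memory format matches on big-endian CPUs. Variable
     * names here are hypothetical.
     */
    args.ulClockParams = cpu_to_le32((clock_type << 24) | clock_freq);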
old mode 100644 (file)
new mode 100755 (executable)
index b86aba9..df7c18b
@@ -119,6 +119,8 @@ enum cgs_system_info_id {
        CGS_SYSTEM_INFO_PG_FLAGS,
        CGS_SYSTEM_INFO_GFX_CU_INFO,
        CGS_SYSTEM_INFO_GFX_SE_INFO,
+       CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+       CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
        CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
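
The two new CGS_SYSTEM_INFO_PCIE_SUB_SYS* ids are queried the same way as the existing entries. A minimal sketch, assuming the existing cgs_query_system_info() entry point and an opaque cgs device handle:

    /* Hypothetical query of the PCIe subsystem id through CGS. */
    struct cgs_system_info sys_info = {0};
    uint32_t sub_sys_id;
    int result;

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
    result = cgs_query_system_info(device, &sys_info);
    if (!result)
            sub_sys_id = (uint32_t)sys_info.value;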
 
@@ -159,6 +161,7 @@ struct cgs_clock_limits {
  */
 struct cgs_firmware_info {
        uint16_t                version;
+       uint16_t                fw_version;
        uint16_t                feature_version;
        uint32_t                image_size;
        uint64_t                mc_addr;
index abbb658bdc1e9031f0f5e12bfbbce3a052f1cc58..7174f7a68266242651e5c451f299c57996865e90 100644 (file)
@@ -31,6 +31,7 @@
 #include "eventmanager.h"
 #include "pp_debug.h"
 
+
 #define PP_CHECK(handle)                                               \
        do {                                                            \
                if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
@@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle)
        pp_handle = (struct pp_instance *)handle;
        eventmgr = pp_handle->eventmgr;
 
-       if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
+       if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
                eventmgr->pp_eventmgr_fini(eventmgr);
 
        smumgr = pp_handle->smu_mgr;
 
-       if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
+       if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
                smumgr->smumgr_funcs->smu_fini != NULL)
                smumgr->smumgr_funcs->smu_fini(smumgr);
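
Both conditions above used the wrong operator: with ||, a NULL eventmgr (or smumgr) still evaluated the right-hand operand and dereferenced the NULL pointer. && short-circuits, so the method pointer is only read once the object pointer is known valid:

    /* Equivalent guard: the second test runs only when the first
     * has already passed, so no NULL dereference is possible.
     */
    if (eventmgr && eventmgr->pp_eventmgr_fini)
            eventmgr->pp_eventmgr_fini(eventmgr);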
 
@@ -190,11 +191,9 @@ static int pp_sw_reset(void *handle)
 }
 
 
-static int pp_set_clockgating_state(void *handle,
-                                   enum amd_clockgating_state state)
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
        struct pp_hwmgr  *hwmgr;
-       uint32_t msg_id, pp_state;
 
        if (handle == NULL)
                return -EINVAL;
@@ -208,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
                return 0;
        }
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
-
-       /* Enable/disable GFX blocks clock gating through SMU */
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_3D,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_RLC,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CP,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_MG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
-       /* Enable/disable System blocks clock gating through SMU */
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_BIF,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_BIF,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_MC,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_ROM,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_DRM,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_HDP,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                       PP_BLOCK_SYS_SDMA,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
-       return 0;
+       return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 }
 
 static int pp_set_powergating_state(void *handle,
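
amd_set_clockgating_by_smu() moves msg_id construction to the caller instead of iterating every GFX and SYS block internally. Callers compose the message with the same PP_CG_MSG_ID() helper the removed loop used; a hedged sketch of one such call (the caller context is illustrative, not part of this change):

    /* Gate the GFX CG block: msg_id encodes group, block, the
     * supported-state flags and the requested state.
     */
    uint32_t msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
                                   PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
                                   PP_STATE_CG | PP_STATE_LS);

    amd_set_clockgating_by_smu(pp_handle, msg_id);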
@@ -361,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
        .is_idle = pp_is_idle,
        .wait_for_idle = pp_wait_for_idle,
        .soft_reset = pp_sw_reset,
-       .set_clockgating_state = pp_set_clockgating_state,
+       .set_clockgating_state = NULL,
        .set_powergating_state = pp_set_powergating_state,
 };
 
@@ -537,7 +467,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
        case AMD_PP_EVENT_READJUST_POWER_STATE:
-               pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
        default:
@@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
        }
 }
 
-static void
-pp_debugfs_print_current_performance_level(void *handle,
-                                              struct seq_file *m)
-{
-       struct pp_hwmgr  *hwmgr;
-
-       if (handle == NULL)
-               return;
-
-       hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
-       if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
-               return;
-
-       if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
-               printk(KERN_INFO "%s was not implemented.\n", __func__);
-               return;
-       }
-
-       hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
-}
-
 static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
        struct pp_hwmgr  *hwmgr;
@@ -764,15 +671,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
        PP_CHECK_HW(hwmgr);
 
        if (!hwmgr->hardcode_pp_table) {
-               hwmgr->hardcode_pp_table =
-                               kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+               hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+                                                  hwmgr->soft_pp_table_size,
+                                                  GFP_KERNEL);
 
                if (!hwmgr->hardcode_pp_table)
                        return -ENOMEM;
-
-               /* to avoid powerplay crash when hardcode pptable is empty */
-               memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
-                               hwmgr->soft_pp_table_size);
        }
 
        memcpy(hwmgr->hardcode_pp_table, buf, size);
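
kmemdup() folds the removed kzalloc()+memcpy() pair into a single call with the same semantics; the zeroing pass of kzalloc() was redundant since every byte was overwritten anyway. The open-coded form it replaces, in generic terms (dst/src/size are placeholders):

    /* What kmemdup(src, size, GFP_KERNEL) stands in for: */
    dst = kzalloc(size, GFP_KERNEL);
    if (dst)
            memcpy(dst, src, size);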
@@ -897,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
        return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 }
 
+static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->read_sensor == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
        .get_temperature = pp_dpm_get_temperature,
        .load_firmware = pp_dpm_load_fw,
@@ -909,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
        .powergate_vce = pp_dpm_powergate_vce,
        .powergate_uvd = pp_dpm_powergate_uvd,
        .dispatch_tasks = pp_dpm_dispatch_tasks,
-       .print_current_performance_level = pp_debugfs_print_current_performance_level,
        .set_fan_control_mode = pp_dpm_set_fan_control_mode,
        .get_fan_control_mode = pp_dpm_get_fan_control_mode,
        .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -923,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
        .set_sclk_od = pp_dpm_set_sclk_od,
        .get_mclk_od = pp_dpm_get_mclk_od,
        .set_mclk_od = pp_dpm_set_mclk_od,
+       .read_sensor = pp_dpm_read_sensor,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
index 635fc4b4818484c1741d1404024452382c8a5fcc..92b1178438755ab4e981dfa14741f62a3353da9f 100644 (file)
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
        unblock_adjust_power_state_tasks,
        set_cpu_power_state,
        notify_hw_power_source_tasks,
+       get_2d_performance_state_tasks,
+       set_performance_state_tasks,
        /* updateDALConfigurationTasks,
        variBrightDisplayConfigurationChangeTasks, */
        adjust_power_state_tasks,
index a46225c0fc012080105071fc4a1bf00cbc871c31..489908887e9c08f90cb7c4d83e84a1883c0d68ef 100644 (file)
@@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
        int i;
 
        table_entries = hwmgr->num_ps;
+
        state = hwmgr->ps;
 
        for (i = 0; i < table_entries; i++) {
                if (state->id == *state_id) {
-                       hwmgr->request_ps = state;
+                       memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
                        return 0;
                }
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -100,13 +101,14 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
        if (requested == NULL)
                return 0;
 
+       phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
        if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
                equal = false;
 
        if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
-               phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
                phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
-               hwmgr->current_ps = requested;
+               memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
        }
        return 0;
 }
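
request_ps and current_ps now refer to driver-owned buffers, so the selection and adjust paths copy the ps_size bytes of the chosen table entry instead of aliasing into hwmgr->ps; phm_apply_state_adjust_rules(), now run unconditionally before the equality check, can then rewrite the requested state without corrupting the source table. The ownership change in miniature:

    /* Before: request_ps pointed into the hwmgr->ps table, so any
     * later adjustment mutated the table entry itself.
     */
    hwmgr->request_ps = state;

    /* After: request_ps holds a private copy of the entry. */
    memcpy(hwmgr->request_ps, state, hwmgr->ps_size);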
index f7ce4cb71346c4623c780c5ba86c1c33ad3eb65c..5fff1d636ab7497739bcaa0381a09c2ab9b67c6e 100644 (file)
@@ -3,14 +3,12 @@
 # It provides the hardware management services for the driver.
 
 HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
-              hardwaremanager.o pp_acpi.o cz_hwmgr.o \
-               cz_clockpowergating.o \
-              tonga_processpptables.o ppatomctrl.o \
-               tonga_hwmgr.o pppcielanes.o  tonga_thermal.o\
-               fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
-               fiji_clockpowergating.o fiji_thermal.o \
-              polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
-              polaris10_clockpowergating.o
+               hardwaremanager.o pp_acpi.o cz_hwmgr.o \
+               cz_clockpowergating.o pppcielanes.o\
+               process_pptables_v1_0.o ppatomctrl.o \
+               smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
+               smu7_clockpowergating.o
+
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
index 8cc0df9b534ac4dc0105f9e0418203e408a33755..7e4fcbbbe08652735c6796a2cbfd62d122434e69 100644 (file)
@@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
        int result;
 
        cz_hwmgr->gfx_ramp_step = 256*25/100;
-
        cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
 
        for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
@@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 
        cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
        cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-
        cz_hwmgr->clock_slow_down_freq = 25000;
-
        cz_hwmgr->skip_clock_slow_down = 1;
-
        cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-
        cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-
        cz_hwmgr->voting_rights_clients = 0x00C00033;
-
        cz_hwmgr->static_screen_threshold = 8;
-
        cz_hwmgr->ddi_power_gating_disabled = 0;
-
        cz_hwmgr->bapm_enabled = 1;
-
        cz_hwmgr->voltage_drop_threshold = 0;
-
        cz_hwmgr->gfx_power_gating_threshold = 500;
-
        cz_hwmgr->vce_slow_sclk_threshold = 20000;
-
        cz_hwmgr->dce_slow_sclk_threshold = 30000;
-
        cz_hwmgr->disable_driver_thermal_policy = 1;
-
        cz_hwmgr->disable_nb_ps3_in_battery = 0;
 
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                    PHM_PlatformCaps_NonABMSupportInPPLib);
 
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                          PHM_PlatformCaps_SclkDeepSleep);
-
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_DynamicM3Arbiter);
 
@@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
                                  PHM_PlatformCaps_DynamicPatchPowerState);
 
        cz_hwmgr->thermal_auto_throttling_treshold = 0;
-
        cz_hwmgr->tdr_clock = 0;
-
        cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
 
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
                        (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
 
        cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-
        cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-
        cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-
        cz_hwmgr->boot_power_level.forceNBPstate = 0;
-
        cz_hwmgr->boot_power_level.hysteresis_up = 0;
-
        cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-
        cz_hwmgr->boot_power_level.display_wm = 0;
-
        cz_hwmgr->boot_power_level.vce_wm = 0;
 
        return 0;
@@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
                cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
 
        clock = hwmgr->display_config.min_core_set_clock;
-;
        if (clock == 0)
                printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
 
@@ -832,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
 
        smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
                                        PPSMC_MSG_SetWatermarkFrequency,
-                                     cz_hwmgr->sclk_dpm.soft_max_clk);
+                                       cz_hwmgr->sclk_dpm.soft_max_clk);
 
        return 0;
 }
@@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
                PP_DBG_LOG("enabling ALL SMU features.\n");
                dpm_features |= NB_DPM_MASK;
                ret = smum_send_msg_to_smc_with_parameter(
-                                                            hwmgr->smumgr,
-                                        PPSMC_MSG_EnableAllSmuFeatures,
-                                                            dpm_features);
+                                                         hwmgr->smumgr,
+                                                         PPSMC_MSG_EnableAllSmuFeatures,
+                                                         dpm_features);
                if (ret == 0)
                        cz_hwmgr->is_nb_dpm_enabled = true;
        }
@@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       if (hwmgr != NULL || hwmgr->backend != NULL) {
+       if (hwmgr != NULL && hwmgr->backend != NULL) {
                kfree(hwmgr->backend);
                kfree(hwmgr);
        }
@@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
                                                   PPSMC_MSG_SetUvdHardMin));
 
                        cz_enable_disable_uvd_dpm(hwmgr, true);
-               } else
+               } else {
                        cz_enable_disable_uvd_dpm(hwmgr, true);
-       } else
+               }
+       } else {
                cz_enable_disable_uvd_dpm(hwmgr, false);
+       }
 
        return 0;
 }
@@ -1564,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
        return sizeof(struct cz_power_state);
 }
 
-static void
-cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-       struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-       struct phm_clock_voltage_dependency_table *table =
-                               hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-       struct phm_vce_clock_voltage_dependency_table *vce_table =
-               hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-       struct phm_uvd_clock_voltage_dependency_table *uvd_table =
-               hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
-       uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
-                                       TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
-       uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-                                       TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
-       uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-                                       TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
-       uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
-       uint16_t vddnb, vddgfx;
-       int result;
-
-       if (sclk_index >= NUM_SCLK_LEVELS) {
-               seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
-       } else {
-               sclk = table->entries[sclk_index].clk;
-               seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
-       }
-
-       tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
-               CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-       vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
-       tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
-               CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-       vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
-       seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
-       seq_printf(m, "\n uvd    %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
-       if (!cz_hwmgr->uvd_power_gated) {
-               if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-                       seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
-               } else {
-                       vclk = uvd_table->entries[uvd_index].vclk;
-                       dclk = uvd_table->entries[uvd_index].dclk;
-                       seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
-               }
-       }
-
-       seq_printf(m, "\n vce    %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
-       if (!cz_hwmgr->vce_power_gated) {
-               if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-                       seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
-               } else {
-                       ecclk = vce_table->entries[vce_index].ecclk;
-                       seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
-               }
-       }
-
-       result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
-       if (0 == result) {
-               activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
-               activity_percent = activity_percent > 100 ? 100 : activity_percent;
-       } else {
-               activity_percent = 50;
-       }
-
-       seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
-}
-
 static void cz_hw_print_display_cfg(
        const struct cc6_settings *cc6_settings)
 {
@@ -1690,13 +1592,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
        struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
 
        if (separation_time !=
-               hw_data->cc6_settings.cpu_pstate_separation_time
-               || cc6_disable !=
-               hw_data->cc6_settings.cpu_cc6_disable
-               || pstate_disable !=
-               hw_data->cc6_settings.cpu_pstate_disable
-               || pstate_switch_disable !=
-               hw_data->cc6_settings.nb_pstate_switch_disable) {
+           hw_data->cc6_settings.cpu_pstate_separation_time ||
+           cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+           pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+           pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
 
                hw_data->cc6_settings.cc6_setting_changed = true;
 
@@ -1799,8 +1698,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
        ps = cast_const_PhwCzPowerState(state);
 
        level_index = index > ps->level - 1 ? ps->level - 1 : index;
-
-       level->coreClock  = ps->levels[level_index].engineClock;
+       level->coreClock = ps->levels[level_index].engineClock;
 
        if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
                for (i = 1; i < ps->level; i++) {
@@ -1887,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
        return 0;
 }
 
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+       struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+       struct phm_clock_voltage_dependency_table *table =
+                               hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+       struct phm_vce_clock_voltage_dependency_table *vce_table =
+               hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+       struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+               hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+       uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+                                       TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+       uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                       TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+       uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                       TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+       uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+       uint16_t vddnb, vddgfx;
+       int result;
+
+       switch (idx) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               if (sclk_index < NUM_SCLK_LEVELS) {
+                       sclk = table->entries[sclk_index].clk;
+                       *value = sclk;
+                       return 0;
+               }
+               return -EINVAL;
+       case AMDGPU_PP_SENSOR_VDDNB:
+               tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+                       CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+               vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+               *value = vddnb;
+               return 0;
+       case AMDGPU_PP_SENSOR_VDDGFX:
+               tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+                       CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+               vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+               *value = vddgfx;
+               return 0;
+       case AMDGPU_PP_SENSOR_UVD_VCLK:
+               if (!cz_hwmgr->uvd_power_gated) {
+                       if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                               return -EINVAL;
+                       } else {
+                               vclk = uvd_table->entries[uvd_index].vclk;
+                               *value = vclk;
+                               return 0;
+                       }
+               }
+               *value = 0;
+               return 0;
+       case AMDGPU_PP_SENSOR_UVD_DCLK:
+               if (!cz_hwmgr->uvd_power_gated) {
+                       if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                               return -EINVAL;
+                       } else {
+                               dclk = uvd_table->entries[uvd_index].dclk;
+                               *value = dclk;
+                               return 0;
+                       }
+               }
+               *value = 0;
+               return 0;
+       case AMDGPU_PP_SENSOR_VCE_ECCLK:
+               if (!cz_hwmgr->vce_power_gated) {
+                       if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                               return -EINVAL;
+                       } else {
+                               ecclk = vce_table->entries[vce_index].ecclk;
+                               *value = ecclk;
+                               return 0;
+                       }
+               }
+               *value = 0;
+               return 0;
+       case AMDGPU_PP_SENSOR_GPU_LOAD:
+               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+               if (0 == result) {
+                       activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+                       activity_percent = activity_percent > 100 ? 100 : activity_percent;
+               } else {
+                       activity_percent = 50;
+               }
+               *value = activity_percent;
+               return 0;
+       case AMDGPU_PP_SENSOR_UVD_POWER:
+               *value = cz_hwmgr->uvd_power_gated ? 0 : 1;
+               return 0;
+       case AMDGPU_PP_SENSOR_VCE_POWER:
+               *value = cz_hwmgr->vce_power_gated ? 0 : 1;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
        .backend_init = cz_hwmgr_backend_init,
        .backend_fini = cz_hwmgr_backend_fini,
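
cz_read_sensor() exposes the same data the deleted debugfs printer walked, one AMDGPU_PP_SENSOR_* index at a time. A usage sketch through the pp_dpm_read_sensor() wrapper added above (handle is an opaque pp_instance pointer; real callers dispatch via pp_dpm_funcs.read_sensor):

    /* Hypothetical caller: query GPU load as a percentage. */
    int32_t load;

    if (!pp_dpm_read_sensor(handle, AMDGPU_PP_SENSOR_GPU_LOAD, &load))
            printk(KERN_INFO "GPU load: %d %%\n", load);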
@@ -1902,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
        .patch_boot_state = cz_dpm_patch_boot_state,
        .get_pp_table_entry = cz_dpm_get_pp_table_entry,
        .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
-       .print_current_perforce_level = cz_print_current_perforce_level,
        .set_cpu_power_state = cz_set_cpu_power_state,
        .store_cc6_data = cz_store_cc6_data,
        .force_clock_level = cz_force_clock_level,
@@ -1912,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
        .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
        .get_clock_by_type = cz_get_clock_by_type,
        .get_max_high_clocks = cz_get_max_high_clocks,
+       .read_sensor = cz_read_sensor,
 };
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644 (file)
index 5afe820..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "fiji_clockpowergating.h"
-#include "fiji_ppsmc.h"
-#include "fiji_hwmgr.h"
-
-int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-       data->samu_power_gated = false;
-       data->acp_power_gated = false;
-
-       return 0;
-}
-
-int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->uvd_power_gated == bgate)
-               return 0;
-
-       data->uvd_power_gated = bgate;
-
-       if (bgate) {
-               cgs_set_clockgating_state(hwmgr->device,
-                                         AMD_IP_BLOCK_TYPE_UVD,
-                                         AMD_CG_STATE_GATE);
-               fiji_update_uvd_dpm(hwmgr, true);
-       } else {
-               fiji_update_uvd_dpm(hwmgr, false);
-               cgs_set_clockgating_state(hwmgr->device,
-                                         AMD_IP_BLOCK_TYPE_UVD,
-                                         AMD_CG_STATE_UNGATE);
-       }
-
-       return 0;
-}
-
-int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_set_power_state_input states;
-       const struct pp_power_state  *pcurrent;
-       struct pp_power_state  *requested;
-
-       if (data->vce_power_gated == bgate)
-               return 0;
-
-       data->vce_power_gated = bgate;
-
-       pcurrent = hwmgr->current_ps;
-       requested = hwmgr->request_ps;
-
-       states.pcurrent_state = &(pcurrent->hardware);
-       states.pnew_state = &(requested->hardware);
-
-       fiji_update_vce_dpm(hwmgr, &states);
-       fiji_enable_disable_vce_dpm(hwmgr, !bgate);
-
-       return 0;
-}
-
-int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->samu_power_gated == bgate)
-               return 0;
-
-       data->samu_power_gated = bgate;
-
-       if (bgate)
-               fiji_update_samu_dpm(hwmgr, true);
-       else
-               fiji_update_samu_dpm(hwmgr, false);
-
-       return 0;
-}
-
-int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->acp_power_gated == bgate)
-               return 0;
-
-       data->acp_power_gated = bgate;
-
-       if (bgate)
-               fiji_update_acp_dpm(hwmgr, true);
-       else
-               fiji_update_acp_dpm(hwmgr, false);
-
-       return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
deleted file mode 100644 (file)
index 33af5f5..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_CLOCK_POWER_GATING_H_
-#define _FIJI_CLOCK_POWER_GATING_H_
-
-#include "fiji_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
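
The deleted fiji_phm_* entry points are superseded by the chip-generic smu7 code pulled in by the Makefile change above (smu7_clockpowergating.o and friends). A hedged sketch of the presumed replacement interface; these prototypes are assumptions inferred from the new object names, not shown in this diff:

    /* Presumed smu7-generic replacements for the fiji_phm_* hooks;
     * exact names and signatures are assumptions.
     */
    extern int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
    extern int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);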
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644 (file)
index 32d43e8..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_DYN_DEFAULTS_H
-#define FIJI_DYN_DEFAULTS_H
-
-/** \file
-* Volcanic Islands Dynamic default parameters.
-*/
-
-enum FIJIdpm_TrendDetection
-{
-    FIJIAdpm_TrendDetection_AUTO,
-    FIJIAdpm_TrendDetection_UP,
-    FIJIAdpm_TrendDetection_DOWN
-};
-typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
-
-/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
-
-/* Bit vector representing same fields as hardware register. */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0    0x3FFFC102  /* CP_Gfx_busy ????
-                                                         * HDP_busy
-                                                         * IH_busy
-                                                         * UVD_busy
-                                                         * VCE_busy
-                                                         * ACP_busy
-                                                         * SAMU_busy
-                                                         * SDMA enabled */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1    0x000400  /* FE_Gfx_busy  - Intended for primary usage.   Rest are for flexibility. ????
-                                                       * SH_Gfx_busy
-                                                       * RB_Gfx_busy
-                                                       * VCE_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2    0xC00080  /* SH_Gfx_busy - Intended for primary usage.   Rest are for flexibility.
-                                                       * FE_Gfx_busy
-                                                       * RB_Gfx_busy
-                                                       * ACP_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3    0xC00200  /* RB_Gfx_busy - Intended for primary usage.   Rest are for flexibility.
-                                                       * FE_Gfx_busy
-                                                       * SH_Gfx_busy
-                                                       * UVD_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4    0xC01680  /* UVD_busy
-                                                       * VCE_busy
-                                                       * ACP_busy
-                                                       * SAMU_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5    0xC00033  /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6    0xC00033  /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7    0x3FFFC000  /* GFX, HDP */
-
-
-/* thermal protection counter (units). */
-#define PPFIJI_THERMALPROTECTCOUNTER_DFLT            0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT    0
-
-/* static screen threshold */
-#define PPFIJI_STATICSCREENTHRESHOLD_DFLT        0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPFIJI_REFERENCEDIVIDER_DFLT                  4
-
-/* ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPFIJI_CGULVPARAMETER_DFLT                     0x00040035
-#define PPFIJI_CGULVCONTROL_DFLT                       0x00007450
-#define PPFIJI_TARGETACTIVITY_DFLT                     30 /* 30%*/
-#define PPFIJI_MCLK_TARGETACTIVITY_DFLT                10 /* 10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644 (file)
index 120a9e2..0000000
+++ /dev/null
@@ -1,5599 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-
-#include "hwmgr.h"
-#include "fiji_smumgr.h"
-#include "atombios.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "cgs_common.h"
-#include "fiji_dyn_defaults.h"
-#include "fiji_powertune.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "pppcielanes.h"
-#include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "pp_acpi.h"
-#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
-#include "ppinterrupt.h"
-
-#include "fiji_clockpowergating.h"
-#include "fiji_thermal.h"
-
-#define VOLTAGE_SCALE  4
-#define SMC_RAM_END            0x40000
-#define VDDC_VDDCI_DELTA       300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_CG_ARB_FREQ_F0           0x0a /* boot-up default */
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-/* From smc_reg.h */
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000  /* First byte after SMC_CG_IND */
-
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define ixSWRST_COMMAND_1           0x1400103
-#define MC_SEQ_CNTL__CAC_EN_MASK    0x40000000
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-    DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
-    DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
-    DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
-    DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
-    DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
-};
-
-
-/* Whether the clock stretch is a multiple of 2.5% or not, and
- * [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
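-/* Illustrative example: a stretch amount of 10% is index 2 in the list
- * above, so with Use_For_Low_freq == 1 it converts to
- * fiji_clock_stretch_amount_conversion[1][2] == 4.
- */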
-
-static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct fiji_power_state *cast_phw_fiji_power_state(
-                                 struct pp_hw_power_state *hw_ps)
-{
-       PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL;);
-
-       return (struct fiji_power_state *)hw_ps;
-}
-
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
-                                const struct pp_hw_power_state *hw_ps)
-{
-       PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL;);
-
-       return (const struct fiji_power_state *)hw_ps;
-}
-
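-/* A minimal usage sketch (hypothetical caller): the magic check makes the
- * cast fail loudly on foreign power states:
- *
- *     struct fiji_power_state *ps = cast_phw_fiji_power_state(hw_ps);
- *     if (ps == NULL)
- *             return -EINVAL;  // hw_ps->magic was not PhwFiji_Magic
- */
-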
-static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
-}
-
-static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_ulv_parm *ulv = &data->ulv;
-
-       ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
-       data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
-       data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
-       data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
-       data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
-       data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
-       data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
-       data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
-       data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
-
-       data->static_screen_threshold_unit =
-                       PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
-       data->static_screen_threshold =
-                       PPFIJI_STATICSCREENTHRESHOLD_DFLT;
-
-       /* Unset ABM cap as it moved to DAL.
-        * Add PHM_PlatformCaps_NonABMSupportInPPLib
-        * to redirect ABM-related requests to DAL
-        */
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ABM);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_NonABMSupportInPPLib);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DynamicACTiming);
-
-       fiji_initialize_power_tune_defaults(hwmgr);
-
-       data->mclk_stutter_mode_threshold = 60000;
-       data->pcie_gen_performance.max = PP_PCIEGen1;
-       data->pcie_gen_performance.min = PP_PCIEGen3;
-       data->pcie_gen_power_saving.max = PP_PCIEGen1;
-       data->pcie_gen_power_saving.min = PP_PCIEGen3;
-       data->pcie_lane_performance.max = 0;
-       data->pcie_lane_performance.min = 16;
-       data->pcie_lane_power_saving.max = 0;
-       data->pcie_lane_power_saving.min = 16;
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DynamicUVDState);
-}
-
-static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
-       phm_ppt_v1_voltage_lookup_table *lookup_table,
-       uint16_t virtual_voltage_id, int32_t *sclk)
-{
-       uint8_t entryId;
-       uint8_t voltageId;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
-       /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
-       for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
-               voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
-               if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
-                       break;
-       }
-
-       PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
-                       "Can't find requested voltage id in vdd_dep_on_sclk table!",
-                       return -EINVAL;
-                       );
-
-       *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
-
-       return 0;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint16_t    vv_id;
-       uint16_t    vddc = 0;
-       uint16_t    evv_default = 1150;
-       uint16_t    i, j;
-       uint32_t  sclk = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       int result;
-
-       for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
-               vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-               if (!fiji_get_sclk_for_voltage_evv(hwmgr,
-                               table_info->vddc_lookup_table, vv_id, &sclk)) {
-                       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_ClockStretcher)) {
-                               for (j = 1; j < sclk_table->count; j++) {
-                                       if (sclk_table->entries[j].clk == sclk &&
-                                                       sclk_table->entries[j].cks_enable == 0) {
-                                               sclk += 5000;
-                                               break;
-                                       }
-                               }
-                       }
-
-                       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_EnableDriverEVV))
-                               result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
-                                               VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
-                       else
-                               result = -EINVAL;
-
-                       if (result)
-                               result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
-                                               VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc);
-
-                       /* need to make sure vddc is less than 2V or else it could damage the ASIC. */
-                       PP_ASSERT_WITH_CODE((vddc < 2000),
-                                       "Invalid VDDC value, greater than 2v!", result = -EINVAL;);
-
-                       if (result)
-                               /* 1.15V is the default safe value for Fiji */
-                               vddc = evv_default;
-
-                       /* the voltage should not be zero nor equal to leakage ID */
-                       if (vddc != 0 && vddc != vv_id) {
-                               data->vddc_leakage.actual_voltage
-                               [data->vddc_leakage.count] = vddc;
-                               data->vddc_leakage.leakage_id
-                               [data->vddc_leakage.count] = vv_id;
-                               data->vddc_leakage.count++;
-                       }
-               }
-       }
-       return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     voltage  pointer to the voltage value to patch
- * @param     leakage_table  pointer to the leakage voltage table
- */
-static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-               uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
-{
-       uint32_t index;
-
-       /* search for leakage voltage ID 0xff01 ~ 0xff08 */
-       for (index = 0; index < leakage_table->count; index++) {
-               /* if this voltage matches a leakage voltage ID */
-               /* patch with actual leakage voltage */
-               if (leakage_table->leakage_id[index] == *voltage) {
-                       *voltage = leakage_table->actual_voltage[index];
-                       break;
-               }
-       }
-
-       if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-               printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
-}
-
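-/* Illustrative example: if *voltage holds the virtual ID 0xff03 and
- * leakage_table->leakage_id[2] == 0xff03, *voltage is replaced with
- * leakage_table->actual_voltage[2]; an unmatched value above
- * ATOM_VIRTUAL_VOLTAGE_ID0 only triggers the error print above.
- */
-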
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param     hwmgr  the address of the powerplay hardware manager.
-* @param     lookup_table  pointer to the voltage lookup table
-* @param     leakage_table  pointer to the leakage voltage table
-* @return     always 0
-*/
-static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *lookup_table,
-               struct fiji_leakage_voltage *leakage_table)
-{
-       uint32_t i;
-
-       for (i = 0; i < lookup_table->count; i++)
-               fiji_patch_with_vdd_leakage(hwmgr,
-                               &lookup_table->entries[i].us_vdd, leakage_table);
-
-       return 0;
-}
-
-static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
-               struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
-               uint16_t *vddc)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-                       table_info->max_clock_voltage_on_dc.vddc;
-       return 0;
-}
-
-static int fiji_patch_voltage_dependency_tables_with_lookup_table(
-               struct pp_hwmgr *hwmgr)
-{
-       uint8_t entryId;
-       uint8_t voltageId;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
-                       table_info->vdd_dep_on_mclk;
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-               voltageId = sclk_table->entries[entryId].vddInd;
-               sclk_table->entries[entryId].vddc =
-                               table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-               voltageId = mclk_table->entries[entryId].vddInd;
-               mclk_table->entries[entryId].vddc =
-                       table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       for (entryId = 0; entryId < mm_table->count; ++entryId) {
-               voltageId = mm_table->entries[entryId].vddcInd;
-               mm_table->entries[entryId].vddc =
-                       table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       return 0;
-
-}
-
-static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       /* Need to determine if we need calculated voltage. */
-       return 0;
-}
-
-static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-       /* Need to determine if we need calculated voltage from mm table. */
-       return 0;
-}
-
-static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-       uint32_t table_size, i, j;
-       struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-       table_size = lookup_table->count;
-
-       PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-               "Lookup table is empty", return -EINVAL);
-
-       /* Sorting voltages */
-       for (i = 0; i < table_size - 1; i++) {
-               for (j = i + 1; j > 0; j--) {
-                       if (lookup_table->entries[j].us_vdd <
-                                       lookup_table->entries[j - 1].us_vdd) {
-                               tmp_voltage_lookup_record = lookup_table->entries[j - 1];
-                               lookup_table->entries[j - 1] = lookup_table->entries[j];
-                               lookup_table->entries[j] = tmp_voltage_lookup_record;
-                       }
-               }
-       }
-
-       return 0;
-}
-
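-/* Illustrative example: us_vdd values {900, 850, 1000} are reordered in
- * place to {850, 900, 1000}; the loop above is an insertion sort on
- * us_vdd in ascending order.
- */
-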
-static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       int tmp_result;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
-                       table_info->vddc_lookup_table, &(data->vddc_leakage));
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
-                       &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
-       if (tmp_result)
-               result = tmp_result;
-
-       return result;
-}
-
-static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-                       table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-                       table_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-               "VDD dependency on SCLK table is missing.       \
-               This table is mandatory", return -EINVAL);
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-               "VDD dependency on SCLK table has to have at least one entry.   \
-               This table is mandatory", return -EINVAL);
-
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-               "VDD dependency on MCLK table is missing.       \
-               This table is mandatory", return -EINVAL);
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-               "VDD dependency on MCLK table has to have at least one entry.    \
-               This table is mandatory", return -EINVAL);
-
-       data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
-       data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
-                       entries[allowed_sclk_vdd_table->count - 1].vddc;
-
-       table_info->max_clock_voltage_on_ac.sclk =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-       table_info->max_clock_voltage_on_ac.mclk =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-       table_info->max_clock_voltage_on_ac.vddc =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-       table_info->max_clock_voltage_on_ac.vddci =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-       hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
-               table_info->max_clock_voltage_on_ac.sclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
-               table_info->max_clock_voltage_on_ac.mclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
-               table_info->max_clock_voltage_on_ac.vddc;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
-               table_info->max_clock_voltage_on_ac.vddci;
-
-       return 0;
-}
-
-static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-       uint32_t speedCntl = 0;
-
-       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-       speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-                       ixPCIE_LC_SPEED_CNTL);
-       return((uint16_t)PHM_GET_FIELD(speedCntl,
-                       PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
-       uint32_t link_width;
-
-       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-       link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-                       PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
-       PP_ASSERT_WITH_CODE((7 >= link_width),
-                       "Invalid PCIe lane width!", return 0);
-
-       return decode_pcie_lane_width(link_width);
-}
-
-/** Patch the Boot State to match VBIOS boot clocks and voltage.
-*
-* @param hwmgr Pointer to the hardware manager.
-* @param hw_ps The address of the PowerState instance being created.
-*
-*/
-static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
-               struct pp_hw_power_state *hw_ps)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
-       ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-       uint16_t size;
-       uint8_t frev, crev;
-       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-       /* First retrieve the Boot clocks and VDDC from the firmware info table.
-        * We assume here that fw_info is unchanged if this call fails.
-        */
-       fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-                       hwmgr->device, index,
-                       &size, &frev, &crev);
-       if (!fw_info)
-               /* During a test, there is no firmware info table. */
-               return 0;
-
-       /* Patch the state. */
-       data->vbios_boot_state.sclk_bootup_value =
-                       le32_to_cpu(fw_info->ulDefaultEngineClock);
-       data->vbios_boot_state.mclk_bootup_value =
-                       le32_to_cpu(fw_info->ulDefaultMemoryClock);
-       data->vbios_boot_state.mvdd_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-       data->vbios_boot_state.vddc_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-       data->vbios_boot_state.vddci_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-       data->vbios_boot_state.pcie_gen_bootup_value =
-                       fiji_get_current_pcie_speed(hwmgr);
-       data->vbios_boot_state.pcie_lane_bootup_value =
-                       (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
-
-       /* set boot power state */
-       ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-       ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-       ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-       ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-       return 0;
-}
-
-static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-       return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data;
-       uint32_t i;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       bool stay_in_boot;
-       int result;
-
-       data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
-
-       data->dll_default_on = false;
-       data->sram_end = SMC_RAM_END;
-
-       for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
-               data->activity_target[i] = FIJI_AT_DFLT;
-
-       data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
-       data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
-       data->mclk_dpm0_activity_target = 0xa;
-
-       data->sclk_dpm_key_disabled = 0;
-       data->mclk_dpm_key_disabled = 0;
-       data->pcie_dpm_key_disabled = 0;
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_UnTabledHardwareInterface);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_TablelessHardwareInterface);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep);
-
-       data->gpio_debug = 0;
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DynamicPatchPowerState);
-
-       /* need to set voltage control types before EVV patching */
-       data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
-       data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
-       data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
-
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-
-       if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                       VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
-               data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableMVDDControl))
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
-                       data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-
-       if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableMVDDControl);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ControlVDDCI)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-                       data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-                       data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-       }
-
-       if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ControlVDDCI);
-
-       if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-
-       fiji_init_dpm_defaults(hwmgr);
-
-       /* Get leakage voltage based on leakage ID. */
-       fiji_get_evv_voltages(hwmgr);
-
-       /* Patch our voltage dependency table with actual leakage voltage
-        * We need to perform leakage translation before it's used by other functions
-        */
-       fiji_complete_dependency_tables(hwmgr);
-
-       /* Parse pptable data read from VBIOS */
-       fiji_set_private_data_based_on_pptable(hwmgr);
-
-       /* ULV Support */
-       data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
-
-       /* Initialize Dynamic State Adjustment Rule Settings */
-       result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
-       if (!result) {
-               data->uvd_enabled = false;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_EnableSMU7ThermalManagement);
-               data->vddc_phase_shed_control = false;
-       }
-
-       stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StayInBootState);
-
-       if (0 == result) {
-               struct cgs_system_info sys_info = {0};
-
-               data->is_tlu_enabled = false;
-               hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-                               FIJI_MAX_HARDWARE_POWERLEVELS;
-               hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-               hwmgr->platform_descriptor.minimumClocksReductionPercentage  = 50;
-
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
-               if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucFanControlMode) {
-                       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-                                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-                       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-                                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
-                                       table_info->cac_dtp_table->usOperatingTempMinLimit;
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
-                                       table_info->cac_dtp_table->usOperatingTempMaxLimit;
-                       hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
-                                       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
-                                       table_info->cac_dtp_table->usOperatingTempStep;
-                       hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
-                                       table_info->cac_dtp_table->usTargetOperatingTemp;
-
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_ODFuzzyFanControlSupport);
-               }
-
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-               else
-                       data->pcie_gen_cap = (uint32_t)sys_info.value;
-               if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-                       data->pcie_spc_cap = 20;
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-               else
-                       data->pcie_lane_cap = (uint32_t)sys_info.value;
-       } else {
-               /* Ignore the fini return value; we are cleaning up after a
-                * failure and report the original error instead.
-                */
-               fiji_hwmgr_backend_fini(hwmgr);
-               return result;
-       }
-
-       return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       data->clock_registers.vCG_SPLL_FUNC_CNTL =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_FUNC_CNTL);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_FUNC_CNTL_2);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_FUNC_CNTL_3);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_FUNC_CNTL_4);
-       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_SPREAD_SPECTRUM);
-       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_SPLL_SPREAD_SPECTRUM_2);
-
-       return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t temp;
-
-       temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-       data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-                       ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-                        MC_SEQ_MISC0_GDDR5_SHIFT));
-
-       return 0;
-}
-
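-/* Worked example: if mmMC_SEQ_MISC0 reads 0x50000000, then
- * (0x50000000 & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT == 5,
- * which equals MC_SEQ_MISC0_GDDR5_VALUE, so is_memory_gddr5 is true.
- */
-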
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-       return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-       data->samu_power_gated = false;
-       data->acp_power_gated = false;
-       data->pg_acp_init = true;
-
-       return 0;
-}
-
-static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       data->low_sclk_interrupt_threshold = 0;
-
-       return 0;
-}
-
-static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = fiji_read_clock_registers(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to read clock registers!", result = tmp_result);
-
-       tmp_result = fiji_get_memory_type(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to get memory type!", result = tmp_result);
-
-       tmp_result = fiji_enable_acpi_power_management(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable ACPI power management!", result = tmp_result);
-
-       tmp_result = fiji_init_power_gate_state(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to init power gate state!", result = tmp_result);
-
-       tmp_result = tonga_get_mc_microcode_version(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to get MC microcode version!", result = tmp_result);
-
-       tmp_result = fiji_init_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to init sclk threshold!", result = tmp_result);
-
-       return result;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-*/
-static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-       const struct fiji_hwmgr *data =
-                       (const struct fiji_hwmgr *)(hwmgr->backend);
-
-       return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-       /* enable voltage control */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-       return 0;
-}
-
-/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    vol_table  the pointer to changing voltage table
-* @return    0 in success
-*/
-
-static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
-               struct pp_atomctrl_voltage_table *vol_table)
-{
-       uint32_t i, j;
-       uint16_t vvalue;
-       bool found = false;
-       struct pp_atomctrl_voltage_table *table;
-
-       PP_ASSERT_WITH_CODE((NULL != vol_table),
-                       "Voltage Table empty.", return -EINVAL);
-       table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
-                       GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       table->mask_low = vol_table->mask_low;
-       table->phase_delay = vol_table->phase_delay;
-
-       for (i = 0; i < vol_table->count; i++) {
-               vvalue = vol_table->entries[i].value;
-               found = false;
-
-               for (j = 0; j < table->count; j++) {
-                       if (vvalue == table->entries[j].value) {
-                               found = true;
-                               break;
-                       }
-               }
-
-               if (!found) {
-                       table->entries[table->count].value = vvalue;
-                       table->entries[table->count].smio_low =
-                                       vol_table->entries[i].smio_low;
-                       table->count++;
-               }
-       }
-
-       memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
-       kfree(table);
-
-       return 0;
-}
-
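-/* Illustrative example: entries with values {800, 800, 850, 900, 850}
- * are reduced to {800, 850, 900} with count == 3; the smio_low of the
- * first occurrence of each value is kept.
- */
-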
-static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-       uint32_t i;
-       int result;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
-
-       PP_ASSERT_WITH_CODE((0 != dep_table->count),
-                       "Voltage Dependency Table empty.", return -EINVAL);
-
-       vol_table->mask_low = 0;
-       vol_table->phase_delay = 0;
-       vol_table->count = dep_table->count;
-
-       for (i = 0; i < dep_table->count; i++) {
-               vol_table->entries[i].value = dep_table->entries[i].mvdd;
-               vol_table->entries[i].smio_low = 0;
-       }
-
-       result = fiji_trim_voltage_table(hwmgr, vol_table);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to trim MVDD table.", return result);
-
-       return 0;
-}
-
-static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-       uint32_t i;
-       int result;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
-
-       PP_ASSERT_WITH_CODE((0 != dep_table->count),
-                       "Voltage Dependency Table empty.", return -EINVAL);
-
-       vol_table->mask_low = 0;
-       vol_table->phase_delay = 0;
-       vol_table->count = dep_table->count;
-
-       for (i = 0; i < dep_table->count; i++) {
-               vol_table->entries[i].value = dep_table->entries[i].vddci;
-               vol_table->entries[i].smio_low = 0;
-       }
-
-       result = fiji_trim_voltage_table(hwmgr, vol_table);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to trim VDDCI table.", return result);
-
-       return 0;
-}
-
-static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-       int i = 0;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
-
-       PP_ASSERT_WITH_CODE((0 != lookup_table->count),
-                       "Voltage Lookup Table empty.", return -EINVAL);
-
-       vol_table->mask_low = 0;
-       vol_table->phase_delay = 0;
-
-       vol_table->count = lookup_table->count;
-
-       for (i = 0; i < vol_table->count; i++) {
-               vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
-               vol_table->entries[i].smio_low = 0;
-       }
-
-       return 0;
-}
-
-/* ---- Voltage Tables ----
- * If the voltage table would be bigger than
- * what will fit into the state table on
- * the SMC keep only the higher entries.
- */
-static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
-               uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
-{
-       unsigned int i, diff;
-
-       if (vol_table->count <= max_vol_steps)
-               return;
-
-       diff = vol_table->count - max_vol_steps;
-
-       for (i = 0; i < max_vol_steps; i++)
-               vol_table->entries[i] = vol_table->entries[i + diff];
-
-       vol_table->count = max_vol_steps;
-}
-
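-/* Worked example: with count == 10 and max_vol_steps == 8, diff == 2 and
- * entries[2..9] shift down to entries[0..7]; the 8 highest voltages of
- * the ascending table are kept and count becomes 8.
- */
-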
-/**
-* Create Voltage Tables.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       int result;
-
-       if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                               VOLTAGE_TYPE_MVDDC,     VOLTAGE_OBJ_GPIO_LUT,
-                               &(data->mvdd_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve MVDD table.",
-                               return result);
-       } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
-                               table_info->vdd_dep_on_mclk);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 MVDD table from dependency table.",
-                               return result;);
-       }
-
-       if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
-                               &(data->vddci_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve VDDCI table.",
-                               return result);
-       } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               result = fiji_get_svi2_vddci_voltage_table(hwmgr,
-                               table_info->vdd_dep_on_mclk);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 VDDCI table from dependency table.",
-                               return result);
-       }
-
-       if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               result = fiji_get_svi2_vdd_voltage_table(hwmgr,
-                               table_info->vddc_lookup_table);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 VDDC table from lookup table.",
-                               return result);
-       }
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
-                       "Too many voltage values for VDDC. Trimming to fit state table.",
-                       fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-                                       SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
-                       "Too many voltage values for VDDCI. Trimming to fit state table.",
-                       fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-                                       SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
-
-       PP_ASSERT_WITH_CODE(
-                       (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
-                       "Too many voltage values for MVDD. Trimming to fit state table.",
-                       fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-                                       SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
-
-       return 0;
-}
-
-static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       /* Program additional LP registers
-        * that are no longer programmed by VBIOS
-        */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
-       return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_program_static_screen_threshold_parameters(
-               struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* Set static screen threshold unit */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-                       data->static_screen_threshold_unit);
-       /* Set static screen threshold */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-                       data->static_screen_threshold);
-
-       return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-       uint32_t displayGap =
-                       cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixCG_DISPLAY_GAP_CNTL);
-
-       displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
-                       DISP_GAP, DISPLAY_GAP_IGNORE);
-
-       displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
-                       DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_DISPLAY_GAP_CNTL, displayGap);
-
-       return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* Clear reset for voting clients before enabling DPM */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-       return 0;
-}
-
-static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
-       /* Reset voting clients before disabling DPM */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_0, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_1, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_2, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_3, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_4, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_5, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_6, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_7, 0);
-
-       return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, DpmTable),
-                       &tmp, data->sram_end);
-
-       if (0 == result)
-               data->dpm_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, SoftRegisters),
-                       &tmp, data->sram_end);
-
-       if (!result) {
-               data->soft_regs_start = tmp;
-               smu_data->soft_regs_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, mcRegisterTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->mc_reg_table_start = tmp;
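-       /* Note: unlike the other header reads, this result is not folded
-        * into 'error'; a missing MC register table offset is treated as
-        * non-fatal here.
-        */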
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, FanTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->fan_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->arb_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU73_Firmware_Header, Version),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               hwmgr->microcode_version_info.SMC = tmp;
-
-       error |= (0 != result);
-
-       return error ? -EINVAL : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-               uint32_t arb_src, uint32_t arb_dest)
-{
-       uint32_t mc_arb_dram_timing;
-       uint32_t mc_arb_dram_timing2;
-       uint32_t burst_time;
-       uint32_t mc_cg_config;
-
-       switch (arb_src) {
-       case MC_CG_ARB_FREQ_F0:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-               break;
-       case MC_CG_ARB_FREQ_F1:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (arb_dest) {
-       case MC_CG_ARB_FREQ_F0:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-               break;
-       case MC_CG_ARB_FREQ_F1:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-       mc_cg_config |= 0x0000000F;
-       cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-       PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
-       return 0;
-}
-
-/**
-* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   0 on success
-*/
-static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   the result of fiji_copy_and_switch_arb_sets (0 on success).
-* This function is to be called from the SetPowerState table.
-*/
-static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
-       return fiji_copy_and_switch_arb_sets(hwmgr,
-                       MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
-       uint32_t tmp;
-
-       tmp = (cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
-                       0x0000ff00) >> 8;
-
-       if (tmp == MC_CG_ARB_FREQ_F0)
-               return 0;
-
-       return fiji_copy_and_switch_arb_sets(hwmgr,
-                       tmp, MC_CG_ARB_FREQ_F0);
-}
-
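-/* Illustrative example: if bits [15:8] of ixSMC_SCRATCH9 read 0x0b
- * (MC_CG_ARB_FREQ_F1), the F1 arb settings are copied back to F0 and F0
- * is made the active set; if they already read 0x0a (MC_CG_ARB_FREQ_F0)
- * nothing is done.
- */
-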
-static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
-               struct fiji_single_dpm_table *dpm_table, uint32_t count)
-{
-       int i;
-       PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
-                       "Fatal error: cannot set up single DPM table entries "
-                       "to exceed max number!",);
-
-       dpm_table->count = count;
-       for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
-               dpm_table->dpm_levels[i].enabled = false;
-
-       return 0;
-}
-
-static void fiji_setup_pcie_table_entry(
-       struct fiji_single_dpm_table *dpm_table,
-       uint32_t index, uint32_t pcie_gen,
-       uint32_t pcie_lanes)
-{
-       dpm_table->dpm_levels[index].value = pcie_gen;
-       dpm_table->dpm_levels[index].param1 = pcie_lanes;
-       dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       uint32_t i, max_entry;
-
-       PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
-                       data->use_pcie_power_saving_levels), "No pcie performance levels!",
-                       return -EINVAL);
-
-       if (data->use_pcie_performance_levels &&
-                       !data->use_pcie_power_saving_levels) {
-               data->pcie_gen_power_saving = data->pcie_gen_performance;
-               data->pcie_lane_power_saving = data->pcie_lane_performance;
-       } else if (!data->use_pcie_performance_levels &&
-                       data->use_pcie_power_saving_levels) {
-               data->pcie_gen_performance = data->pcie_gen_power_saving;
-               data->pcie_lane_performance = data->pcie_lane_power_saving;
-       }
-
-       fiji_reset_single_dpm_table(hwmgr,
-                       &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
-
-       if (pcie_table != NULL) {
-               /* max_entry is used to make sure we reserve one PCIE level
-                * for boot level (fix for A+A PSPP issue).
-                * If the PCIE table from the PPTable has a ULV entry plus
-                * 8 entries, ignore the last entry. */
-               max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
-                               SMU73_MAX_LEVELS_LINK : pcie_table->count;
-               for (i = 1; i < max_entry; i++) {
-                       fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
-                                       get_pcie_gen_support(data->pcie_gen_cap,
-                                                       pcie_table->entries[i].gen_speed),
-                                       get_pcie_lane_support(data->pcie_lane_cap,
-                                                       pcie_table->entries[i].lane_width));
-               }
-               data->dpm_table.pcie_speed_table.count = max_entry - 1;
-       } else {
-               /* Hardcode Pcie Table */
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Min_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Min_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-
-               data->dpm_table.pcie_speed_table.count = 6;
-       }
-       /* Populate last level for boot PCIE level, but do not increment count. */
-       fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-                       data->dpm_table.pcie_speed_table.count,
-                       get_pcie_gen_support(data->pcie_gen_cap,
-                                       PP_Min_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap,
-                                       PP_Max_PCIELane));
-
-       return 0;
-}
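
The hardcoded fallback above issues six nearly identical calls that differ only in the PCIe gen argument, so a table-driven loop is one way it could be tightened. The sketch below mocks the driver structures with minimal stand-ins; all names, types, and constant values here are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stripped-down stand-ins for the driver's DPM table structures */
struct dpm_level { uint32_t value, param1; bool enabled; };
struct dpm_table { struct dpm_level levels[8]; uint32_t count; };

enum { GEN_MIN = 1, GEN_MAX = 3, LANES_MAX = 16 };

static void setup_entry(struct dpm_table *t, uint32_t i,
			uint32_t gen, uint32_t lanes)
{
	t->levels[i].value   = gen;
	t->levels[i].param1  = lanes;
	t->levels[i].enabled = true;
}

int main(void)
{
	/* the six hardcoded calls collapse into one data table */
	static const uint32_t gens[6] = {
		GEN_MIN, GEN_MIN, GEN_MAX, GEN_MAX, GEN_MAX, GEN_MAX,
	};
	struct dpm_table t = { .count = 0 };
	uint32_t i;

	for (i = 0; i < 6; i++)
		setup_entry(&t, i, gens[i], LANES_MAX);
	t.count = 6;

	for (i = 0; i < t.count; i++)
		printf("level %u: gen %u x%u\n", (unsigned)i,
		       (unsigned)t.levels[i].value,
		       (unsigned)t.levels[i].param1);
	return 0;
}
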
-
-/*
- * This function initializes all DPM state tables for SMU7,
- * based on the dependency table. The dynamic state patching
- * function will then trim these state tables to the allowed
- * range based on the power policy or external client requests,
- * such as UVD requests, etc.
- */
-static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i;
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                       table_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
-                       "SCLK dependency table is missing. This table is mandatory",
-                       return -EINVAL);
-       PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-                       "SCLK dependency table must have at least one entry. "
-                       "This table is mandatory",
-                       return -EINVAL);
-
-       PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
-                       "MCLK dependency table is missing. This table is mandatory",
-                       return -EINVAL);
-       PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-                       "MCLK dependency table must have at least one entry. "
-                       "This table is mandatory",
-                       return -EINVAL);
-
-       /* clear the state table to reset everything to default */
-       fiji_reset_single_dpm_table(hwmgr,
-                       &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
-       fiji_reset_single_dpm_table(hwmgr,
-                       &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
-
-       /* Initialize Sclk DPM table based on allowed Sclk values */
-       data->dpm_table.sclk_table.count = 0;
-       for (i = 0; i < dep_sclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.sclk_table.dpm_levels
-                               [data->dpm_table.sclk_table.count - 1].value !=
-                                               dep_sclk_table->entries[i].clk) {
-                       data->dpm_table.sclk_table.dpm_levels
-                       [data->dpm_table.sclk_table.count].value =
-                                       dep_sclk_table->entries[i].clk;
-                       data->dpm_table.sclk_table.dpm_levels
-                       [data->dpm_table.sclk_table.count].enabled =
-                                       (i == 0);
-                       data->dpm_table.sclk_table.count++;
-               }
-       }
-
-       /* Initialize Mclk DPM table based on allowed Mclk values */
-       data->dpm_table.mclk_table.count = 0;
-       for (i = 0; i < dep_mclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.mclk_table.dpm_levels
-                               [data->dpm_table.mclk_table.count - 1].value !=
-                                               dep_mclk_table->entries[i].clk) {
-                       data->dpm_table.mclk_table.dpm_levels
-                       [data->dpm_table.mclk_table.count].value =
-                                       dep_mclk_table->entries[i].clk;
-                       data->dpm_table.mclk_table.dpm_levels
-                       [data->dpm_table.mclk_table.count].enabled =
-                                       (i == 0);
-                       data->dpm_table.mclk_table.count++;
-               }
-       }
-
-       /* setup PCIE gen speed levels */
-       fiji_setup_default_pcie_table(hwmgr);
-
-       /* save a copy of the default DPM table */
-       memcpy(&(data->golden_dpm_table), &(data->dpm_table),
-                       sizeof(struct fiji_dpm_table));
-
-       return 0;
-}
-
-/**
- * @brief fiji_get_voltage_index
- *  Returns the index of the requested voltage record in the lookup table.
- * @param lookup_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry at or above the requested voltage,
- *  clamped to the last entry
- */
-uint8_t fiji_get_voltage_index(
-               struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
-       uint8_t count;
-       uint8_t i;
-
-       PP_ASSERT_WITH_CODE((NULL != lookup_table),
-                       "Lookup Table empty.", return 0);
-
-       count = (uint8_t) (lookup_table->count);
-       PP_ASSERT_WITH_CODE((0 != count),
-                       "Lookup Table empty.", return 0);
-
-       for (i = 0; i < lookup_table->count; i++) {
-               /* find first voltage equal or bigger than requested */
-               if (lookup_table->entries[i].us_vdd >= voltage)
-                       return i;
-       }
-       /* voltage is bigger than max voltage in the table */
-       return i - 1;
-}
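
The lookup semantics above (first entry at or above the request, clamped to the last index when the request exceeds the table maximum) can be seen in isolation with a small standalone version; the millivolt values below are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the driver's voltage lookup: returns the index of the
 * first entry >= the request, or the last index if all are smaller */
static uint8_t voltage_index(const uint16_t *vdd, uint8_t count, uint16_t v)
{
	uint8_t i;

	for (i = 0; i < count; i++)
		if (vdd[i] >= v)
			return i;
	return count - 1;	/* request above table max: clamp */
}

int main(void)
{
	const uint16_t table[] = { 800, 900, 1000, 1100 };	/* mV */

	printf("%u\n", (unsigned)voltage_index(table, 4, 950));	/* -> 2 */
	printf("%u\n", (unsigned)voltage_index(table, 4, 1200));	/* -> 3 */
	return 0;
}
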
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-                       table_info->vddc_lookup_table;
-       /* the table is already byte-swapped, so in order to use a value from it,
-        * we need to swap it back.
-        * We are populating vddc CAC data to BapmVddc table
-        * in split and merged mode
-        */
-       for (count = 0; count < lookup_table->count; count++) {
-               index = fiji_get_voltage_index(lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
-                               (lookup_table->entries[index].us_cac_low *
-                                               VOLTAGE_SCALE)) / 25);
-               table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
-                               (lookup_table->entries[index].us_cac_high *
-                                               VOLTAGE_SCALE)) / 25);
-       }
-
-       return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always  0
-*/
-
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = fiji_populate_cac_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate CAC voltage tables to SMC",
-                       return -EINVAL);
-
-       return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_Ulv *state)
-{
-       int result = 0;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-       state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-               CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-       }
-       return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
-               struct fiji_single_dpm_table* dpm_table)
-{
-       int32_t i;
-       int32_t mask = 0;
-
-       for (i = dpm_table->count; i > 0; i--) {
-               mask <<= 1;
-               if (dpm_table->dpm_levels[i - 1].enabled)
-                       mask |= 0x1;
-       }
-       return mask;
-}
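
The mask builder above sets bit i when DPM level i is enabled, walking from the highest level down so each shift lands the bit in place. A self-contained sketch with a made-up enable pattern:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bit i of the result is set iff level i is enabled */
static uint32_t enable_mask(const bool *enabled, int count)
{
	uint32_t mask = 0;
	int i;

	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (enabled[i - 1])
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	const bool levels[4] = { true, false, true, true };

	/* levels 0, 2 and 3 enabled -> 0b1101 == 0xd */
	printf("0x%x\n", (unsigned)enable_mask(levels, 4));
	return 0;
}
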
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_dpm_table *dpm_table = &data->dpm_table;
-       int i;
-
-       /* Index (dpm_table->pcie_speed_table.count)
-        * is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-       }
-
-       data->smc_state_table.LinkLevelCount =
-                       (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    sclk   the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
-       const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t ref_clock;
-       uint32_t ref_divider;
-       uint32_t fbdiv;
-       int result;
-
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error retrieving Engine Clock dividers from VBIOS.",
-                       return result);
-
-       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
-       ref_clock = atomctrl_get_reference_clock(hwmgr);
-       ref_divider = 1 + dividers.uc_pll_ref_div;
-
-       /* the low 14 bits are the fraction and the high 12 bits are the integer divider */
-       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-       /* SPLL_FUNC_CNTL setup */
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_REF_DIV, dividers.uc_pll_ref_div);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-       /* SPLL_FUNC_CNTL_3 setup*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_FB_DIV, fbdiv);
-
-       /* set to use fractional accumulation*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_DITHEN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-               struct pp_atomctrl_internal_ss_info ssInfo;
-
-               uint32_t vco_freq = clock * dividers.uc_pll_post_div;
-               if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
-                               vco_freq, &ssInfo)) {
-                       /*
-                        * ss_info.speed_spectrum_percentage -- in unit of 0.01%
-                        * ss_info.speed_spectrum_rate -- in unit of khz
-                        *
-                        * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
-                        */
-                       uint32_t clk_s = ref_clock * 5 /
-                                       (ref_divider * ssInfo.speed_spectrum_rate);
-                       /* clkv = 2 * D * fbdiv / NS */
-                       uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
-                                       fbdiv / (clk_s * 10000);
-
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-                       cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
-                                       CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
-               }
-       }
-
-       sclk->SclkFrequency        = clock;
-       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-       return 0;
-}
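
The FBDIV handling above keeps a 26-bit field whose low 14 bits are the fractional part (in units of 1/16384) and whose high 12 bits are the integer divider, per the comment in the function. A standalone decode with a made-up raw value:

#include <stdint.h>
#include <stdio.h>

/* split the 26-bit SPLL feedback divider into integer and fraction */
static void decode_fbdiv(uint32_t raw)
{
	uint32_t fbdiv    = raw & 0x3FFFFFF;
	uint32_t integer  = fbdiv >> 14;
	uint32_t fraction = fbdiv & 0x3FFF;

	printf("fbdiv = %u + %u/16384\n",
	       (unsigned)integer, (unsigned)fraction);
}

int main(void)
{
	decode_fbdiv((100 << 14) | 8192);	/* prints 100 + 8192/16384 */
	return 0;
}
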
-
-static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
-{
-       uint32_t  i;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_voltage_table *vddci_table =
-                       &(data->vddci_voltage_table);
-
-       for (i = 0; i < vddci_table->count; i++) {
-               if (vddci_table->entries[i].value >= vddci)
-                       return vddci_table->entries[i].value;
-       }
-
-       PP_ASSERT_WITH_CODE(false,
-                       "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i-1].value);
-}
-
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
-               uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-       uint32_t i;
-       uint16_t vddci;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       *voltage = *mvdd = 0;
-
-       /* the clock-voltage dependency table is empty */
-       if (dep_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < dep_table->count; i++) {
-               /* find the first sclk greater than or equal to the request */
-               if (dep_table->entries[i].clk >= clock) {
-                       *voltage |= (dep_table->entries[i].vddc *
-                                       VOLTAGE_SCALE) << VDDC_SHIFT;
-                       if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
-                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else if (dep_table->entries[i].vddci)
-                               *voltage |= (dep_table->entries[i].vddci *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else {
-                               vddci = fiji_find_closest_vddci(hwmgr,
-                                               (dep_table->entries[i].vddc -
-                                                               (uint16_t)data->vddc_vddci_delta));
-                               *voltage |= (vddci * VOLTAGE_SCALE) <<  VDDCI_SHIFT;
-                       }
-
-                       if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
-                                       VOLTAGE_SCALE;
-                       else if (dep_table->entries[i].mvdd)
-                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
-                                       VOLTAGE_SCALE;
-
-                       *voltage |= 1 << PHASES_SHIFT;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than the max sclk in the dependency table */
-       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-       if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-       else if (dep_table->entries[i-1].vddci) {
-               vddci = fiji_find_closest_vddci(hwmgr,
-                               (dep_table->entries[i - 1].vddc -
-                                               (uint16_t)data->vddc_vddci_delta));
-               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-       }
-
-       if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-       else if (dep_table->entries[i - 1].mvdd)
-               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-       return 0;
-}
-
-static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
-               uint32_t clock_insr)
-{
-       uint8_t i;
-       uint32_t temp;
-       uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
-
-       PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
-       for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
-               temp = clock >> i;
-
-               if (temp >= min || i == 0)
-                       break;
-       }
-       return i;
-}
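
The loop above picks the largest shift count (the deep-sleep divider ID) that keeps clock >> id at or above the floor imposed by FIJI_MINIMUM_ENGINE_CLOCK. A standalone sketch; the 10kHz clock units and the maximum ID of 5 are assumptions for the example, not values taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* largest power-of-two divider (as a shift) keeping the clock >= min */
static uint8_t sleep_divider_id(uint32_t clock, uint32_t min,
				uint8_t max_id)
{
	uint8_t i;

	for (i = max_id; ; i--) {
		if ((clock >> i) >= min || i == 0)
			break;
	}
	return i;
}

int main(void)
{
	/* assumed 10kHz units: 600MHz clock, 100MHz floor, max id 5 */
	printf("%u\n", (unsigned)sleep_divider_id(60000, 10000, 5));
	/* -> 2, i.e. 600MHz >> 2 = 150MHz >= 100MHz */
	return 0;
}
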
-/**
-* Populates a single SMC SCLK structure using the provided engine clock.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    level  the SMC graphics level structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, uint16_t sclk_al_threshold,
-               struct SMU73_Discrete_GraphicsLevel *level)
-{
-       int result;
-       /* PP_Clocks minClocks; */
-       uint32_t threshold, mvdd;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       result = fiji_calculate_sclk_params(hwmgr, clock, level);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "cannot calculate SCLK parameters", return result);
-
-       /* populate graphics levels */
-       result = fiji_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_sclk, clock,
-                       &level->MinVoltage, &mvdd);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find VDDC voltage value for "
-                       "VDDC engine clock dependency table",
-                       return result);
-
-       level->SclkFrequency = clock;
-       level->ActivityLevel = sclk_al_threshold;
-       level->CcPwrDynRm = 0;
-       level->CcPwrDynRm1 = 0;
-       level->EnabledForActivity = 0;
-       level->EnabledForThrottle = 1;
-       level->UpHyst = 10;
-       level->DownHyst = 0;
-       level->VoltageDownHyst = 0;
-       level->PowerThrottle = 0;
-
-       threshold = clock * data->fast_watermark_threshold / 100;
-
-       data->display_timing.min_clock_in_sr =
-                       hwmgr->display_config.min_core_set_clock_in_sr;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep))
-               level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
-                               hwmgr->display_config.min_core_set_clock_in_sr);
-
-       /* Default to slow; the highest DPM level will be
-        * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
-        */
-       level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
-       return 0;
-}
-/**
-* Populates all SMC SCLK level structures based on the trimmed, allowed DPM engine clock states.
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_dpm_table *dpm_table = &data->dpm_table;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
-       int result = 0;
-       uint32_t array = data->dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
-                       SMU73_MAX_LEVELS_GRAPHICS;
-       struct SMU73_Discrete_GraphicsLevel *levels =
-                       data->smc_state_table.GraphicsLevel;
-       uint32_t i, max_entry;
-       uint8_t hightest_pcie_level_enabled = 0,
-                       lowest_pcie_level_enabled = 0,
-                       mid_pcie_level_enabled = 0,
-                       count = 0;
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               result = fiji_populate_single_graphic_level(hwmgr,
-                               dpm_table->sclk_table.dpm_levels[i].value,
-                               (uint16_t)data->activity_target[i],
-                               &levels[i]);
-               if (result)
-                       return result;
-
-               /* Make sure only DPM levels 0-1 have a Deep Sleep divider ID populated. */
-               if (i > 1)
-                       levels[i].DeepSleepDivId = 0;
-       }
-
-       /* Only enable level 0 for now.*/
-       levels[0].EnabledForActivity = 1;
-
-       /* set highest level watermark to high */
-       levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
-                       PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       data->smc_state_table.GraphicsDpmLevelCount =
-                       (uint8_t)dpm_table->sclk_table.count;
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-       if (pcie_table != NULL) {
-               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
-                               "There must be 1 or more PCIE levels defined in PPTable.",
-                               return -EINVAL);
-               max_entry = pcie_entry_cnt - 1;
-               for (i = 0; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel =
-                                       (uint8_t)((i < max_entry) ? i : max_entry);
-       } else {
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (hightest_pcie_level_enabled + 1))) != 0))
-                       hightest_pcie_level_enabled++;
-
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << lowest_pcie_level_enabled)) == 0))
-                       lowest_pcie_level_enabled++;
-
-               while ((count < hightest_pcie_level_enabled) &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-                       count++;
-
-               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-                               hightest_pcie_level_enabled ?
-                                               (lowest_pcie_level_enabled + 1 + count) :
-                                               hightest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to hightest_pcie_level_enabled */
-               for (i = 2; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to lowest_pcie_level_enabled */
-               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to mid_pcie_level_enabled */
-               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-       }
-       /* the level count is sent to the SMC once at SMC-table init and never changes */
-       result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, data->sram_end);
-
-       return result;
-}
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP  Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz,       450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz,       500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz,       550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz,       600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz,       650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz,       700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz,       750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz,       800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
-       if (mem_clock <= 10000) return 0x0;
-       if (mem_clock <= 15000) return 0x1;
-       if (mem_clock <= 20000) return 0x2;
-       if (mem_clock <= 25000) return 0x3;
-       if (mem_clock <= 30000) return 0x4;
-       if (mem_clock <= 35000) return 0x5;
-       if (mem_clock <= 40000) return 0x6;
-       if (mem_clock <= 45000) return 0x7;
-       if (mem_clock <= 50000) return 0x8;
-       if (mem_clock <= 55000) return 0x9;
-       if (mem_clock <= 60000) return 0xa;
-       if (mem_clock <= 65000) return 0xb;
-       if (mem_clock <= 70000) return 0xc;
-       if (mem_clock <= 75000) return 0xd;
-       if (mem_clock <= 80000) return 0xe;
-       /* mem_clock > 800MHz */
-       return 0xf;
-}
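
Since the buckets above start at 100MHz and widen by 50MHz per code, the chain of comparisons collapses to one loop. A standalone equivalent, assuming mem_clock is in 10kHz units as the surrounding comments suggest:

#include <stdint.h>
#include <stdio.h>

/* same bucketing as fiji_get_mclk_frequency_ratio(): code r covers
 * clocks up to 10000 + r * 5000 (10kHz units), 0xf above 800MHz */
static uint8_t mclk_ratio(uint32_t mem_clock)
{
	uint8_t r;

	for (r = 0; r <= 14; r++)
		if (mem_clock <= 10000u + r * 5000u)
			return r;
	return 0xf;
}

int main(void)
{
	printf("0x%x\n", (unsigned)mclk_ratio(35000));	/* 350MHz -> 0x5 */
	printf("0x%x\n", (unsigned)mclk_ratio(90000));	/* 900MHz -> 0xf */
	return 0;
}
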
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    clock   the memory clock to use to populate the structure
-* @param    mclk    the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
-       struct pp_atomctrl_memory_clock_param mem_param;
-       int result;
-
-       result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to get Memory PLL Dividers.",);
-
-       /* Save the result data to the output memory level structure */
-       mclk->MclkFrequency   = clock;
-       mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
-       mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
-
-       return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result = 0;
-
-       if (table_info->vdd_dep_on_mclk) {
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk, clock,
-                               &mem_level->MinVoltage, &mem_level->MinMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find MinVddc voltage value from memory "
-                               "VDDC voltage dependency table", return result);
-       }
-
-       mem_level->EnabledForThrottle = 1;
-       mem_level->EnabledForActivity = 0;
-       mem_level->UpHyst = 0;
-       mem_level->DownHyst = 100;
-       mem_level->VoltageDownHyst = 0;
-       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       mem_level->StutterEnable = false;
-
-       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       /* enable stutter mode if all of the following conditions apply:
-        * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
-        * &(data->DisplayTiming.numExistingDisplays));
-        */
-       data->display_timing.num_existing_displays = 1;
-
-       if ((data->mclk_stutter_mode_threshold) &&
-               (clock <= data->mclk_stutter_mode_threshold) &&
-               (!data->is_uvd_enabled) &&
-               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-                               STUTTER_ENABLE) & 0x1))
-               mem_level->StutterEnable = true;
-
-       result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-       }
-       return result;
-}
-
-/**
-* Populates all SMC MCLK level structures based on the trimmed, allowed DPM memory clock states.
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t array = data->dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
-       uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
-                       SMU73_MAX_LEVELS_MEMORY;
-       struct SMU73_Discrete_MemoryLevel *levels =
-                       data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                               "can not populate memory level as memory clock is zero",
-                               return -EINVAL);
-               result = fiji_populate_single_memory_level(hwmgr,
-                               dpm_table->mclk_table.dpm_levels[i].value,
-                               &levels[i]);
-               if (result)
-                       return result;
-       }
-
-       /* Only enable level 0 for now. */
-       levels[0].EnabledForActivity = 1;
-
-       /* Prevent MC activity in stutter mode from pushing DPM up.
-        * The UVD change complements this by putting the MCLK in
-        * a higher state by default, so that we are not affected by
-        * the up threshold or MCLK DPM latency.
-        */
-       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
-       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-       data->smc_state_table.MemoryDpmLevelCount =
-                       (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       /* set highest level watermark to high */
-       levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
-                       PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       /* the level count is sent to the SMC once at SMC-table init and never changes */
-       result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, data->sram_end);
-
-       return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk       the MCLK value used to decide whether MVDD should be high or low.
-* @param    smio_pat   the SMIO pattern structure to be populated
-*/
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-               uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-       const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find the first mvdd entry whose clock is at least the request */
-               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-                               "MVDD Voltage is outside the supported range.",
-                               return -EINVAL);
-       } else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = 0;
-       const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       SMIO_Pattern vol_level;
-       uint32_t mvdd;
-       uint16_t us_mvdd;
-       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-       if (!data->sclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0,
-                * already converted to SMC_UL */
-               table->ACPILevel.SclkFrequency =
-                               data->dpm_table.sclk_table.dpm_levels[0].value;
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_sclk,
-                               table->ACPILevel.SclkFrequency,
-                               &table->ACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDC voltage value "
-                               "in Clock Dependency Table",);
-       } else {
-               table->ACPILevel.SclkFrequency =
-                               data->vbios_boot_state.sclk_bootup_value;
-               table->ACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-       }
-
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-                       table->ACPILevel.SclkFrequency,  &dividers);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error retrieving Engine Clock dividers from VBIOS.",
-                       return result);
-
-       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-       table->ACPILevel.DeepSleepDivId = 0;
-
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_PWRON, 0);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_RESET, 1);
-       spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
-                       SCLK_MUX_SEL, 4);
-
-       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-       if (!data->mclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-               table->MemoryACPILevel.MclkFrequency =
-                               data->dpm_table.mclk_table.dpm_levels[0].value;
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk,
-                               table->MemoryACPILevel.MclkFrequency,
-                               &table->MemoryACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDCI voltage value "
-                               "in Clock Dependency Table",);
-       } else {
-               table->MemoryACPILevel.MclkFrequency =
-                               data->vbios_boot_state.mclk_bootup_value;
-               table->MemoryACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-       }
-
-       us_mvdd = 0;
-       if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-                       (data->mclk_dpm_key_disabled))
-               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-       else {
-               if (!fiji_populate_mvdd_value(hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[0].value,
-                               &vol_level))
-                       us_mvdd = vol_level.Voltage;
-       }
-
-       table->MemoryACPILevel.MinMvdd =
-                       PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpHyst = 0;
-       table->MemoryACPILevel.DownHyst = 100;
-       table->MemoryACPILevel.VoltageDownHyst = 0;
-       table->MemoryACPILevel.ActivityLevel =
-                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       table->MemoryACPILevel.StutterEnable = false;
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
-       return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       table->VceLevelCount = (uint8_t)(mm_table->count);
-       table->VceBootLevel = 0;
-
-       for (count = 0; count < table->VceLevelCount; count++) {
-               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
-               table->VceLevel[count].MinVoltage = 0;
-               table->VceLevel[count].MinVoltage |=
-                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->VceLevel[count].MinVoltage |=
-                               ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->VceLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divider id for VCE engine clock",
-                               return result);
-
-               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       table->AcpLevelCount = (uint8_t)(mm_table->count);
-       table->AcpBootLevel = 0;
-
-       for (count = 0; count < table->AcpLevelCount; count++) {
-               table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
-               table->AcpLevel[count].MinVoltage = 0;
-               table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->AcpLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divider id for engine clock", return result);
-
-               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divider id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-               int32_t eng_clock, int32_t mem_clock,
-               struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-       uint32_t dram_timing;
-       uint32_t dram_timing2;
-       uint32_t burstTime;
-       ULONG state, trrds, trrdl;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                       eng_clock, mem_clock);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
-       state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
-       trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
-       trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-       arb_regs->McArbBurstTime   = (uint8_t)burstTime;
-       arb_regs->TRRDS            = (uint8_t)trrds;
-       arb_regs->TRRDL            = (uint8_t)trrdl;
-
-       return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
-       uint32_t i, j;
-       int result = 0;
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = fiji_populate_memory_timing_parameters(hwmgr,
-                                       data->dpm_table.sclk_table.dpm_levels[i].value,
-                                       data->dpm_table.mclk_table.dpm_levels[j].value,
-                                       &arb_regs.entries[i][j]);
-                       if (result)
-                               break;
-               }
-       }
-
-       if (!result)
-               result = fiji_copy_bytes_to_smc(
-                               hwmgr->smumgr,
-                               data->arb_table_start,
-                               (uint8_t *)&arb_regs,
-                               sizeof(SMU73_Discrete_MCArbDramTimingTable),
-                               data->sram_end);
-       return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       table->UvdLevelCount = (uint8_t)(mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].MinVoltage = 0;
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].VclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divider id for Vclk clock", return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Dclk clock", return result);
-
-               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
-               uint32_t value, uint32_t *boot_level)
-{
-       int result = -EINVAL;
-       uint32_t i;
-
-       for (i = 0; i < table->count; i++) {
-               if (value == table->dpm_levels[i].value) {
-                       *boot_level = i;
-                       result = 0;
-               }
-       }
-       return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table */
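-       /* The lookup results are not checked: if a boot value is missing
-        * from the DPM table, the corresponding boot level simply stays 0.
-        */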
-       result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(table->GraphicsBootLevel));
-
-       result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
-                       data->vbios_boot_state.mclk_bootup_value,
-                       (uint32_t *)&(table->MemoryBootLevel));
-
-       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-                       VOLTAGE_SCALE;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return 0;
-}
-
-static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint8_t count, level;
-
-       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
-                               data->vbios_boot_state.sclk_bootup_value) {
-                       data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
-                               data->vbios_boot_state.mclk_bootup_value) {
-                       data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-       /* Read SMU_EFUSE to calculate RO and determine whether the part
-        * is SS or FF; if RO >= 1660 MHz, the part is FF.
-        */
-       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
-       efuse &= 0xFF000000;
-       efuse = efuse >> 24;
-       efuse2 &= 0xF;
-
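-       /* Map the 8-bit fuse value linearly onto the RO range:
-        * 1350..2300 when efuse2 == 1, otherwise 1000..2500.
-        */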
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
-
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
-
-       /* Populate Stretch amount */
-       data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-       for (i = 0; i < sclk_table->count; i++) {
-               data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-                               sclk_table->entries[i].cks_enable << i;
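-               /* Empirically fitted voltage curves as a function of sclk,
-                * with and without clock stretching; their difference plus
-                * the per-entry cks_voffset, scaled by 100/625, becomes the
-                * per-level voltage offset.
-                */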
-               volt_without_cks = (uint32_t)((14041 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-               volt_with_cks = (uint32_t)((13946 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
-               if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
-               data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-       }
-
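-       /* Disable stretching, then latch staticEnable while the master
-        * reset is asserted, and finally release the reset.
-        */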
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       staticEnable, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
-       /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-               PP_ASSERT_WITH_CODE(false,
-                               "Stretch Amount in PPTable not supported\n",
-                               return -EINVAL);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][0];
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-                       GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
-                       SclkFrequency) / 100);
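-       /* Only program the CKS control fields when the highest sclk
-        * (in MHz) falls strictly inside the lookup table's window.
-        */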
-       if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
-                       clock_freq_u16 &&
-           fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
-                       clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (fiji_clock_stretch_amount_conversion
-                               [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].maxFreq);
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
-               data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >=
-                                       (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq <
-                                               (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       data->smc_state_table.ClockStretcherDataTable.
-                       ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
-                               ClockStretcherDataTable.
-                               ClockStretcherDataTableEntry[i].setting);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-       value &= 0xFFFFFFFE;
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-       return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in the DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint16_t config;
-
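-       /* VRConfig packs one regulator setting per rail; VDDGFX is
-        * merged with VDDC, so only the VDDC, VDDCI and MVDD fields
-        * are filled in below.
-        */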
-       config = VR_MERGED_WITH_VDDC;
-       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-       /* Set Vddc Voltage Controller */
-       if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= config;
-       } else {
-               PP_ASSERT_WITH_CODE(false,
-                               "VDDC should be on SVI2 control in merged mode!",);
-       }
-       /* Set Vddci Voltage Controller */
-       if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       }
-       /* Set Mvdd Voltage Controller */
-       if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               config = VR_SVI2_PLANE_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
-       const struct fiji_ulv_parm *ulv = &(data->ulv);
-       uint8_t i;
-       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
-       result = fiji_setup_default_dpm_tables(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to setup default DPM tables!", return result);
-
-       if (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
-               fiji_populate_smc_voltage_tables(hwmgr, table);
-
-       table->SystemFlags = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-       if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
-               result = fiji_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to initialize ULV state!", return result);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
-       }
-
-       result = fiji_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Link Level!", return result);
-
-       result = fiji_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Graphics Level!", return result);
-
-       result = fiji_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Memory Level!", return result);
-
-       result = fiji_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACPI Level!", return result);
-
-       result = fiji_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize VCE Level!", return result);
-
-       result = fiji_populate_smc_acp_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACP Level!", return result);
-
-       result = fiji_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
-       /* Since only the initial state is completely set up at this point
-        * (the other states are just copies of the boot state) we only
-        * need to populate the ARB settings for the initial state.
-        */
-       result = fiji_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to Write ARB settings for the initial state.", return result);
-
-       result = fiji_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize UVD Level!", return result);
-
-       result = fiji_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot Level!", return result);
-
-       result = fiji_populate_smc_initial_state(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot State!", return result);
-
-       result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate BAPM Parameters!", return result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = fiji_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to populate Clock Stretcher Data Table!",
-                               return result);
-       }
-
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-                       table_info->cac_dtp_table->usTargetOperatingTemp *
-                       FIJI_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow  =
-                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-                       FIJI_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable = 1;
-       table->MemoryInterval = 1;
-       table->VoltageResponseTime = 0;
-       table->PhaseResponseTime = 0;
-       table->MemoryThermThrottleEnable = 1;
-       table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3 */
-       table->PCIeGenInterval = 1;
-       table->VRConfig = 0;
-
-       result = fiji_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate VRConfig setting!", return result);
-
-       table->ThermGpio = 17;
-       table->SclkStepSize = 0x4000;
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       } else {
-               table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                       &gpio_pin)) {
-               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       /* Thermal Output GPIO */
-       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-                       &gpio_pin)) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-               /* For polarity, read GPIOPAD_A for the assigned GPIO pin:
-                * VBIOS programs this register to set the 'inactive state',
-                * so the driver can derive the 'active state' from it and
-                * program the SMU with the correct polarity.
-                */
-               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-                               (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot) &&
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_CombinePCCWithThermalSignal))
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-       } else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
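-       /* Note: the copy below stops short of the three trailing
-        * SMU73_PIDController blocks, which are not written from the host.
-        */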
-       result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
-                       data->dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
-                       data->sram_end);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload dpm data to SMC memory!", return result);
-
-       return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t tmp;
-       int result;
-
-       /* This is a read-modify-write on the first byte of the ARB table.
-        * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-        * is the field 'current'.
-        * This solution is ugly, but we never write the whole table, only
-        * individual fields in it.
-        * In reality this field should not be in that structure
-        * but in a soft register.
-        */
-       result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       data->arb_table_start, &tmp, data->sram_end);
-
-       if (result)
-               return result;
-
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return fiji_write_smc_sram_dword(hwmgr->smumgr,
-                       data->arb_table_start,  tmp, data->sram_end);
-}
-
-static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
-       return 0;
-}
-
-static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       SCLK_PWRMGT_OFF, 0);
-       return 0;
-}
-
-static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_ulv_parm *ulv = &(data->ulv);
-
-       if (ulv->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
-       return 0;
-}
-
-static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_ulv_parm *ulv = &(data->ulv);
-
-       if (ulv->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
-       return 0;
-}
-
-static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to enable Master Deep Sleep switch failed!",
-                                       return -1);
-       } else {
-               if (smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to disable Master Deep Sleep switch failed!",
-                                       return -1);
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to disable Master Deep Sleep switch failed!",
-                                       return -1);
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t   val, val0, val2;
-       uint32_t   i, cpl_cntl, cpl_threshold, mc_threshold;
-
-       /* enable SCLK dpm */
-       if (!data->sclk_dpm_key_disabled)
-               PP_ASSERT_WITH_CODE(
-               (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
-               "Failed to enable SCLK DPM during DPM Start Function!",
-               return -1);
-
-       /* enable MCLK dpm */
-       if (0 == data->mclk_dpm_key_disabled) {
-               cpl_threshold = 0;
-               mc_threshold = 0;
-
-               /* Read per MCD tile (0 - 7) */
-               for (i = 0; i < 8; i++) {
-                       PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
-                       val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
-                       if (0xf0000000 != val) {
-                               /* count the number of MCQs that have channel(s) enabled */
-                               cpl_threshold++;
-                               /* only harvested (3) or full (4) channel configs are supported */
-                               mc_threshold = val ? 3 : 4;
-                       }
-               }
-               PP_ASSERT_WITH_CODE(0 != cpl_threshold,
-                               "Number of MCQ is zero!", return -EINVAL);
-
-               mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
-                               LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
-                                               LCAC_MC0_CNTL__MC0_ENABLE_MASK;
-               cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
-                               LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
-                                               LCAC_CPL_CNTL__CPL_ENABLE_MASK;
-               cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_MC0_CNTL, mc_threshold);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_MC1_CNTL, mc_threshold);
-               if (8 == cpl_threshold) {
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC2_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC3_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC4_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC5_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC6_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC7_CNTL, mc_threshold);
-               }
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_CPL_CNTL, cpl_cntl);
-
-               udelay(5);
-
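-               /* Second pass: re-write the same LCAC registers with the
-                * SIGNAL_ID bit set now that the thresholds are in place.
-                */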
-               mc_threshold = mc_threshold |
-                               (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
-               cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_MC0_CNTL, mc_threshold);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_MC1_CNTL, mc_threshold);
-               if (8 == cpl_threshold) {
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC2_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC3_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC4_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC5_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC6_CNTL, mc_threshold);
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixLCAC_MC7_CNTL, mc_threshold);
-               }
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixLCAC_CPL_CNTL, cpl_cntl);
-
-               /* Program CAC_EN per MCD (0-7) Tile */
-               val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
-               val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
-                               MC_CONFIG_MCD__MC_RD_ENABLE_MASK);
-
-               for (i = 0; i < 8; i++) {
-                       /* Enable MCD i Tile read & write */
-                       val2  = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
-                                       (1 << i));
-                       cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
-                       /* Enable CAC_EN for MCD i tile */
-                       val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
-                       val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
-                       cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
-               }
-               /* Set MC_CONFIG_MCD back to its default setting val0 */
-               cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);
-
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_Enable)),
-                               "Failed to enable MCLK DPM during DPM Start Function!",
-                               return -1);
-       }
-       return 0;
-}
-
-static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* enable general power management */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                       GLOBAL_PWRMGT_EN, 1);
-       /* enable sclk deep sleep */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       DYNAMIC_PM_EN, 1);
-       /* prepare for PCIE DPM */
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       data->soft_regs_start + offsetof(SMU73_SoftRegisters,
-                                       VoltageChangeTimeout), 0x1000);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-                       SWRST_COMMAND_1, RESETLC, 0x0);
-
-       PP_ASSERT_WITH_CODE(
-                       (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                       PPSMC_MSG_Voltage_Cntl_Enable)),
-                       "Failed to enable voltage DPM during DPM Start Function!",
-                       return -1);
-
-       if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
-               printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
-               return -1;
-       }
-
-       /* enable PCIE dpm */
-       if (!data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Enable)),
-                               "Failed to enable pcie DPM during DPM Start Function!",
-                               return -1);
-       }
-
-       return 0;
-}
-
-static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* disable SCLK dpm */
-       if (!data->sclk_dpm_key_disabled)
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_DPM_Disable) == 0),
-                               "Failed to disable SCLK DPM!",
-                               return -1);
-
-       /* disable MCLK dpm */
-       if (!data->mclk_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
-                               "Failed to force MCLK DPM0!",
-                               return -1);
-
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
-                               "Failed to disable MCLK DPM!",
-                               return -1);
-       }
-
-       return 0;
-}
-
-static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* disable general power management */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                       GLOBAL_PWRMGT_EN, 0);
-       /* disable sclk deep sleep */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       DYNAMIC_PM_EN, 0);
-
-       /* disable PCIE dpm */
-       if (!data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
-                               "Failed to disable pcie DPM during DPM Stop Function!",
-                               return -1);
-       }
-
-       if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
-               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
-               return -1;
-       }
-
-       PP_ASSERT_WITH_CODE(
-                       (smum_send_msg_to_smc(hwmgr->smumgr,
-                                       PPSMC_MSG_Voltage_Cntl_Disable) == 0),
-                       "Failed to disable voltage DPM during DPM Stop Function!",
-                       return -1);
-
-       return 0;
-}
-
-static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
-               uint32_t sources)
-{
-       bool protection;
-       enum DPM_EVENT_SRC src;
-
-       switch (sources) {
-       default:
-               printk(KERN_ERR "Unknown throttling event sources.\n");
-               /* fall through */
-       case 0:
-               protection = false;
-               /* src is unused */
-               break;
-       case (1 << PHM_AutoThrottleSource_Thermal):
-               protection = true;
-               src = DPM_EVENT_SRC_DIGITAL;
-               break;
-       case (1 << PHM_AutoThrottleSource_External):
-               protection = true;
-               src = DPM_EVENT_SRC_EXTERNAL;
-               break;
-       case (1 << PHM_AutoThrottleSource_External) |
-                       (1 << PHM_AutoThrottleSource_Thermal):
-               protection = true;
-               src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
-               break;
-       }
-       /* Order matters - don't enable thermal protection for the wrong source. */
-       if (protection) {
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
-                               DPM_EVENT_SRC, src);
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                               THERMAL_PROTECTION_DIS,
-                               !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_ThermalController));
-       } else
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                               THERMAL_PROTECTION_DIS, 1);
-}
-
-static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-               PHM_AutoThrottleSource source)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!(data->active_auto_throttle_sources & (1 << source))) {
-               data->active_auto_throttle_sources |= 1 << source;
-               fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-       }
-       return 0;
-}
-
-static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-       return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-               PHM_AutoThrottleSource source)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->active_auto_throttle_sources & (1 << source)) {
-               data->active_auto_throttle_sources &= ~(1 << source);
-               fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-       }
-       return 0;
-}
-
-static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-       return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = (!fiji_is_dpm_running(hwmgr)) ? 0 : -1;
-       PP_ASSERT_WITH_CODE(tmp_result == 0,
-                       "DPM is already running right now, no need to enable DPM!",
-                       return 0);
-
-       if (fiji_voltage_control(hwmgr)) {
-               tmp_result = fiji_enable_voltage_control(hwmgr);
-               PP_ASSERT_WITH_CODE(tmp_result == 0,
-                               "Failed to enable voltage control!",
-                               result = tmp_result);
-       }
-
-       if (fiji_voltage_control(hwmgr)) {
-               tmp_result = fiji_construct_voltage_tables(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to construct voltage tables!",
-                               result = tmp_result);
-       }
-
-       tmp_result = fiji_initialize_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize MC reg table!", result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EngineSpreadSpectrumSupport))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalController))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
-       tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program static screen threshold parameters!",
-                       result = tmp_result);
-
-       tmp_result = fiji_enable_display_gap(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable display gap!", result = tmp_result);
-
-       tmp_result = fiji_program_voting_clients(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program voting clients!", result = tmp_result);
-
-       tmp_result = fiji_process_firmware_header(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to process firmware header!", result = tmp_result);
-
-       tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize switch from ArbF0 to F1!",
-                       result = tmp_result);
-
-       tmp_result = fiji_init_smc_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize SMC table!", result = tmp_result);
-
-       tmp_result = fiji_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize ARB table index!", result = tmp_result);
-
-       tmp_result = fiji_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to populate PM fuses!", result = tmp_result);
-
-       tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
-       tmp_result = tonga_notify_smc_display_change(hwmgr, false);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to notify no display!", result = tmp_result);
-
-       tmp_result = fiji_enable_sclk_control(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable SCLK control!", result = tmp_result);
-
-       tmp_result = fiji_enable_ulv(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable ULV!", result = tmp_result);
-
-       tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable deep sleep master switch!", result = tmp_result);
-
-       tmp_result = fiji_start_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to start DPM!", result = tmp_result);
-
-       tmp_result = fiji_enable_smc_cac(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable SMC CAC!", result = tmp_result);
-
-       tmp_result = fiji_enable_power_containment(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable power containment!", result = tmp_result);
-
-       tmp_result = fiji_power_control_set_level(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to power control set level!", result = tmp_result);
-
-       tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable thermal auto throttle!", result = tmp_result);
-
-       return result;
-}
-
-static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
-       PP_ASSERT_WITH_CODE(tmp_result == 0,
-                       "DPM is not running right now, no need to disable DPM!",
-                       return 0);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalController))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
-       tmp_result = fiji_disable_power_containment(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable power containment!", result = tmp_result);
-
-       tmp_result = fiji_disable_smc_cac(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable SMC CAC!", result = tmp_result);
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
-       tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable thermal auto throttle!", result = tmp_result);
-
-       tmp_result = fiji_stop_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to stop DPM!", result = tmp_result);
-
-       tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable deep sleep master switch!", result = tmp_result);
-
-       tmp_result = fiji_disable_ulv(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable ULV!", result = tmp_result);
-
-       tmp_result = fiji_clear_voting_clients(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to clear voting clients!", result = tmp_result);
-
-       tmp_result = fiji_reset_to_default(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to reset to default!", result = tmp_result);
-
-       tmp_result = fiji_force_switch_to_arbf0(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to force to switch arbf0!", result = tmp_result);
-
-       return result;
-}
-
-static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t level, tmp;
-
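-       /* For each DPM type, compute the index of the highest set bit in
-        * the enable mask and force that single level via the SMC.
-        */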
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                               (1 << level));
-               }
-       }
-
-       if (!data->mclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                               (1 << level));
-               }
-       }
-
-       if (!data->pcie_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_ForceLevel,
-                                               (1 << level));
-               }
-       }
-       return 0;
-}
-
-static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       phm_apply_dal_min_voltage_request(hwmgr);
-
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-       }
-       return 0;
-}
-
-static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!fiji_is_dpm_running(hwmgr))
-               return -EINVAL;
-
-       if (!data->pcie_dpm_key_disabled) {
-               smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_PCIeDPM_UnForceLevel);
-       }
-
-       return fiji_upload_dpmlevel_enable_mask(hwmgr);
-}
-
-static uint32_t fiji_get_lowest_enabled_level(
-               struct pp_hwmgr *hwmgr, uint32_t mask)
-{
-       uint32_t level = 0;
-
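-       /* Index of the lowest set bit; callers must pass a non-zero mask
-        * or this loop will not terminate.
-        */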
-       while (0 == (mask & (1 << level)))
-               level++;
-
-       return level;
-}
-
-static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data =
-                       (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t level;
-
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       level = fiji_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                                           (1 << level));
-               }
-       }
-
-       if (!data->mclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       level = fiji_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                                           (1 << level));
-               }
-       }
-
-       if (!data->pcie_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-                       level = fiji_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_PCIeDPM_ForceLevel,
-                                                           (1 << level));
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
-                               enum amd_dpm_forced_level level)
-{
-       int ret = 0;
-
-       switch (level) {
-       case AMD_DPM_FORCED_LEVEL_HIGH:
-               ret = fiji_force_dpm_highest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_LOW:
-               ret = fiji_force_dpm_lowest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_AUTO:
-               ret = fiji_unforce_dpm_levels(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               break;
-       }
-
-       hwmgr->dpm_level = level;
-
-       return ret;
-}
-
-static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-       return sizeof(struct fiji_power_state);
-}
-
-static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-               void *state, struct pp_power_state *power_state,
-               void *pp_table, uint32_t classification_flag)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_power_state  *fiji_power_state =
-                       (struct fiji_power_state *)(&(power_state->hardware));
-       struct fiji_performance_level *performance_level;
-       ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-       ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-                       (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-       ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-                       (ATOM_Tonga_SCLK_Dependency_Table *)
-                       (((unsigned long)powerplay_table) +
-                               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-       ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-                       (ATOM_Tonga_MCLK_Dependency_Table *)
-                       (((unsigned long)powerplay_table) +
-                               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-       /* The following fields are not initialized here: id, orderedList, allStatesList */
-       power_state->classification.ui_label =
-                       (le16_to_cpu(state_entry->usClassification) &
-                       ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-                       ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-       power_state->classification.flags = classification_flag;
-       /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-       power_state->classification.temporary_state = false;
-       power_state->classification.to_be_deleted = false;
-
-       power_state->validation.disallowOnDC =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-                                       ATOM_Tonga_DISALLOW_ON_DC));
-
-       power_state->pcie.lanes = 0;
-
-       power_state->display.disableFrameModulation = false;
-       power_state->display.limitRefreshrate = false;
-       power_state->display.enableVariBright =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-                                       ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-       power_state->validation.supportedPowerLevels = 0;
-       power_state->uvd_clocks.VCLK = 0;
-       power_state->uvd_clocks.DCLK = 0;
-       power_state->temperatures.min = 0;
-       power_state->temperatures.max = 0;
-
-       performance_level = &(fiji_power_state->performance_levels
-                       [fiji_power_state->performance_level_count++]);
-
-       PP_ASSERT_WITH_CODE(
-                       (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
-                       "Performance level count exceeds the SMC limit!",
-                       return -1);
-
-       PP_ASSERT_WITH_CODE(
-                       (fiji_power_state->performance_level_count <=
-                                       hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-                       "Performance level count exceeds the driver limit!",
-                       return -1);
-
-       /* Performance levels are arranged from low to high. */
-       performance_level->memory_clock = mclk_dep_table->entries
-                       [state_entry->ucMemoryClockIndexLow].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
-                       [state_entry->ucEngineClockIndexLow].ulSclk;
-       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-                       state_entry->ucPCIEGenLow);
-       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-                       state_entry->ucPCIELaneLow);
-
-       performance_level = &(fiji_power_state->performance_levels
-                       [fiji_power_state->performance_level_count++]);
-       performance_level->memory_clock = mclk_dep_table->entries
-                       [state_entry->ucMemoryClockIndexHigh].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
-                       [state_entry->ucEngineClockIndexHigh].ulSclk;
-       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-                       state_entry->ucPCIEGenHigh);
-       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-                       state_entry->ucPCIELaneHigh);
-
-       return 0;
-}
-
-static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-               unsigned long entry_index, struct pp_power_state *state)
-{
-       int result;
-       struct fiji_power_state *ps;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                       table_info->vdd_dep_on_mclk;
-
-       state->hardware.magic = PHM_VIslands_Magic;
-
-       ps = (struct fiji_power_state *)(&state->hardware);
-
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
-                       fiji_get_pp_table_entry_callback_func);
-
-       /* This is the earliest point at which we have both the dependency table
-        * and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
-        * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK level,
-        * check that it matches the VBIOS boot state.
-        */
-       if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-               if (dep_mclk_table->entries[0].clk !=
-                               data->vbios_boot_state.mclk_bootup_value)
-                       printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot MCLK level");
-               if (dep_mclk_table->entries[0].vddci !=
-                               data->vbios_boot_state.vddci_bootup_value)
-                       printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot VDDCI level");
-       }
-
-       /* set DC compatible flag if this state supports DC */
-       if (!state->validation.disallowOnDC)
-               ps->dc_compatible = true;
-
-       if (state->classification.flags & PP_StateClassificationFlag_ACPI)
-               data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
-       ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
-       ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
-       if (!result) {
-               uint32_t i;
-
-               switch (state->classification.ui_label) {
-               case PP_StateUILabel_Performance:
-                       data->use_pcie_performance_levels = true;
-
-                       for (i = 0; i < ps->performance_level_count; i++) {
-                               if (data->pcie_gen_performance.max <
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.max =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_performance.min >
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.min =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_performance.max <
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.max =
-                                                       ps->performance_levels[i].pcie_lane;
-
-                               if (data->pcie_lane_performance.min >
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.min =
-                                                       ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               case PP_StateUILabel_Battery:
-                       data->use_pcie_power_saving_levels = true;
-
-                       for (i = 0; i < ps->performance_level_count; i++) {
-                               if (data->pcie_gen_power_saving.max <
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.max =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_power_saving.min >
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.min =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_power_saving.max <
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.max =
-                                                       ps->performance_levels[i].pcie_lane;
-
-                               if (data->pcie_lane_power_saving.min >
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.min =
-                                                       ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-                               struct pp_power_state  *request_ps,
-                       const struct pp_power_state *current_ps)
-{
-       struct fiji_power_state *fiji_ps =
-                               cast_phw_fiji_power_state(&request_ps->hardware);
-       uint32_t sclk;
-       uint32_t mclk;
-       struct PP_Clocks minimum_clocks = {0};
-       bool disable_mclk_switching;
-       bool disable_mclk_switching_for_frame_lock;
-       struct cgs_display_info info = {0};
-       const struct phm_clock_and_voltage_limits *max_limits;
-       uint32_t i;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int32_t count;
-       int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-       data->battery_state = (PP_StateUILabel_Battery ==
-                       request_ps->classification.ui_label);
-
-       PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
-                                "VI should always have 2 performance levels",);
-
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-                       &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-                       &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-       /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
-               for (i = 0; i < fiji_ps->performance_level_count; i++) {
-                       if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
-                               fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
-                       if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
-                               fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
-               }
-       }
-
-       fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-       fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
-       fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
-       /* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState)) {
-               max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-               stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-               for (count = table_info->vdd_dep_on_sclk->count - 1;
-                               count >= 0; count--) {
-                       if (stable_pstate_sclk >=
-                                       table_info->vdd_dep_on_sclk->entries[count].clk) {
-                               stable_pstate_sclk =
-                                               table_info->vdd_dep_on_sclk->entries[count].clk;
-                               break;
-                       }
-               }
-
-               if (count < 0)
-                       stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
-               stable_pstate_mclk = max_limits->mclk;
-
-               minimum_clocks.engineClock = stable_pstate_sclk;
-               minimum_clocks.memoryClock = stable_pstate_mclk;
-       }
-
-       if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-               minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-       if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-               minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-       fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-       if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-                               hwmgr->platform_descriptor.overdriveLimit.engineClock),
-                               "Overdrive sclk exceeds limit",
-                               hwmgr->gfx_arbiter.sclk_over_drive =
-                                               hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-               if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-                       fiji_ps->performance_levels[1].engine_clock =
-                                       hwmgr->gfx_arbiter.sclk_over_drive;
-       }
-
-       if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-                               hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-                               "Overdrive mclk exceeds limit",
-                               hwmgr->gfx_arbiter.mclk_over_drive =
-                                               hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-               if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-                       fiji_ps->performance_levels[1].memory_clock =
-                                       hwmgr->gfx_arbiter.mclk_over_drive;
-       }
-
-       disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-                                   hwmgr->platform_descriptor.platformCaps,
-                                   PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
-
-       sclk = fiji_ps->performance_levels[0].engine_clock;
-       mclk = fiji_ps->performance_levels[0].memory_clock;
-
-       if (disable_mclk_switching)
-               mclk = fiji_ps->performance_levels
-               [fiji_ps->performance_level_count - 1].memory_clock;
-
-       if (sclk < minimum_clocks.engineClock)
-               sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
-                               max_limits->sclk : minimum_clocks.engineClock;
-
-       if (mclk < minimum_clocks.memoryClock)
-               mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
-                               max_limits->mclk : minimum_clocks.memoryClock;
-
-       fiji_ps->performance_levels[0].engine_clock = sclk;
-       fiji_ps->performance_levels[0].memory_clock = mclk;
-
-       fiji_ps->performance_levels[1].engine_clock =
-               (fiji_ps->performance_levels[1].engine_clock >=
-                               fiji_ps->performance_levels[0].engine_clock) ?
-                                               fiji_ps->performance_levels[1].engine_clock :
-                                               fiji_ps->performance_levels[0].engine_clock;
-
-       if (disable_mclk_switching) {
-               if (mclk < fiji_ps->performance_levels[1].memory_clock)
-                       mclk = fiji_ps->performance_levels[1].memory_clock;
-
-               fiji_ps->performance_levels[0].memory_clock = mclk;
-               fiji_ps->performance_levels[1].memory_clock = mclk;
-       } else {
-               if (fiji_ps->performance_levels[1].memory_clock <
-                               fiji_ps->performance_levels[0].memory_clock)
-                       fiji_ps->performance_levels[1].memory_clock =
-                                       fiji_ps->performance_levels[0].memory_clock;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState)) {
-               for (i = 0; i < fiji_ps->performance_level_count; i++) {
-                       fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-                       fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-                       fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-                       fiji_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       const struct fiji_power_state *fiji_ps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       uint32_t sclk = fiji_ps->performance_levels
-                       [fiji_ps->performance_level_count - 1].engine_clock;
-       struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       uint32_t mclk = fiji_ps->performance_levels
-                       [fiji_ps->performance_level_count - 1].memory_clock;
-       uint32_t i;
-       struct cgs_display_info info = {0};
-
-       data->need_update_smu7_dpm_table = 0;
-
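-       /* if the requested sclk matches no existing DPM level it can only
-        * come from overdrive, so the sclk table itself needs an OD update
-        */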
-       for (i = 0; i < sclk_table->count; i++) {
-               if (sclk == sclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= sclk_table->count) {
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-       } else {
-               if (data->display_timing.min_clock_in_sr !=
-                               hwmgr->display_config.min_core_set_clock_in_sr)
-                       data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-       }
-
-       for (i = 0; i < mclk_table->count; i++) {
-               if (mclk == mclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= mclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-       return 0;
-}
-
-static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
-               const struct fiji_power_state *fiji_ps)
-{
-       uint32_t i;
-       uint32_t sclk, max_sclk = 0;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
-       for (i = 0; i < fiji_ps->performance_level_count; i++) {
-               sclk = fiji_ps->performance_levels[i].engine_clock;
-               if (max_sclk < sclk)
-                       max_sclk = sclk;
-       }
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-                       return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
-                                       dpm_table->pcie_speed_table.dpm_levels
-                                       [dpm_table->pcie_speed_table.count - 1].value :
-                                       dpm_table->pcie_speed_table.dpm_levels[i].value);
-       }
-
-       return 0;
-}
-
-static int fiji_request_link_speed_change_before_state_change(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_power_state *fiji_nps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-       const struct fiji_power_state *fiji_cps =
-                       cast_const_phw_fiji_power_state(states->pcurrent_state);
-
-       uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
-       uint16_t current_link_speed;
-
-       if (data->force_pcie_gen == PP_PCIEGenInvalid)
-               current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
-       else
-               current_link_speed = data->force_pcie_gen;
-
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-       data->pspp_notify_required = false;
-       if (target_link_speed > current_link_speed) {
-               switch (target_link_speed) {
-               case PP_PCIEGen3:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-                               break;
-                       data->force_pcie_gen = PP_PCIEGen2;
-                       if (current_link_speed == PP_PCIEGen2)
-                               break;
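-                       /* fall through: retry with a Gen2 request */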
-               case PP_PCIEGen2:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-                               break;
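-                       /* fall through on failure */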
-               default:
-                       data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
-                       break;
-               }
-       } else {
-               if (target_link_speed < current_link_speed)
-                       data->pspp_notify_required = true;
-       }
-
-       return 0;
-}
-
-static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-                                   "Trying to freeze SCLK DPM when DPM is disabled",
-                                   );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SCLKDPM_FreezeLevel),
-                               "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-                               return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-                DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-                                   "Trying to freeze MCLK DPM when DPM is disabled",
-                                   );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MCLKDPM_FreezeLevel),
-                               "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-                               return -1);
-       }
-
-       return 0;
-}
-
-static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result = 0;
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       const struct fiji_power_state *fiji_ps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t sclk = fiji_ps->performance_levels
-                       [fiji_ps->performance_level_count - 1].engine_clock;
-       uint32_t mclk = fiji_ps->performance_levels
-                       [fiji_ps->performance_level_count - 1].memory_clock;
-       struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
-       struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
-       uint32_t dpm_count, clock_percent;
-       uint32_t i;
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-               dpm_table->sclk_table.dpm_levels
-               [dpm_table->sclk_table.count - 1].value = sclk;
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_OD6PlusinACSupport) ||
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_OD6PlusinDCSupport)) {
-                       /* Calculate from the golden DPM table, since the
-                        * Heatmap GPU Clock axis is also based on the default
-                        * values.
-                        */
-                       PP_ASSERT_WITH_CODE(
-                               (golden_dpm_table->sclk_table.dpm_levels
-                                               [golden_dpm_table->sclk_table.count - 1].value != 0),
-                               "Divide by 0!",
-                               return -1);
-                       dpm_count = dpm_table->sclk_table.count < 2 ?
-                                       0 : dpm_table->sclk_table.count - 2;
-                       for (i = dpm_count; i > 1; i--) {
-                               if (sclk > golden_dpm_table->sclk_table.dpm_levels
-                                               [golden_dpm_table->sclk_table.count-1].value) {
-                                       clock_percent =
-                                               ((sclk - golden_dpm_table->sclk_table.dpm_levels
-                                                       [golden_dpm_table->sclk_table.count-1].value) * 100) /
-                                               golden_dpm_table->sclk_table.dpm_levels
-                                                       [golden_dpm_table->sclk_table.count-1].value;
-
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value +
-                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
-                                                               clock_percent)/100;
-
-                               } else if (golden_dpm_table->sclk_table.dpm_levels
-                                               [dpm_table->sclk_table.count-1].value > sclk) {
-                                       clock_percent =
-                                               ((golden_dpm_table->sclk_table.dpm_levels
-                                               [golden_dpm_table->sclk_table.count - 1].value - sclk) *
-                                                               100) /
-                                               golden_dpm_table->sclk_table.dpm_levels
-                                                       [golden_dpm_table->sclk_table.count-1].value;
-
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value -
-                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
-                                                                       clock_percent) / 100;
-                               } else {
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value;
-                               }
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-               dpm_table->mclk_table.dpm_levels
-                       [dpm_table->mclk_table.count - 1].value = mclk;
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_OD6PlusinACSupport) ||
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-                       PP_ASSERT_WITH_CODE(
-                                       (golden_dpm_table->mclk_table.dpm_levels
-                                               [golden_dpm_table->mclk_table.count-1].value != 0),
-                                       "Divide by 0!",
-                                       return -1);
-                       dpm_count = dpm_table->mclk_table.count < 2 ?
-                                       0 : dpm_table->mclk_table.count - 2;
-                       for (i = dpm_count; i > 1; i--) {
-                               if (mclk > golden_dpm_table->mclk_table.dpm_levels
-                                               [golden_dpm_table->mclk_table.count-1].value) {
-                                       clock_percent = ((mclk -
-                                                       golden_dpm_table->mclk_table.dpm_levels
-                                                       [golden_dpm_table->mclk_table.count-1].value) * 100) /
-                                                       golden_dpm_table->mclk_table.dpm_levels
-                                                       [golden_dpm_table->mclk_table.count-1].value;
-
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value +
-                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
-                                                                       clock_percent) / 100;
-
-                               } else if (golden_dpm_table->mclk_table.dpm_levels
-                                               [dpm_table->mclk_table.count-1].value > mclk) {
-                                       clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
-                                                       [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
-                                                                       golden_dpm_table->mclk_table.dpm_levels
-                                                                       [golden_dpm_table->mclk_table.count-1].value;
-
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value -
-                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
-                                                                       clock_percent) / 100;
-                               } else {
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value;
-                               }
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
-               result = fiji_populate_all_graphic_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-                               return result);
-       }
-
-       if (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
-               /*populate MCLK dpm table to SMU7 */
-               result = fiji_populate_all_memory_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-                               return result);
-       }
-
-       return result;
-}
-
-static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-                         struct fiji_single_dpm_table * dpm_table,
-                            uint32_t low_limit, uint32_t high_limit)
-{
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->count; i++) {
-               if ((dpm_table->dpm_levels[i].value < low_limit) ||
-                   (dpm_table->dpm_levels[i].value > high_limit))
-                       dpm_table->dpm_levels[i].enabled = false;
-               else
-                       dpm_table->dpm_levels[i].enabled = true;
-       }
-       return 0;
-}
-
-static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
-               const struct fiji_power_state *fiji_ps)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t high_limit_count;
-
-       PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
-                       "power state did not have any performance level",
-                       return -1);
-
-       high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
-
-       fiji_trim_single_dpm_states(hwmgr,
-                       &(data->dpm_table.sclk_table),
-                       fiji_ps->performance_levels[0].engine_clock,
-                       fiji_ps->performance_levels[high_limit_count].engine_clock);
-
-       fiji_trim_single_dpm_states(hwmgr,
-                       &(data->dpm_table.mclk_table),
-                       fiji_ps->performance_levels[0].memory_clock,
-                       fiji_ps->performance_levels[high_limit_count].memory_clock);
-
-       return 0;
-}
-
-static int fiji_generate_dpm_level_enable_mask(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result;
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_power_state *fiji_ps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-
-       result = fiji_trim_dpm_states(hwmgr, fiji_ps);
-       if (result)
-               return result;
-
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-       data->last_mclk_dpm_enable_mask =
-                       data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-
-       if (data->uvd_enabled) {
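-               /* with UVD active, mask off the lowest MCLK DPM level (bit 0),
-                * presumably to avoid memory-clock switches while decoding
-                */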
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
-                       data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-       }
-
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-       return 0;
-}
-
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                                 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
-                                 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_VCEDPM_Enable :
-                       PPSMC_MSG_VCEDPM_Disable);
-}
-
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_SAMUDPM_Enable :
-                       PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_ACPDPM_Enable :
-                       PPSMC_MSG_ACPDPM_Disable);
-}
-
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               data->smc_state_table.UvdBootLevel = 0;
-               if (table_info->mm_dep_table->count > 0)
-                       data->smc_state_table.UvdBootLevel =
-                                       (uint8_t) (table_info->mm_dep_table->count - 1);
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
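-               /* the SMC indirect space is accessed a dword at a time, so
-                * align the byte offset down and merge UvdBootLevel into the
-                * top byte of that dword
-                */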
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0x00FFFFFF;
-               mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_UVDDPM) ||
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_UVDDPM_SetEnabledMask,
-                                       (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-       }
-
-       return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_power_state *fiji_nps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-       const struct fiji_power_state *fiji_cps =
-                       cast_const_phw_fiji_power_state(states->pcurrent_state);
-
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (fiji_nps->vce_clks.evclk >0 &&
-       (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
-               data->smc_state_table.VceBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFF00FFFF;
-               mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState)) {
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_VCEDPM_SetEnabledMask,
-                                       (uint32_t)1 << data->smc_state_table.VceBootLevel);
-
-                       fiji_enable_disable_vce_dpm(hwmgr, true);
-               } else if (fiji_nps->vce_clks.evclk == 0 &&
-                               fiji_cps != NULL &&
-                               fiji_cps->vce_clks.evclk > 0)
-                       fiji_enable_disable_vce_dpm(hwmgr, false);
-       }
-
-       return 0;
-}
-
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               data->smc_state_table.SamuBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFFFFFF00;
-               mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                                       (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
-       }
-
-       return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               data->smc_state_table.AcpBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFFFF00FF;
-               mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_ACPDPM_SetEnabledMask,
-                                               (uint32_t)(1 << data->smc_state_table.AcpBootLevel));
-       }
-
-       return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
-}
-
-static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
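-               /* swap to the SMC's byte order before copying into SMC memory */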
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = fiji_copy_bytes_to_smc(
-                               hwmgr->smumgr,
-                               data->dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               data->sram_end);
-       }
-
-       return result;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
-               return fiji_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-
-               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
-                                   );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-                       "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-                       return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
-                                   );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
-                   "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-                   return -1);
-       }
-
-       data->need_update_smu7_dpm_table = 0;
-
-       return 0;
-}
-
-/* Look up the voltage based on DAL's requested level
- * and then send the requested VDDC voltage to the SMC.
- */
-static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
-       return;
-}
-
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* Apply minimum voltage based on DAL's request level */
-       fiji_apply_dal_minimum_voltage_request(hwmgr);
-
-       if (0 == data->sclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if this turns out to cause a
-                * hang, we should skip sending this message.
-                */
-               if (!fiji_is_dpm_running(hwmgr))
-                       printk(KERN_ERR "[ powerplay ] "
-                                       "Trying to set Enable Mask when DPM is disabled \n");
-
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == result),
-                               "Set Sclk Dpm enable Mask failed", return -1);
-               }
-       }
-
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if this turns out to cause a
-                * hang, we should skip sending this message.
-                */
-               if (!fiji_is_dpm_running(hwmgr))
-                       printk(KERN_ERR "[ powerplay ]"
-                                       " Trying to set Enable Mask when DPM is disabled \n");
-
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == result),
-                               "Set Mclk Dpm enable Mask failed", return -1);
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_notify_link_speed_change_after_state_change(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_power_state *fiji_ps =
-                       cast_const_phw_fiji_power_state(states->pnew_state);
-       uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
-       uint8_t  request;
-
-       if (data->pspp_notify_required) {
-               if (target_link_speed == PP_PCIEGen3)
-                       request = PCIE_PERF_REQ_GEN3;
-               else if (target_link_speed == PP_PCIEGen2)
-                       request = PCIE_PERF_REQ_GEN2;
-               else
-                       request = PCIE_PERF_REQ_GEN1;
-
-               if(request == PCIE_PERF_REQ_GEN1 &&
-                               fiji_get_current_pcie_speed(hwmgr) > 0)
-                       return 0;
-
-               if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-                       if (PP_PCIEGen2 == target_link_speed)
-                               printk(KERN_WARNING "PSPP request to switch to Gen2 from Gen3 failed!\n");
-                       else
-                               printk(KERN_WARNING "PSPP request to switch to Gen1 from Gen2 failed!\n");
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
-               const void *input)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to find DPM states clocks in DPM table!",
-                       result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result =
-                       fiji_request_link_speed_change_before_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to request link speed change before state change!",
-                               result = tmp_result);
-       }
-
-       tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-       tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to populate and upload SCLK MCLK DPM levels!",
-                       result = tmp_result);
-
-       tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to generate DPM level enabled mask!",
-                       result = tmp_result);
-
-       tmp_result = fiji_update_vce_dpm(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to update VCE DPM!",
-                       result = tmp_result);
-
-       tmp_result = fiji_update_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to update SCLK threshold!",
-                       result = tmp_result);
-
-       tmp_result = fiji_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program memory timing parameters!",
-                       result = tmp_result);
-
-       tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to unfreeze SCLK MCLK DPM!",
-                       result = tmp_result);
-
-       tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to upload DPM level enabled mask!",
-                       result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result =
-                       fiji_notify_link_speed_change_after_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to notify link speed change after state change!",
-                               result = tmp_result);
-       }
-
-       return result;
-}
-
-static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct fiji_power_state  *fiji_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-       if (low)
-               return fiji_ps->performance_levels[0].engine_clock;
-       else
-               return fiji_ps->performance_levels
-                               [fiji_ps->performance_level_count-1].engine_clock;
-}
-
-static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct fiji_power_state  *fiji_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-       if (low)
-               return fiji_ps->performance_levels[0].memory_clock;
-       else
-               return fiji_ps->performance_levels
-                               [fiji_ps->performance_level_count-1].memory_clock;
-}
-
-static void fiji_print_current_perforce_level(
-               struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-       uint32_t sclk, mclk, activity_percent = 0;
-       uint32_t offset;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
-       sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
-       mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
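-       /* the SMC reports clocks in units of 10 kHz, so divide by 100 for MHz */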
-       seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
-                       mclk / 100, sclk / 100);
-
-       offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
-       activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
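-       /* treat the value as 8.8 fixed point: add 0x80 to round, then shift
-        * down to an integer percentage
-        */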
-       activity_percent += 0x80;
-       activity_percent >>= 8;
-
-       seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-       seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-       seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t num_active_displays = 0;
-       uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-       uint32_t display_gap2;
-       uint32_t pre_vbi_time_in_us;
-       uint32_t frame_time_in_us;
-       uint32_t ref_clock;
-       uint32_t refresh_rate = 0;
-       struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
-
-       info.mode_info = &mode_info;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       num_active_displays = info.display_count;
-
-       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-                       DISP_GAP, (num_active_displays > 0) ?
-                       DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-       ref_clock = mode_info.ref_clock;
-       refresh_rate = mode_info.refresh_rate;
-
-       if (refresh_rate == 0)
-               refresh_rate = 60;
-
-       frame_time_in_us = 1000000 / refresh_rate;
-
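-       /* ref_clock appears to be in 10 kHz units, so ref_clock / 100 yields
-        * cycles per microsecond; display_gap2 is then the pre-VBI interval
-        * expressed in reference-clock cycles.
-        */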
-       pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-       display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       data->soft_regs_start +
-                       offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       data->soft_regs_start +
-                       offsetof(SMU73_SoftRegisters, VBlankTimeout),
-                       (frame_time_in_us - pre_vbi_time_in_us));
-
-       if (num_active_displays == 1)
-               tonga_notify_smc_display_change(hwmgr, true);
-
-       return 0;
-}
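
To make the gap programming above concrete, here is a worked recomputation with assumed, illustrative numbers: a 60 Hz mode, 500 us of vblank, and a reference clock reported in 10 kHz units (so ref_clock / 100 is ticks per microsecond):

/* Illustrative values only, not taken from real hardware. */
static uint32_t example_display_gap2(void)
{
        uint32_t frame_time_in_us = 1000000 / 60;                   /* 16666 us per frame */
        uint32_t pre_vbi_time_in_us = frame_time_in_us - 200 - 500; /* 15966 us, 200 us margin */
        uint32_t ref_clock = 10000;                                 /* 100 MHz in 10 kHz units */

        return pre_vbi_time_in_us * (ref_clock / 100);              /* 1596600 ref-clock ticks */
}
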
-
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-       return fiji_program_display_gap(hwmgr);
-}
-
-static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
-               uint16_t us_max_fan_pwm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-                       us_max_fan_pwm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
-               uint16_t us_max_fan_rpm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-                       us_max_fan_rpm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int fiji_dpm_set_interrupt_state(void *private_data,
-                                        unsigned src_id, unsigned type,
-                                        int enabled)
-{
-       uint32_t cg_thermal_int;
-       struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       switch (type) {
-       case AMD_THERMAL_IRQ_LOW_TO_HIGH:
-               if (enabled) {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               } else {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               }
-               break;
-
-       case AMD_THERMAL_IRQ_HIGH_TO_LOW:
-               if (enabled) {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               } else {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               }
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-                                       const void *thermal_interrupt_info)
-{
-       int result;
-       const struct pp_interrupt_registration_info *info =
-                       (const struct pp_interrupt_registration_info *)
-                       thermal_interrupt_info;
-
-       if (info == NULL)
-               return -EINVAL;
-
-       result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
-                               fiji_dpm_set_interrupt_state,
-                               info->call_back, info->context);
-
-       if (result)
-               return -EINVAL;
-
-       result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
-                               fiji_dpm_set_interrupt_state,
-                               info->call_back, info->context);
-
-       if (result)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-       if (mode) {
-               /* stop auto-manage */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl))
-                       fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
-               fiji_fan_ctrl_set_static_mode(hwmgr, mode);
-       } else {
-               /* restart auto-manage */
-               fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-       }
-
-       return 0;
-}
-
-static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-       if (hwmgr->fan_ctrl_is_in_default_mode)
-               return hwmgr->fan_ctrl_default_mode;
-       else
-               return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, uint32_t mask)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-               return -EINVAL;
-
-       switch (type) {
-       case PP_SCLK:
-               if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-               break;
-
-       case PP_MCLK:
-               if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-               break;
-
-       case PP_PCIE:
-       {
-               uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-               uint32_t level = 0;
-
-               while (tmp >>= 1)
-                       level++;
-
-               if (!data->pcie_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_PCIeDPM_ForceLevel,
-                                       level);
-               break;
-       }
-       default:
-               break;
-       }
-
-       return 0;
-}
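
In the PP_PCIE case above, the while loop collapses the requested mask to the index of its highest set bit, because PCIe DPM is forced to a single level rather than an enable mask. The idiom in isolation, as a hypothetical helper:

/* Index of the highest set bit: highest_set_bit(0x6) == 2. */
static unsigned int highest_set_bit(uint32_t mask)
{
        unsigned int level = 0;

        while (mask >>= 1)
                level++;
        return level;
}
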
-
-static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, char *buf)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-       int i, now, size = 0;
-       uint32_t clock, pcie_speed;
-
-       switch (type) {
-       case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < sclk_table->count; i++) {
-                       if (clock > sclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < sclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, sclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < mclk_table->count; i++) {
-                       if (clock > mclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, mclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_PCIE:
-               pcie_speed = fiji_get_current_pcie_speed(hwmgr);
-               for (i = 0; i < pcie_table->count; i++) {
-                       if (pcie_speed != pcie_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < pcie_table->count; i++)
-                       size += sprintf(buf + size, "%d: %s %s\n", i,
-                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
-                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
-                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
-                                       (i == now) ? "*" : "");
-               break;
-       default:
-               break;
-       }
-       return size;
-}
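
For reference, the PP_SCLK branch above fills buf along these lines for a hypothetical three-level table with level 1 currently active (values are illustrative; the internal clocks are in 10 kHz units, hence the division by 100):

/*
 *     0: 300MHz
 *     1: 608MHz *
 *     2: 1050MHz
 */
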
-
-static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
-                                                          const struct fiji_performance_level *pl2)
-{
-       return ((pl1->memory_clock == pl2->memory_clock) &&
-                 (pl1->engine_clock == pl2->engine_clock) &&
-                 (pl1->pcie_gen == pl2->pcie_gen) &&
-                 (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr,
-               const struct pp_hw_power_state *pstate1,
-               const struct pp_hw_power_state *pstate2, bool *equal)
-{
-       const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
-       const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
-       int i;
-
-       if (equal == NULL || psa == NULL || psb == NULL)
-               return -EINVAL;
-
-       /*
-        * If the two states don't even have the same number of performance
-        * levels they cannot be the same state.
-        */
-       if (psa->performance_level_count != psb->performance_level_count) {
-               *equal = false;
-               return 0;
-       }
-
-       for (i = 0; i < psa->performance_level_count; i++) {
-               if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]),
-                               &(psb->performance_levels[i]))) {
-                       /*
-                        * If we have found even one performance level pair
-                        * that is different, the states are different.
-                        */
-                       *equal = false;
-                       return 0;
-               }
-       }
-
-       /*
-        * If all performance levels are the same, try to use the UVD, VCE,
-        * sclk threshold and ACP clocks to break the tie.
-        */
-       *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) &&
-                       (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
-       *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) &&
-                       (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
-       *equal &= (psa->sclk_threshold == psb->sclk_threshold);
-       *equal &= (psa->acp_clk == psb->acp_clk);
-
-       return 0;
-}
-
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       bool is_update_required = false;
-       struct cgs_display_info info = {0, 0, NULL};
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               is_update_required = true;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep)) {
-               if (hwmgr->display_config.min_core_set_clock_in_sr !=
-                               data->display_timing.min_clock_in_sr)
-                       is_update_required = true;
-       }
-
-       return is_update_required;
-}
-
-static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct fiji_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       int value;
-
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return value;
-}
-
-static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       struct pp_power_state  *ps;
-       struct fiji_power_state  *fiji_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-                       value / 100 +
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return 0;
-}
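
The get/set pair above encodes sclk overdrive as a percentage over the golden (stock) top level, clamped to 20%. A worked round trip, assuming an illustrative golden top level of 105000 (1050 MHz in 10 kHz units):

/* Hypothetical numbers: set 10% overdrive, then read it back. */
static uint32_t example_sclk_od_roundtrip(void)
{
        uint32_t golden = 105000;
        uint32_t engine_clock = golden * 10 / 100 + golden;  /* set: 115500, i.e. 1155 MHz */

        return (engine_clock - golden) * 100 / golden;       /* get: 10 */
}
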
-
-static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct fiji_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       int value;
-
-       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return value;
-}
-
-static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct fiji_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       struct pp_power_state  *ps;
-       struct fiji_power_state  *fiji_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-                       value / 100 +
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return 0;
-}
-
-static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
-       .backend_init = &fiji_hwmgr_backend_init,
-       .backend_fini = &fiji_hwmgr_backend_fini,
-       .asic_setup = &fiji_setup_asic_task,
-       .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
-       .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
-       .force_dpm_level = &fiji_dpm_force_dpm_level,
-       .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
-       .get_power_state_size = &fiji_get_power_state_size,
-       .get_pp_table_entry = &fiji_get_pp_table_entry,
-       .patch_boot_state = &fiji_patch_boot_state,
-       .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
-       .power_state_set = &fiji_set_power_state_tasks,
-       .get_sclk = &fiji_dpm_get_sclk,
-       .get_mclk = &fiji_dpm_get_mclk,
-       .print_current_perforce_level = &fiji_print_current_perforce_level,
-       .powergate_uvd = &fiji_phm_powergate_uvd,
-       .powergate_vce = &fiji_phm_powergate_vce,
-       .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
-       .notify_smc_display_config_after_ps_adjustment =
-                       &tonga_notify_smc_display_config_after_ps_adjustment,
-       .display_config_changed = &fiji_display_configuration_changed_task,
-       .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
-       .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
-       .get_temperature = fiji_thermal_get_temperature,
-       .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
-       .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
-       .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
-       .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
-       .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
-       .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
-       .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
-       .set_fan_control_mode = fiji_set_fan_control_mode,
-       .get_fan_control_mode = fiji_get_fan_control_mode,
-       .check_states_equal = fiji_check_states_equal,
-       .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
-       .force_clock_level = fiji_force_clock_level,
-       .print_clock_levels = fiji_print_clock_levels,
-       .get_sclk_od = fiji_get_sclk_od,
-       .set_sclk_od = fiji_set_sclk_od,
-       .get_mclk_od = fiji_get_mclk_od,
-       .set_mclk_od = fiji_set_mclk_od,
-};
-
-int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-       hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
-       pp_fiji_thermal_initialize(hwmgr);
-       return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644 (file)
index bf67c2a..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_HWMGR_H_
-#define _FIJI_HWMGR_H_
-
-#include "hwmgr.h"
-#include "smu73.h"
-#include "smu73_discrete.h"
-#include "ppatomctrl.h"
-#include "fiji_ppsmc.h"
-#include "pp_endian.h"
-
-#define FIJI_MAX_HARDWARE_POWERLEVELS  2
-#define FIJI_AT_DFLT   30
-
-#define FIJI_VOLTAGE_CONTROL_NONE                   0x0
-#define FIJI_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define FIJI_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define FIJI_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-struct fiji_performance_level {
-       uint32_t  memory_clock;
-       uint32_t  engine_clock;
-       uint16_t  pcie_gen;
-       uint16_t  pcie_lane;
-};
-
-struct fiji_uvd_clocks {
-       uint32_t  vclk;
-       uint32_t  dclk;
-};
-
-struct fiji_vce_clocks {
-       uint32_t  evclk;
-       uint32_t  ecclk;
-};
-
-struct fiji_power_state {
-       uint32_t                       magic;
-       struct fiji_uvd_clocks         uvd_clks;
-       struct fiji_vce_clocks         vce_clks;
-       uint32_t                       sam_clk;
-       uint32_t                       acp_clk;
-       uint16_t                       performance_level_count;
-       bool                           dc_compatible;
-       uint32_t                       sclk_threshold;
-       struct fiji_performance_level  performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct fiji_dpm_level {
-       bool       enabled;
-       uint32_t   value;
-       uint32_t   param1;
-};
-
-#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define FIJI_MINIMUM_ENGINE_CLOCK 2500
-
-struct fiji_single_dpm_table {
-       uint32_t                count;
-       struct fiji_dpm_level   dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct fiji_dpm_table {
-       struct fiji_single_dpm_table  sclk_table;
-       struct fiji_single_dpm_table  mclk_table;
-       struct fiji_single_dpm_table  pcie_speed_table;
-       struct fiji_single_dpm_table  vddc_table;
-       struct fiji_single_dpm_table  vddci_table;
-       struct fiji_single_dpm_table  mvdd_table;
-};
-
-struct fiji_clock_registers {
-       uint32_t  vCG_SPLL_FUNC_CNTL;
-       uint32_t  vCG_SPLL_FUNC_CNTL_2;
-       uint32_t  vCG_SPLL_FUNC_CNTL_3;
-       uint32_t  vCG_SPLL_FUNC_CNTL_4;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t  vDLL_CNTL;
-       uint32_t  vMCLK_PWRMGT_CNTL;
-       uint32_t  vMPLL_AD_FUNC_CNTL;
-       uint32_t  vMPLL_DQ_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL_1;
-       uint32_t  vMPLL_FUNC_CNTL_2;
-       uint32_t  vMPLL_SS1;
-       uint32_t  vMPLL_SS2;
-};
-
-struct fiji_voltage_smio_registers {
-       uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define FIJI_MAX_LEAKAGE_COUNT  8
-struct fiji_leakage_voltage {
-       uint16_t  count;
-       uint16_t  leakage_id[FIJI_MAX_LEAKAGE_COUNT];
-       uint16_t  actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
-};
-
-struct fiji_vbios_boot_state {
-       uint16_t    mvdd_bootup_value;
-       uint16_t    vddc_bootup_value;
-       uint16_t    vddci_bootup_value;
-       uint32_t    sclk_bootup_value;
-       uint32_t    mclk_bootup_value;
-       uint16_t    pcie_gen_bootup_value;
-       uint16_t    pcie_lane_bootup_value;
-};
-
-struct fiji_bacos {
-       uint32_t                       best_match;
-       uint32_t                       baco_flags;
-       struct fiji_performance_level  performance_level;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct fiji_ulv_parm {
-       bool                           ulv_supported;
-       uint32_t                       cg_ulv_parameter;
-       uint32_t                       ulv_volt_change_delay;
-       struct fiji_performance_level  ulv_power_level;
-};
-
-struct fiji_display_timing {
-       uint32_t  min_clock_in_sr;
-       uint32_t  num_existing_displays;
-};
-
-struct fiji_dpmlevel_enable_mask {
-       uint32_t  uvd_dpm_enable_mask;
-       uint32_t  vce_dpm_enable_mask;
-       uint32_t  acp_dpm_enable_mask;
-       uint32_t  samu_dpm_enable_mask;
-       uint32_t  sclk_dpm_enable_mask;
-       uint32_t  mclk_dpm_enable_mask;
-       uint32_t  pcie_dpm_enable_mask;
-};
-
-struct fiji_pcie_perf_range {
-       uint16_t  max;
-       uint16_t  min;
-};
-
-struct fiji_hwmgr {
-       struct fiji_dpm_table                   dpm_table;
-       struct fiji_dpm_table                   golden_dpm_table;
-
-       uint32_t                                                voting_rights_clients0;
-       uint32_t                                                voting_rights_clients1;
-       uint32_t                                                voting_rights_clients2;
-       uint32_t                                                voting_rights_clients3;
-       uint32_t                                                voting_rights_clients4;
-       uint32_t                                                voting_rights_clients5;
-       uint32_t                                                voting_rights_clients6;
-       uint32_t                                                voting_rights_clients7;
-       uint32_t                                                static_screen_threshold_unit;
-       uint32_t                                                static_screen_threshold;
-       uint32_t                                                voltage_control;
-       uint32_t                                                vddc_vddci_delta;
-
-       uint32_t                                                active_auto_throttle_sources;
-
-       struct fiji_clock_registers            clock_registers;
-       struct fiji_voltage_smio_registers      voltage_smio_registers;
-
-       bool                           is_memory_gddr5;
-       uint16_t                       acpi_vddc;
-       bool                           pspp_notify_required;
-       uint16_t                       force_pcie_gen;
-       uint16_t                       acpi_pcie_gen;
-       uint32_t                       pcie_gen_cap;
-       uint32_t                       pcie_lane_cap;
-       uint32_t                       pcie_spc_cap;
-       struct fiji_leakage_voltage          vddc_leakage;
-       struct fiji_leakage_voltage          Vddci_leakage;
-
-       uint32_t                             mvdd_control;
-       uint32_t                             vddc_mask_low;
-       uint32_t                             mvdd_mask_low;
-       uint16_t                            max_vddc_in_pptable;
-       uint16_t                            min_vddc_in_pptable;
-       uint16_t                            max_vddci_in_pptable;
-       uint16_t                            min_vddci_in_pptable;
-       uint32_t                             mclk_strobe_mode_threshold;
-       uint32_t                             mclk_stutter_mode_threshold;
-       uint32_t                             mclk_edc_enable_threshold;
-       uint32_t                             mclk_edcwr_enable_threshold;
-       bool                                is_uvd_enabled;
-       struct fiji_vbios_boot_state        vbios_boot_state;
-
-       bool                           battery_state;
-       bool                           is_tlu_enabled;
-
-       /* ---- SMC SRAM Address of firmware header tables ---- */
-       uint32_t                             sram_end;
-       uint32_t                             dpm_table_start;
-       uint32_t                             soft_regs_start;
-       uint32_t                             mc_reg_table_start;
-       uint32_t                             fan_table_start;
-       uint32_t                             arb_table_start;
-       struct SMU73_Discrete_DpmTable       smc_state_table;
-       struct SMU73_Discrete_Ulv            ulv_setting;
-
-       /* ---- Stuff originally coming from Evergreen ---- */
-       uint32_t                             vddci_control;
-       struct pp_atomctrl_voltage_table     vddc_voltage_table;
-       struct pp_atomctrl_voltage_table     vddci_voltage_table;
-       struct pp_atomctrl_voltage_table     mvdd_voltage_table;
-
-       uint32_t                             mgcg_cgtt_local2;
-       uint32_t                             mgcg_cgtt_local3;
-       uint32_t                             gpio_debug;
-       uint32_t                             mc_micro_code_feature;
-       uint32_t                             highest_mclk;
-       uint16_t                             acpi_vddci;
-       uint8_t                              mvdd_high_index;
-       uint8_t                              mvdd_low_index;
-       bool                                 dll_default_on;
-       bool                                 performance_request_registered;
-
-       /* ---- Low Power Features ---- */
-       struct fiji_bacos                    bacos;
-       struct fiji_ulv_parm                 ulv;
-
-       /* ---- CAC Stuff ---- */
-       uint32_t                       cac_table_start;
-       bool                           cac_configuration_required;
-       bool                           driver_calculate_cac_leakage;
-       bool                           cac_enabled;
-
-       /* ---- DPM2 Parameters ---- */
-       uint32_t                       power_containment_features;
-       bool                           enable_dte_feature;
-       bool                           enable_tdc_limit_feature;
-       bool                           enable_pkg_pwr_tracking_feature;
-       bool                           disable_uvd_power_tune_feature;
-       const struct fiji_pt_defaults  *power_tune_defaults;
-       struct SMU73_Discrete_PmFuses  power_tune_table;
-       uint32_t                       dte_tj_offset;
-       uint32_t                       fast_watermark_threshold;
-
-       /* ---- Phase Shedding ---- */
-       bool                           vddc_phase_shed_control;
-
-       /* ---- DI/DT ---- */
-       struct fiji_display_timing        display_timing;
-
-       /* ---- Thermal Temperature Setting ---- */
-       struct fiji_dpmlevel_enable_mask     dpm_level_enable_mask;
-       uint32_t                             need_update_smu7_dpm_table;
-       uint32_t                             sclk_dpm_key_disabled;
-       uint32_t                             mclk_dpm_key_disabled;
-       uint32_t                             pcie_dpm_key_disabled;
-       uint32_t                             min_engine_clocks;
-       struct fiji_pcie_perf_range          pcie_gen_performance;
-       struct fiji_pcie_perf_range          pcie_lane_performance;
-       struct fiji_pcie_perf_range          pcie_gen_power_saving;
-       struct fiji_pcie_perf_range          pcie_lane_power_saving;
-       bool                                 use_pcie_performance_levels;
-       bool                                 use_pcie_power_saving_levels;
-       uint32_t                             activity_target[SMU73_MAX_LEVELS_GRAPHICS];
-       uint32_t                             mclk_activity_target;
-       uint32_t                             mclk_dpm0_activity_target;
-       uint32_t                             low_sclk_interrupt_threshold;
-       uint32_t                             last_mclk_dpm_enable_mask;
-       bool                                 uvd_enabled;
-
-       /* ---- Power Gating States ---- */
-       bool                           uvd_power_gated;
-       bool                           vce_power_gated;
-       bool                           samu_power_gated;
-       bool                           acp_power_gated;
-       bool                           pg_acp_init;
-       bool                           frtc_enabled;
-       bool                           frtc_status_changed;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define FIJI_Q88_FORMAT_CONVERSION_UNIT             256
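
As an illustration of the Q8.8 encoding this constant implements (an assumption drawn from its name and value):

/* 52   * FIJI_Q88_FORMAT_CONVERSION_UNIT == 13312 (0x3400)
 * 52.5 * FIJI_Q88_FORMAT_CONVERSION_UNIT == 13440 (0x3480) */
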
-
-enum Fiji_I2CLineID {
-       Fiji_I2CLineID_DDC1 = 0x90,
-       Fiji_I2CLineID_DDC2 = 0x91,
-       Fiji_I2CLineID_DDC3 = 0x92,
-       Fiji_I2CLineID_DDC4 = 0x93,
-       Fiji_I2CLineID_DDC5 = 0x94,
-       Fiji_I2CLineID_DDC6 = 0x95,
-       Fiji_I2CLineID_SCLSDA = 0x96,
-       Fiji_I2CLineID_DDCVGA = 0x97
-};
-
-#define Fiji_I2C_DDC1DATA          0
-#define Fiji_I2C_DDC1CLK           1
-#define Fiji_I2C_DDC2DATA          2
-#define Fiji_I2C_DDC2CLK           3
-#define Fiji_I2C_DDC3DATA          4
-#define Fiji_I2C_DDC3CLK           5
-#define Fiji_I2C_SDA               40
-#define Fiji_I2C_SCL               41
-#define Fiji_I2C_DDC4DATA          65
-#define Fiji_I2C_DDC4CLK           66
-#define Fiji_I2C_DDC5DATA          0x48
-#define Fiji_I2C_DDC5CLK           0x49
-#define Fiji_I2C_DDC6DATA          0x4a
-#define Fiji_I2C_DDC6CLK           0x4b
-#define Fiji_I2C_DDCVGADATA        0x4c
-#define Fiji_I2C_DDCVGACLK         0x4d
-
-#define FIJI_UNUSED_GPIO_PIN       0x7F
-
-extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644 (file)
index 4465845..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "fiji_hwmgr.h"
-#include "fiji_powertune.h"
-#include "fiji_smumgr.h"
-#include "smu73_discrete.h"
-#include "pp_debug.h"
-
-#define VOLTAGE_SCALE  4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-
-const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-               /* SviLoadLineEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
-               {1,               0xF,             0xFD,
-               /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
-               0x19,        5,               45}
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct  phm_ppt_v1_information *table_info =
-                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t tmp = 0;
-
-       if (table_info &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID)
-               fiji_hwmgr->power_tune_defaults =
-                               &fiji_power_tune_data_set_array
-                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-       else
-               fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-       /* Assume disabled */
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CAC);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SQRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DBRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_TDRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_TCPRamping);
-
-       fiji_hwmgr->dte_tj_offset = tmp;
-
-       if (!tmp) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_CAC);
-
-               fiji_hwmgr->fast_watermark_threshold = 100;
-
-               if (hwmgr->powercontainment_enabled) {
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                   PHM_PlatformCaps_PowerContainment);
-                       fiji_hwmgr->enable_dte_feature = false;
-                       fiji_hwmgr->enable_tdc_limit_feature = true;
-                       fiji_hwmgr->enable_pkg_pwr_tracking_feature = true;
-               }
-       }
-}
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- *  This is the unit expected by SMC firmware
- */
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-       uint32_t tmp;
-       tmp = raw_setting * 4096 / 100;
-       return (uint16_t)tmp;
-}
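
A quick check of the conversion above, under the comment's stated convention that PPGen stores gains as x * 100:

/* scale_fan_gain_settings(100) == 4096 (0x1000), i.e. a gain of 1.00
 * scale_fan_gain_settings(25)  == 1024 (0x400),  i.e. a gain of 0.25 */
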
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
-{
-       switch (line) {
-       case Fiji_I2CLineID_DDC1:
-               *scl = Fiji_I2C_DDC1CLK;
-               *sda = Fiji_I2C_DDC1DATA;
-               break;
-       case Fiji_I2CLineID_DDC2:
-               *scl = Fiji_I2C_DDC2CLK;
-               *sda = Fiji_I2C_DDC2DATA;
-               break;
-       case Fiji_I2CLineID_DDC3:
-               *scl = Fiji_I2C_DDC3CLK;
-               *sda = Fiji_I2C_DDC3DATA;
-               break;
-       case Fiji_I2CLineID_DDC4:
-               *scl = Fiji_I2C_DDC4CLK;
-               *sda = Fiji_I2C_DDC4DATA;
-               break;
-       case Fiji_I2CLineID_DDC5:
-               *scl = Fiji_I2C_DDC5CLK;
-               *sda = Fiji_I2C_DDC5DATA;
-               break;
-       case Fiji_I2CLineID_DDC6:
-               *scl = Fiji_I2C_DDC6CLK;
-               *sda = Fiji_I2C_DDC6DATA;
-               break;
-       case Fiji_I2CLineID_SCLSDA:
-               *scl = Fiji_I2C_SCL;
-               *sda = Fiji_I2C_SDA;
-               break;
-       case Fiji_I2CLineID_DDCVGA:
-               *scl = Fiji_I2C_DDCVGACLK;
-               *sda = Fiji_I2C_DDCVGADATA;
-               break;
-       default:
-               *scl = 0;
-               *sda = 0;
-               break;
-       }
-}
-
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-       SMU73_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-       struct pp_advance_fan_control_parameters *fan_table =
-                       &hwmgr->thermal_controller.advanceFanControlParameters;
-       uint8_t uc_scl, uc_sda;
-
-       /* TDP number of fraction bits are changed from 8 to 7 for Fiji
-        * as requested by SMC team
-        */
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usTDP * 128));
-       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usTDP * 128));
-
-       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-                       "Target Operating Temp is out of Range!",);
-
-       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
-       dpm_table->GpuTjHyst = 8;
-
-       dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
-       /* The following are for new Fiji Multi-input fan/thermal control */
-       dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTargetOperatingTemp * 256);
-       dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitHotspot * 256);
-       dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitLiquid1 * 256);
-       dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitLiquid2 * 256);
-       dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitVrVddc * 256);
-       dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitVrMvdd * 256);
-       dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitPlx * 256);
-
-       dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainEdge));
-       dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
-       dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainLiquid));
-       dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainVrVddc));
-       dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
-       dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainPlx));
-       dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHbm));
-
-       dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
-       dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
-       dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
-       dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
-       get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Liquid_I2C_LineSCL = uc_scl;
-       dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
-       get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Vr_I2C_LineSCL = uc_scl;
-       dpm_table->Vr_I2C_LineSDA = uc_sda;
-
-       get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Plx_I2C_LineSCL = uc_scl;
-       dpm_table->Plx_I2C_LineSDA = uc_sda;
-
-       return 0;
-}
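
The table above mixes two fixed-point widths: TDP values use 7 fraction bits (the * 128, per the comment), while the temperature limits use 8 (the * 256). A worked example with assumed, illustrative pptable values:

/* Assumed values: usTDP = 75 (watts), usTargetOperatingTemp = 90 (degrees C). */
static void example_tdp_encoding(uint16_t *tdp, uint16_t *edge)
{
        *tdp = 75 * 128;    /* 9600: 75.0 W with 7 fraction bits */
        *edge = 90 * 256;   /* 23040: 90.0 C in 8.8 fixed point */
}
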
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
-       data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-       data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-       data->power_tune_table.SviLoadLineTrimVddC = 3;
-       data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
-       /* TDC number of fraction bits are changed from 8 to 7
-        * for Fiji as requested by SMC team
-        */
-       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-       data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-       data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-       return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-       uint32_t temp;
-
-       if (fiji_read_smc_sram_dword(hwmgr->smumgr,
-                       fuse_table_offset +
-                       offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, data->sram_end))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-                               return -EINVAL);
-       else {
-               data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-               data->power_tune_table.LPMLTemperatureMin =
-                               (uint8_t)((temp >> 16) & 0xff);
-               data->power_tune_table.LPMLTemperatureMax =
-                               (uint8_t)((temp >> 8) & 0xff);
-               data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-       }
-       return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-       return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.
-                       usFanOutputSensitivity & (1 << 15)) ||
-                       (hwmgr->thermal_controller.advanceFanControlParameters.
-                       usFanOutputSensitivity == 0))
-               hwmgr->thermal_controller.advanceFanControlParameters.
-                               usFanOutputSensitivity = hwmgr->thermal_controller.
-                               advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       data->power_tune_table.FuzzyFan_PwmSetDelta =
-                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usFanOutputSensitivity);
-       return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- /*   int  i, min, max;
-    struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-    uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
-    uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
-
-    min = max = pHiVID[0];
-    for (i = 0; i < 8; i++) {
-        if (0 != pHiVID[i]) {
-            if (min > pHiVID[i])
-                min = pHiVID[i];
-            if (max < pHiVID[i])
-                max = pHiVID[i];
-        }
-
-        if (0 != pLoVID[i]) {
-            if (min > pLoVID[i])
-                min = pLoVID[i];
-            if (max < pLoVID[i])
-                max = pLoVID[i];
-        }
-    }
-
-    PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
-    data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
-    data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
-*/
-    return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint16_t HiSidd;
-       uint16_t LoSidd;
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
-       data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
-       return 0;
-}
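
One subtlety in the scaling above: usHighCACLeakage / 100 truncates to whole percent before the * 256 conversion, so (assuming an illustrative table value of 1250, i.e. 12.50%) the fractional part is lost:

/* (1250 / 100) * 256 == 3072   (fraction dropped by this evaluation order)
 * (1250 * 256) / 100 == 3200   (what a fraction-preserving order would give) */
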
-
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       uint32_t pm_fuse_table_offset;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (fiji_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU73_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, data->sram_end))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to get pm_fuse_table_offset Failed!",
-                                       return -EINVAL);
-
-               /* DW6 */
-               if (fiji_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate SviLoadLine Failed!",
-                                       return -EINVAL);
-               /* DW7 */
-               if (fiji_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
-               /* DW8 */
-               if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TdcWaterfallCtl, "
-                                       "LPMLTemperature Min and Max Failed!",
-                                       return -EINVAL);
-
-               /* DW9-DW12 */
-               if (fiji_populate_temperature_scaler(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate LPMLTemperatureScaler Failed!",
-                                       return -EINVAL);
-
-               /* DW13-DW14 */
-               if (fiji_populate_fuzzy_fan(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
-                                       return -EINVAL);
-
-               /* DW15-DW18 */
-               if (fiji_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Failed!",
-                                       return -EINVAL);
-
-               /* DW19 */
-               if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Min and Max Vid Failed!",
-                                       return -EINVAL);
-
-               /* DW20 */
-               if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-                                       "Sidd Failed!", return -EINVAL);
-
-               if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
-                               (uint8_t *)&data->power_tune_table,
-                               sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed!",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CAC)) {
-               int smc_result;
-               smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                               (uint16_t)(PPSMC_MSG_EnableCac));
-               PP_ASSERT_WITH_CODE((0 == smc_result),
-                               "Failed to enable CAC in SMC.", result = -1);
-
-               data->cac_enabled = (smc_result == 0);
-       }
-       return result;
-}
-
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
-               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                               (uint16_t)(PPSMC_MSG_DisableCac));
-               PP_ASSERT_WITH_CODE((smc_result == 0),
-                               "Failed to disable CAC in SMC.", result = -1);
-
-               data->cac_enabled = false;
-       }
-       return result;
-}
-
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->power_containment_features &
-                       POWERCONTAINMENT_FEATURE_PkgPwrLimit)
-               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_PkgPwrSetLimit, n);
-       return 0;
-}
-
-static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
-       return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
-                       PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int smc_result;
-       int result = 0;
-
-       data->power_containment_features = 0;
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (data->enable_dte_feature) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_EnableDTE));
-                       PP_ASSERT_WITH_CODE((0 == smc_result),
-                                       "Failed to enable DTE in SMC.", result = -1);
-                       if (0 == smc_result)
-                               data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
-               }
-
-               if (data->enable_tdc_limit_feature) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable));
-                       PP_ASSERT_WITH_CODE((0 == smc_result),
-                                       "Failed to enable TDCLimit in SMC.", result = -1);
-                       if (0 == smc_result)
-                               data->power_containment_features |=
-                                               POWERCONTAINMENT_FEATURE_TDCLimit;
-               }
-
-               if (data->enable_pkg_pwr_tracking_feature) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
-                       PP_ASSERT_WITH_CODE((0 == smc_result),
-                                       "Failed to enable PkgPwrTracking in SMC.", result = -1);
-                       if (0 == smc_result) {
-                               struct phm_cac_tdp_table *cac_table =
-                                               table_info->cac_dtp_table;
-                               uint32_t default_limit =
-                                       (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
-                               data->power_containment_features |=
-                                               POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
-                               if (fiji_set_power_limit(hwmgr, default_limit))
-                                       printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
-                       }
-               }
-       }
-       return result;
-}
-
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment) &&
-                       data->power_containment_features) {
-               int smc_result;
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_TDCLimit) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable TDCLimit in SMC.",
-                                       result = smc_result);
-               }
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_DTE) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_DisableDTE));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable DTE in SMC.",
-                                       result = smc_result);
-               }
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable PkgPwrTracking in SMC.",
-                                       result = smc_result);
-               }
-               data->power_containment_features = 0;
-       }
-
-       return result;
-}
-
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-       int adjust_percent, target_tdp;
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               /* adjustment percentage has already been validated */
-               adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
-                               hwmgr->platform_descriptor.TDPAdjustment :
-                               (-1 * hwmgr->platform_descriptor.TDPAdjustment);
-               /* The SMC wants target_tdp as a 7-bit fraction in the DPM
-                * table, but as an 8-bit fraction in messages.
-                */
-               target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
-               result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
-       }
-
-       return result;
-}
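
For reference, a minimal standalone sketch of the 8.8 fixed-point TDP scaling performed above; the 150 W board power and +10% overdrive adjustment are hypothetical sample values, not taken from any real table:

#include <stdint.h>
#include <stdio.h>

/* Watts -> 8.8 fixed point, scaled by (100 + adjust_percent) / 100,
 * mirroring the expression in fiji_power_control_set_level() above. */
static uint32_t scaled_target_tdp(uint16_t tdp_watts, int adjust_percent)
{
	return (uint32_t)(((100 + adjust_percent) * (int)(tdp_watts * 256)) / 100);
}

int main(void)
{
	/* 150 W, +10% -> 42240, i.e. 165.0 in 8.8 fixed point */
	printf("%u\n", scaled_target_tdp(150, 10));
	return 0;
}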
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644 (file)
index fec7724..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_POWERTUNE_H
-#define FIJI_POWERTUNE_H
-
-enum fiji_pt_config_reg_type {
-       FIJI_CONFIGREG_MMR = 0,
-       FIJI_CONFIGREG_SMC_IND,
-       FIJI_CONFIGREG_DIDT_IND,
-       FIJI_CONFIGREG_CACHE,
-       FIJI_CONFIGREG_MAX
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK             0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT           0x6
-#define DIDT_TD_CTRL0__UNUSED_0_MASK             0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT           0x6
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK            0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT          0x6
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xe0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001d
-
-struct fiji_pt_config_reg {
-       uint32_t                           offset;
-       uint32_t                           mask;
-       uint32_t                           shift;
-       uint32_t                           value;
-       enum fiji_pt_config_reg_type       type;
-};
-
-struct fiji_pt_defaults
-{
-    uint8_t   SviLoadLineEn;
-    uint8_t   SviLoadLineVddC;
-    uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-    uint8_t   TDC_MAWt;
-    uint8_t   TdcWaterfallCtl;
-    uint8_t   DTEAmbientTempBase;
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
-
-#endif  /* FIJI_POWERTUNE_H */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
deleted file mode 100644 (file)
index 92976b6..0000000
+++ /dev/null
@@ -1,687 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-#include "fiji_thermal.h"
-#include "fiji_hwmgr.h"
-#include "fiji_smumgr.h"
-#include "fiji_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
-               struct phm_fan_speed_info *fan_speed_info)
-{
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       fan_speed_info->supports_percent_read = true;
-       fan_speed_info->supports_percent_write = true;
-       fan_speed_info->min_percent = 0;
-       fan_speed_info->max_percent = 100;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
-               hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-               fan_speed_info->supports_rpm_read = true;
-               fan_speed_info->supports_rpm_write = true;
-               fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
-               fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
-       } else {
-               fan_speed_info->min_rpm = 0;
-               fan_speed_info->max_rpm = 0;
-       }
-
-       return 0;
-}
-
-int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
-               uint32_t *speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-       duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
-       if (duty100 == 0)
-               return -EINVAL;
-
-
-       tmp64 = (uint64_t)duty * 100;
-       do_div(tmp64, duty100);
-       *speed = (uint32_t)tmp64;
-
-       if (*speed > 100)
-               *speed = 100;
-
-       return 0;
-}
-
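The duty-cycle-to-percent conversion above widens to 64 bits before dividing. Outside the kernel the same arithmetic looks like this sketch; the register values are hypothetical, `tmp /= duty100` stands in for the kernel's do_div(), and duty100 is assumed nonzero (the caller checks it):

#include <stdint.h>
#include <stdio.h>

static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
{
	uint64_t tmp = (uint64_t)duty * 100;	/* widen first to avoid overflow */

	tmp /= duty100;				/* do_div(tmp, duty100) in kernel code */
	return tmp > 100 ? 100 : (uint32_t)tmp;	/* clamp, as above */
}

int main(void)
{
	/* hypothetical FDO_PWM_DUTY = 96, FMAX_DUTY100 = 255 -> 37% */
	printf("%u%%\n", duty_to_percent(96, 255));
	return 0;
}
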
-int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-       uint32_t tach_period;
-       uint32_t crystal_clock_freq;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-                       (hwmgr->thermal_controller.fanInfo.
-                               ucTachometerPulsesPerRevolution == 0))
-               return 0;
-
-       tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_TACH_STATUS, TACH_PERIOD);
-
-       if (tach_period == 0)
-               return -EINVAL;
-
-       crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-       *speed = 60 * crystal_clock_freq * 10000 / tach_period;
-
-       return 0;
-}
-
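As a sanity check on the tachometer arithmetic, both conversions in standalone form. The 27 MHz crystal (2700 in 10 kHz units) is an assumed illustrative value; the factor of 8 appears only in the set path above, matching its pulses-per-revolution handling:

#include <stdint.h>
#include <stdio.h>

/* Same expressions as the get/set fan speed RPM paths above;
 * xclk is the reference clock in 10 kHz units. */
static uint32_t rpm_reading(uint32_t xclk, uint32_t tach_period)
{
	return 60 * xclk * 10000 / tach_period;
}

static uint32_t tach_period_for(uint32_t xclk, uint32_t rpm)
{
	return 60 * xclk * 10000 / (8 * rpm);
}

int main(void)
{
	uint32_t xclk = 2700;	/* assumed 27 MHz crystal */
	uint32_t period = tach_period_for(xclk, 3000);

	/* period = 67500; reading it back gives 24000 = 3000 * 8 pulses */
	printf("period=%u reading=%u\n", period, rpm_reading(xclk, period));
	return 0;
}
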
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    mode   the fan control mode: 0 default, 1 by percent, 5 by RPM
-* @exception Should always succeed.
-*/
-int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
-       if (hwmgr->fan_ctrl_is_in_default_mode) {
-               hwmgr->fan_ctrl_default_mode =
-                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,     CGS_IND_REG__SMC,
-                                               CG_FDO_CTRL2, FDO_PWM_MODE);
-               hwmgr->tmin =
-                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                               CG_FDO_CTRL2, TMIN);
-               hwmgr->fan_ctrl_is_in_default_mode = false;
-       }
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, TMIN, 0);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
-       return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->fan_ctrl_is_in_default_mode) {
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, TMIN, hwmgr->tmin);
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       }
-
-       return 0;
-}
-
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_FanSpeedInTableIsRPM))
-                       hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
-                                       hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usMaxFanRPM);
-               else
-                       hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
-                                       hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usMaxFanPWM);
-
-       } else {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-       }
-
-       if (!result && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ucTargetTemperature)
-               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanTemperatureTarget,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucTargetTemperature);
-
-       return result;
-}
-
-
-int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
-}
-
-/**
-* Set Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% setting appears to be 0.
-*/
-int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
-               uint32_t speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       if (speed > 100)
-               speed = 100;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl))
-               fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (duty100 == 0)
-               return -EINVAL;
-
-       tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
-       duty = (uint32_t)tmp64;
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
-       return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl)) {
-               result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-               if (!result)
-                       result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
-       } else
-               result = fiji_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the RPM value (min - max) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-       uint32_t tach_period;
-       uint32_t crystal_clock_freq;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-                       (hwmgr->thermal_controller.fanInfo.
-                       ucTachometerPulsesPerRevolution == 0) ||
-                       (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
-                       (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
-               return 0;
-
-       crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-       tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_TACH_STATUS, TACH_PERIOD, tach_period);
-
-       return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reads the remote temperature from the SIslands thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-       int temp;
-
-       temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-       /* Bit 9 means the reading is lower than the lowest usable value. */
-       if (temp & 0x200)
-               temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
-       else
-               temp = temp & 0x1ff;
-
-       temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       return temp;
-}
-
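The CTF_TEMP decoding above in isolation; PP_TEMPERATURE_UNITS_PER_CENTIGRADES is assumed here to be 1000 (millidegrees), which is not confirmed by this hunk:

#include <stdint.h>
#include <stdio.h>

#define UNITS_PER_C		1000	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
#define MAX_TEMP_READING	255	/* FIJI_THERMAL_MAXIMUM_TEMP_READING */

/* Bits 8:0 carry the temperature; bit 9 flags an out-of-range reading. */
static int decode_ctf_temp(uint32_t raw)
{
	int temp = (raw & 0x200) ? MAX_TEMP_READING : (int)(raw & 0x1ff);

	return temp * UNITS_PER_C;
}

int main(void)
{
	printf("%d\n", decode_ctf_temp(90));	/* 90 C -> 90000 */
	printf("%d\n", decode_ctf_temp(0x200));	/* flagged -> clamped to 255000 */
	return 0;
}
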
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param    hwmgr The address of the hardware manager.
-* @param    range Temperature range to be programmed for high and low alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-               uint32_t low_temp, uint32_t high_temp)
-{
-       uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
-                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
-                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       if (low < low_temp)
-               low = low_temp;
-       if (high > high_temp)
-               high = high_temp;
-
-       if (low > high)
-               return -EINVAL;
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, DIG_THERM_INTH,
-                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, DIG_THERM_INTL,
-                       (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_CTRL, DIG_THERM_DPM,
-                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
-       return 0;
-}
-
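The range programming above first clamps the requested alerts into the controller's supported window, then converts back to whole degrees. A compact restatement of that clamp, with the unit constant assumed as above:

#include <stdio.h>

#define UNITS		1000	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
#define MIN_ALERT	0	/* FIJI_THERMAL_MINIMUM_ALERT_TEMP */
#define MAX_ALERT	255	/* FIJI_THERMAL_MAXIMUM_ALERT_TEMP */

/* Fills low/high (in whole degrees) on success; returns -1 when the
 * clamped window is empty, mirroring the -EINVAL path above. */
static int clamp_alert_range(unsigned int low_temp, unsigned int high_temp,
			     unsigned int *low, unsigned int *high)
{
	unsigned int lo = MIN_ALERT * UNITS, hi = MAX_ALERT * UNITS;

	if (lo < low_temp)
		lo = low_temp;
	if (hi > high_temp)
		hi = high_temp;
	if (lo > hi)
		return -1;

	*low = lo / UNITS;
	*high = hi / UNITS;
	return 0;
}

int main(void)
{
	unsigned int lo, hi;

	if (!clamp_alert_range(60000, 300000, &lo, &hi))
		printf("program %u..%u C\n", lo, hi);	/* 60..255 */
	return 0;
}
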
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_TACH_CTRL, EDGE_PER_REV,
-                               hwmgr->thermal_controller.fanInfo.
-                               ucTachometerPulsesPerRevolution - 1);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
-       return 0;
-}
-
-/**
-* Enable thermal alerts on the RV770 thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK);
-       alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to enable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
-}
-
-/**
-* Disable thermal alerts on the RV770 thermal controller.
-* @param    hwmgr The address of the hardware manager.
-*/
-static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK);
-       alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to disable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param    hwmgr The address of the hardware manager.
-*/
-int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       int result = fiji_thermal_disable_alert(hwmgr);
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               fiji_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   always 0 (on failure the MicrocodeFanControl cap is cleared)
-*/
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-       SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (data->fan_table_start == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (duty100 == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
-                       usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->
-                       thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = tonga_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
-                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
-                       reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
-                       hwmgr->device, CGS_IND_REG__SMC,
-                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
-                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
-                       data->sram_end);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ucMinimumPWMLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanMinPwm,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanSclkTarget,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
-       if (res)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-
-       return 0;
-}
-
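The recurring (50 + x) / 100 pattern in the fan table setup above is integer round-to-nearest division by 100, converting values stored in hundredths into whole units. A two-case illustration:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest division by 100, as used for TempMin/Med/Max and
 * the slope terms above. */
static uint16_t round_div100(uint32_t x)
{
	return (uint16_t)((50 + x) / 100);
}

int main(void)
{
	printf("%u\n", round_div100(6449));	/* 64.49 -> 64 */
	printf("%u\n", round_div100(6450));	/* 64.50 -> 65 */
	return 0;
}
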
-/**
-* Start the fan control on the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   always 0
-*/
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-/* If the fantable setup has failed we could have disabled
- * PHM_PlatformCaps_MicrocodeFanControl even after
- * this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
-*/
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl)) {
-               fiji_fan_ctrl_start_smc_fan_control(hwmgr);
-               fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-       }
-
-       return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
-       if (range == NULL)
-               return -EINVAL;
-
-       return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from initialize thermal controller routine
-*/
-int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return fiji_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from enable alert routine
-*/
-int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return fiji_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from disable alert routine
-*/
-static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return fiji_thermal_disable_alert(hwmgr);
-}
-
-static const struct phm_master_table_item
-fiji_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_fiji_thermal_initialize},
-       {NULL, tf_fiji_thermal_set_temperature_range},
-       {NULL, tf_fiji_thermal_enable_alert},
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this
- * so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
-       {NULL, tf_fiji_thermal_setup_fan_table},
-       {NULL, tf_fiji_thermal_start_smc_fan_control},
-       {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-fiji_thermal_start_thermal_controller_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       fiji_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-fiji_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_fiji_thermal_disable_alert},
-       {NULL, tf_fiji_thermal_set_temperature_range},
-       {NULL, tf_fiji_thermal_enable_alert},
-       {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-fiji_thermal_set_temperature_range_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       fiji_thermal_set_temperature_range_master_list
-};
-
-int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-               fiji_fan_ctrl_set_default_mode(hwmgr);
-       return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param    hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       result = phm_construct_table(hwmgr,
-                       &fiji_thermal_set_temperature_range_master,
-                       &(hwmgr->set_temperature_range));
-
-       if (!result) {
-               result = phm_construct_table(hwmgr,
-                               &fiji_thermal_start_thermal_controller_master,
-                               &(hwmgr->start_thermal_controller));
-               if (result)
-                       phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
-       }
-
-       if (!result)
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644 (file)
index 8621493..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_THERMAL_H
-#define FIJI_THERMAL_H
-
-#include "hwmgr.h"
-
-#define FIJI_THERMAL_HIGH_ALERT_MASK         0x1
-#define FIJI_THERMAL_LOW_ALERT_MASK          0x2
-
-#define FIJI_THERMAL_MINIMUM_TEMP_READING    -256
-#define FIJI_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define FIJI_THERMAL_MINIMUM_ALERT_TEMP      0
-#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
index 789f98ad2615027c8182ebfda29173e3b30b38db..14f8c1f4da3d7a385202f2e8ec24ee5e6ab0f3ad 100644 (file)
@@ -24,8 +24,6 @@
 #include "hwmgr.h"
 #include "hardwaremanager.h"
 #include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
 #include "pp_debug.h"
 
 #define PHM_FUNC_CHECK(hw) \
                        return -EINVAL;                         \
        } while (0)
 
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
-       if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
-               acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
 bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
 {
        return hwmgr->block_hw_access;
index 27e07624ac28b5abc77c76a9ea2ad407c188ba11..1167205057b337d7f968f08d4218487c218797a9 100644 (file)
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
-
-#define VOLTAGE_SCALE               4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
 
 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
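
A quick userspace check of the VID conversion added above. With VOLTAGE_SCALE defined as 4 (the define removed from this file earlier in the hunk), the expression maps a voltage in mV onto codes in 6.25 mV steps down from 1.55 V; the 1150 mV input is an illustrative value:

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* value of the define removed above */

static uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

int main(void)
{
	printf("%u\n", convert_to_vid(1150));	/* (6200 - 4600) / 25 = 64 */
	return 0;
}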
 
 int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -56,10 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        hwmgr->device = pp_init->device;
        hwmgr->chip_family = pp_init->chip_family;
        hwmgr->chip_id = pp_init->chip_id;
-       hwmgr->hw_revision = pp_init->rev_id;
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        hwmgr->power_source = PP_PowerSource_AC;
-       hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
+       hwmgr->pp_table_version = PP_TABLE_V1;
+
+       hwmgr_init_default_caps(hwmgr);
+       hwmgr_set_user_specify_caps(hwmgr);
 
        switch (hwmgr->chip_family) {
        case AMDGPU_FAMILY_CZ:
@@ -67,26 +78,38 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                break;
        case AMDGPU_FAMILY_VI:
                switch (hwmgr->chip_id) {
+               case CHIP_TOPAZ:
+                       topaz_set_asic_special_caps(hwmgr);
+                       hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+                                               PP_VBI_TIME_SUPPORT_MASK |
+                                               PP_ENABLE_GFX_CG_THRU_SMU);
+                       hwmgr->pp_table_version = PP_TABLE_V0;
+                       break;
                case CHIP_TONGA:
-                       tonga_hwmgr_init(hwmgr);
+                       tonga_set_asic_special_caps(hwmgr);
+                       hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+                                               PP_VBI_TIME_SUPPORT_MASK);
                        break;
                case CHIP_FIJI:
-                       fiji_hwmgr_init(hwmgr);
+                       fiji_set_asic_special_caps(hwmgr);
+                       hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+                                               PP_VBI_TIME_SUPPORT_MASK |
+                                               PP_ENABLE_GFX_CG_THRU_SMU);
                        break;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
-                       polaris10_hwmgr_init(hwmgr);
+                       polaris_set_asic_special_caps(hwmgr);
+                       hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
                        break;
                default:
                        return -EINVAL;
                }
+               smu7_hwmgr_init(hwmgr);
                break;
        default:
                return -EINVAL;
        }
 
-       phm_init_dynamic_caps(hwmgr);
-
        return 0;
 }
 
@@ -105,6 +128,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
        kfree(hwmgr->set_temperature_range.function_list);
 
        kfree(hwmgr->ps);
+       kfree(hwmgr->current_ps);
+       kfree(hwmgr->request_ps);
        kfree(hwmgr);
        return 0;
 }
@@ -129,10 +154,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
                                          sizeof(struct pp_power_state);
 
        hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
        if (hwmgr->ps == NULL)
                return -ENOMEM;
 
+       hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+       if (hwmgr->request_ps == NULL)
+               return -ENOMEM;
+
+       hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+       if (hwmgr->current_ps == NULL)
+               return -ENOMEM;
+
        state = hwmgr->ps;
 
        for (i = 0; i < table_entries; i++) {
@@ -140,7 +172,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
 
                if (state->classification.flags & PP_StateClassificationFlag_Boot) {
                        hwmgr->boot_ps = state;
-                       hwmgr->current_ps = hwmgr->request_ps = state;
+                       memcpy(hwmgr->current_ps, state, size);
+                       memcpy(hwmgr->request_ps, state, size);
                }
 
                state->id = i + 1; /* assigned unique num for every power state id */
@@ -150,6 +183,7 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
                state = (struct pp_power_state *)((unsigned long)state + size);
        }
 
+
        return 0;
 }
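
The (unsigned long)state + size walk in the hunk above steps through back-to-back records whose full size is only known at runtime (base struct plus a driver-specific payload). A self-contained sketch of the same idiom, using a made-up record type rather than struct pp_power_state:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct pp_power_state: a fixed header plus
 * a driver-sized payload appended at runtime. */
struct rec {
	int id;
	char payload[];
};

int main(void)
{
	size_t hw_size = 24;				/* pretend hw_ps_size */
	size_t size = sizeof(struct rec) + hw_size;	/* one full record */
	int entries = 4;
	char *base = calloc(entries, size);
	struct rec *r = (struct rec *)base;

	if (!base)
		return 1;

	for (int i = 0; i < entries; i++) {
		r->id = i + 1;				/* unique id per state, as above */
		r = (struct rec *)((char *)r + size);	/* stride by the full record size */
	}

	printf("second id = %d\n", ((struct rec *)(base + size))->id);
	free(base);
	return 0;
}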
 
@@ -182,30 +216,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
        return 0;
 }
 
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-                               uint32_t index, uint32_t value, uint32_t mask)
-{
-       uint32_t i;
-       uint32_t cur_value;
-
-       if (hwmgr == NULL || hwmgr->device == NULL) {
-               printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < hwmgr->usec_timeout; i++) {
-               cur_value = cgs_read_register(hwmgr->device, index);
-               if ((cur_value & mask) != (value & mask))
-                       break;
-               udelay(1);
-       }
-
-       /* timeout means wrong logic */
-       if (i == hwmgr->usec_timeout)
-               return -1;
-       return 0;
-}
-
 
 /**
  * Returns once the part of the register indicated by the mask has
@@ -227,21 +237,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
        phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
 }
 
-void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
-                                       uint32_t indirect_port,
-                                       uint32_t index,
-                                       uint32_t value,
-                                       uint32_t mask)
-{
-       if (hwmgr == NULL || hwmgr->device == NULL) {
-               printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-               return;
-       }
 
-       cgs_write_register(hwmgr->device, indirect_port, index);
-       phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
-                                     value, mask);
-}
 
 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
 {
@@ -403,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
 
        struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
 
-       PP_ASSERT_WITH_CODE(count <= max,
-                       "Fatal error, can not set up single DPM table entries to exceed max number!",
-                          );
+       dpm_table->count = count > max ? max : count;
 
-       dpm_table->count = count;
-       for (i = 0; i < max; i++)
+       for (i = 0; i < dpm_table->count; i++)
                dpm_table->dpm_level[i].enabled = false;
 
        return 0;
@@ -462,6 +455,27 @@ uint8_t phm_get_voltage_index(
        return i - 1;
 }
 
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+               uint32_t voltage)
+{
+       uint8_t count;
+       uint8_t i = 0;
+
+       PP_ASSERT_WITH_CODE((NULL != voltage_table),
+               "Voltage Table empty.", return 0;);
+
+       count = (uint8_t)(voltage_table->count);
+       PP_ASSERT_WITH_CODE((0 != count),
+               "Voltage Table empty.", return 0;);
+
+       for (i = 0; i < count; i++) {
+               /* find first voltage bigger than requested */
+               if (voltage_table->entries[i].value >= voltage)
+                       return i;
+       }
+
+       /* voltage is bigger than max voltage in the table */
+       return i - 1;
+}
+
 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
 {
        uint32_t  i;
@@ -549,7 +563,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
                table_clk_vlt->entries[2].v = 810;
                table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
                table_clk_vlt->entries[3].v = 900;
-               pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+               if (pptable_info != NULL)
+                       pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
                hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
        }
 
@@ -615,3 +630,186 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
        printk(KERN_ERR "DAL requested level cannot"
                        " find an available voltage in VDDC DPM Table\n");
 }
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+       if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+               acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_DynamicPatchPowerState);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_DynamicPowerManagement);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_SMC);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_DynamicUVDState);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+       return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
+       if (amdgpu_sclk_deep_sleep_en)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep);
+
+       if (amdgpu_powercontainment)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+
+       hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+       return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+                               uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+       uint32_t vol;
+       int ret = 0;
+
+       if (hwmgr->chip_id < CHIP_POLARIS10) {
+               atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+               if (*voltage >= 2000 || *voltage == 0)
+                       *voltage = 1150;
+       } else {
+               ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+               *voltage = (uint16_t)(vol / 100);
+       }
+       return ret;
+}
+
+int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+       /* power tune caps Assume disabled */
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_SQRamping);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_DBRamping);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_TDRamping);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_TCPRamping);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                                       PHM_PlatformCaps_CAC);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_RegulatorHot);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_AutomaticDCTransition);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_TablelessHardwareInterface);
+
+       if (hwmgr->chip_id == CHIP_POLARIS11)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_SPLLShutdownSupport);
+       return 0;
+}
+
+int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SQRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_DBRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TDRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TCPRamping);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TablelessHardwareInterface);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC);
+       return 0;
+}
+
+int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SQRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_DBRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TDRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TCPRamping);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                     PHM_PlatformCaps_UVDPowerGating);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                     PHM_PlatformCaps_VCEPowerGating);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                        PHM_PlatformCaps_TablelessHardwareInterface);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC);
+
+       return 0;
+}
+
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SQRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_DBRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TDRamping);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_TCPRamping);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                        PHM_PlatformCaps_TablelessHardwareInterface);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                   PHM_PlatformCaps_EVV);
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
deleted file mode 100644 (file)
index b5edb51..0000000
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "polaris10_clockpowergating.h"
-
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_uvd_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_UVDPowerOFF);
-       return 0;
-}
-
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_uvd_power_gating(hwmgr)) {
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                 PHM_PlatformCaps_UVDDynamicPowerGating)) {
-                       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_UVDPowerON, 1);
-               } else {
-                       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_UVDPowerON, 0);
-               }
-       }
-
-       return 0;
-}
-
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_vce_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_VCEPowerOFF);
-       return 0;
-}
-
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_vce_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_VCEPowerON);
-       return 0;
-}
-
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SAMPowerOFF);
-       return 0;
-}
-
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SAMPowerON);
-       return 0;
-}
-
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-       data->samu_power_gated = false;
-
-       polaris10_phm_powerup_uvd(hwmgr);
-       polaris10_phm_powerup_vce(hwmgr);
-       polaris10_phm_powerup_samu(hwmgr);
-
-       return 0;
-}
-
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->uvd_power_gated == bgate)
-               return 0;
-
-       data->uvd_power_gated = bgate;
-
-       if (bgate) {
-               cgs_set_clockgating_state(hwmgr->device,
-                               AMD_IP_BLOCK_TYPE_UVD,
-                               AMD_CG_STATE_GATE);
-               polaris10_update_uvd_dpm(hwmgr, true);
-               polaris10_phm_powerdown_uvd(hwmgr);
-       } else {
-               polaris10_phm_powerup_uvd(hwmgr);
-               polaris10_update_uvd_dpm(hwmgr, false);
-               cgs_set_clockgating_state(hwmgr->device,
-                               AMD_IP_BLOCK_TYPE_UVD,
-                               AMD_CG_STATE_UNGATE);
-       }
-
-       return 0;
-}
-
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->vce_power_gated == bgate)
-               return 0;
-
-       data->vce_power_gated = bgate;
-
-       if (bgate) {
-               cgs_set_clockgating_state(hwmgr->device,
-                               AMD_IP_BLOCK_TYPE_VCE,
-                               AMD_CG_STATE_GATE);
-               polaris10_update_vce_dpm(hwmgr, true);
-               polaris10_phm_powerdown_vce(hwmgr);
-       } else {
-               polaris10_phm_powerup_vce(hwmgr);
-               polaris10_update_vce_dpm(hwmgr, false);
-               cgs_set_clockgating_state(hwmgr->device,
-                               AMD_IP_BLOCK_TYPE_VCE,
-                               AMD_CG_STATE_UNGATE);
-       }
-       return 0;
-}
-
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->samu_power_gated == bgate)
-               return 0;
-
-       data->samu_power_gated = bgate;
-
-       if (bgate) {
-               polaris10_update_samu_dpm(hwmgr, true);
-               polaris10_phm_powerdown_samu(hwmgr);
-       } else {
-               polaris10_phm_powerup_samu(hwmgr);
-               polaris10_update_samu_dpm(hwmgr, false);
-       }
-
-       return 0;
-}
-
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
-                                       const uint32_t *msg_id)
-{
-       PPSMC_Msg msg;
-       uint32_t value;
-
-       switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
-       case PP_GROUP_GFX:
-               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-               case PP_BLOCK_GFX_CG:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_CGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                                       ? PPSMC_MSG_EnableClockGatingFeature
-                                       : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_CGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_GFX_3D:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_3DCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if  (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_3DLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_GFX_RLC:
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_RLC_LS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_GFX_CP:
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_CP_LS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_GFX_MG:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
-                                               CG_GFX_OTHERS_MGCG_MASK);
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               default:
-                       return -1;
-               }
-               break;
-
-       case PP_GROUP_SYS:
-               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-               case PP_BLOCK_SYS_BIF:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_BIF_MGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       if  (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_BIF_MGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_MC:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_MC_MGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_MC_MGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_DRM:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_DRM_MGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_DRM_MGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_HDP:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_HDP_MGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_HDP_MGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_SDMA:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_SDMA_MGCG_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_SDMA_MGLS_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_ROM:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
-                                               PPSMC_MSG_EnableClockGatingFeature :
-                                               PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_ROM_MASK;
-
-                               if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               default:
-                       return -1;
-
-               }
-               break;
-
-       default:
-               return -1;
-
-       }
-
-       return 0;
-}
-
-/* This function is for Polaris11 only for now.
- * Powerplay will only control static per-CU power gating;
- * dynamic per-CU power gating will be done in gfx.
- */
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
-{
-       struct cgs_system_info sys_info = {0};
-       uint32_t active_cus;
-       int result;
-
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
-
-       result = cgs_query_system_info(hwmgr->device, &sys_info);
-
-       if (result)
-               return -EINVAL;
-       else
-               active_cus = sys_info.value;
-
-       if (enable)
-               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
-       else
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_GFX_CU_PG_DISABLE);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
deleted file mode 100644 (file)
index 88d68cb..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
-
-#include "polaris10_hwmgr.h"
-#include "pp_asicblocks.h"
-
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
-                                       const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644 (file)
index f78ffd9..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_DYN_DEFAULTS_H
-#define POLARIS10_DYN_DEFAULTS_H
-
-
-enum Polaris10dpm_TrendDetection {
-       Polaris10Adpm_TrendDetection_AUTO,
-       Polaris10Adpm_TrendDetection_UP,
-       Polaris10Adpm_TrendDetection_DOWN
-};
-typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
-
-/*  We need to fill in the default values */
-
-
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1              0x000400
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000
-
-
-#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT            0x200
-#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT        0
-#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT            0x00C8
-#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200
-#define PPPOLARIS10_REFERENCEDIVIDER_DFLT                  4
-
-#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPPOLARIS10_CGULVPARAMETER_DFLT                    0x00040035
-#define PPPOLARIS10_CGULVCONTROL_DFLT                      0x00007450
-#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
-#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT                10
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644 (file)
index 769636a..0000000
+++ /dev/null
@@ -1,5290 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <asm/div64.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_dyn_defaults.h"
-#include "polaris10_smumgr.h"
-#include "pp_debug.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "tonga_pptable.h"
-#include "pppcielanes.h"
-#include "amd_pcie_helpers.h"
-#include "hardwaremanager.h"
-#include "tonga_processpptables.h"
-#include "cgs_common.h"
-#include "smu74.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "polaris10_thermal.h"
-#include "polaris10_clockpowergating.h"
-
-#define MC_CG_ARB_FREQ_F0           0x0a
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0       0x05
-#define MC_CG_SEQ_DRAMCONF_S1       0x06
-#define MC_CG_SEQ_YCLK_SUSPEND      0x04
-#define MC_CG_SEQ_YCLK_RESUME       0x0a
-
-
-#define SMC_RAM_END 0x40000
-
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define VDDC_VDDCI_DELTA            200
-
-#define MEM_FREQ_LOW_LATENCY        25000
-#define MEM_FREQ_HIGH_LATENCY       80000
-
-#define MEM_LATENCY_HIGH            45
-#define MEM_LATENCY_LOW             35
-#define MEM_LATENCY_ERR             0xFFFF
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-
-#define PCIE_BUS_CLK                10000
-#define TCLK                        (PCIE_BUS_CLK / 10)
-
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/*  [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min, VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/*  [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-       DPM_EVENT_SRC_ANALOG = 0,
-       DPM_EVENT_SRC_EXTERNAL = 1,
-       DPM_EVENT_SRC_DIGITAL = 2,
-       DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-       DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
-};
-
-static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
-
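-/* Downcast helpers: the magic field is validated before the generic
- * pp_hw_power_state is reinterpreted as a polaris10 power state.
- */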
-struct polaris10_power_state *cast_phw_polaris10_power_state(
-                                 struct pp_hw_power_state *hw_ps)
-{
-       PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL);
-
-       return (struct polaris10_power_state *)hw_ps;
-}
-
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
-                                const struct pp_hw_power_state *hw_ps)
-{
-       PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL);
-
-       return (const struct polaris10_power_state *)hw_ps;
-}
-
-static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
-{
-       cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
-       hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
-       return 0;
-}
-
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-       uint32_t speedCntl = 0;
-
-       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-       speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-                       ixPCIE_LC_SPEED_CNTL);
-       return((uint16_t)PHM_GET_FIELD(speedCntl,
-                       PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
-       uint32_t link_width;
-
-       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-       link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-                       PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
-       PP_ASSERT_WITH_CODE((7 >= link_width),
-                       "Invalid PCIe lane width!", return 0);
-
-       return decode_pcie_lane_width(link_width);
-}
-
-/**
-* Enable the SMC voltage controller
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   0 on success, 1 on failure
-*/
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
-{
-       PP_ASSERT_WITH_CODE(
-               (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
-               "Failed to enable voltage DPM during DPM Start Function!",
-               return 1;
-       );
-
-       return 0;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-*/
-static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-       const struct polaris10_hwmgr *data =
-                       (const struct polaris10_hwmgr *)(hwmgr->backend);
-
-       return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-       /* enable voltage control */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-       return 0;
-}
-
-/**
-* Create Voltage Tables.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       int result;
-
-       if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
-                               &(data->mvdd_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve MVDD table.",
-                               return result);
-       } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
-                               table_info->vdd_dep_on_mclk);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 MVDD table from dependency table.",
-                               return result;);
-       }
-
-       if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
-                               &(data->vddci_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve VDDCI table.",
-                               return result);
-       } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
-                               table_info->vdd_dep_on_mclk);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 VDDCI table from dependency table.",
-                               return result);
-       }
-
-       if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
-                               table_info->vddc_lookup_table);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to retrieve SVI2 VDDC table from lookup table.",
-                               return result);
-       }
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
-                       "Too many voltage values for VDDC. Trimming to fit state table.",
-                       phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
-                                                               &(data->vddc_voltage_table)));
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
-                       "Too many voltage values for VDDCI. Trimming to fit state table.",
-                       phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
-                                       &(data->vddci_voltage_table)));
-
-       PP_ASSERT_WITH_CODE(
-                       (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
-                       "Too many voltage values for MVDD. Trimming to fit state table.",
-                       phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
-                                                          &(data->mvdd_voltage_table)));
-
-       return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_program_static_screen_threshold_parameters(
-                                                       struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* Set static screen threshold unit */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-                       data->static_screen_threshold_unit);
-       /* Set static screen threshold */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-                       data->static_screen_threshold);
-
-       return 0;
-}
-
-/**
-* Set up display gap for glitch-free memory clock switching.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-       uint32_t display_gap =
-                       cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixCG_DISPLAY_GAP_CNTL);
-
-       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-                       DISP_GAP, DISPLAY_GAP_IGNORE);
-
-       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-                       DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-       return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* Clear reset for voting clients before enabling DPM */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-       return 0;
-}
-
-static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
-       /* Reset voting clients before disabling DPM */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_0, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_1, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_2, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_3, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_4, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_5, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_6, 0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_FREQ_TRAN_VOTING_7, 0);
-
-       return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, DpmTable),
-                       &tmp, data->sram_end);
-
-       if (0 == result)
-               data->dpm_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, SoftRegisters),
-                       &tmp, data->sram_end);
-
-       if (!result) {
-               data->soft_regs_start = tmp;
-               smu_data->soft_regs_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, mcRegisterTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->mc_reg_table_start = tmp;
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, FanTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->fan_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               data->arb_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, Version),
-                       &tmp, data->sram_end);
-
-       if (!result)
-               hwmgr->microcode_version_info.SMC = tmp;
-
-       error |= (0 != result);
-
-       return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-               uint32_t arb_src, uint32_t arb_dest)
-{
-       uint32_t mc_arb_dram_timing;
-       uint32_t mc_arb_dram_timing2;
-       uint32_t burst_time;
-       uint32_t mc_cg_config;
-
-       switch (arb_src) {
-       case MC_CG_ARB_FREQ_F0:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-               break;
-       case MC_CG_ARB_FREQ_F1:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (arb_dest) {
-       case MC_CG_ARB_FREQ_F0:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-               break;
-       case MC_CG_ARB_FREQ_F1:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-       mc_cg_config |= 0x0000000F;
-       cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-       PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
-       return 0;
-}
-
-static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
-       return polaris10_copy_and_switch_arb_sets(hwmgr,
-                       MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
-       uint32_t tmp;
-
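-       /* bits 15:8 of SMC_SCRATCH9 hold the currently active arb set */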
-       tmp = (cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
-                       0x0000ff00) >> 8;
-
-       if (tmp == MC_CG_ARB_FREQ_F0)
-               return 0;
-
-       return polaris10_copy_and_switch_arb_sets(hwmgr,
-                       tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       uint32_t i, max_entry;
-
-       PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
-                       data->use_pcie_power_saving_levels), "No pcie performance levels!",
-                       return -EINVAL);
-
-       if (data->use_pcie_performance_levels &&
-                       !data->use_pcie_power_saving_levels) {
-               data->pcie_gen_power_saving = data->pcie_gen_performance;
-               data->pcie_lane_power_saving = data->pcie_lane_performance;
-       } else if (!data->use_pcie_performance_levels &&
-                       data->use_pcie_power_saving_levels) {
-               data->pcie_gen_performance = data->pcie_gen_power_saving;
-               data->pcie_lane_performance = data->pcie_lane_power_saving;
-       }
-
-       phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
-                                       SMU74_MAX_LEVELS_LINK,
-                                       MAX_REGULAR_DPM_NUMBER);
-
-       if (pcie_table != NULL) {
-               /* max_entry is used to make sure we reserve one PCIE level
-                * for boot level (fix for A+A PSPP issue).
-                * If the PCIE table from the PPTable has a ULV entry + 8 entries,
-                * then ignore the last entry.*/
-               max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
-                               SMU74_MAX_LEVELS_LINK : pcie_table->count;
-               for (i = 1; i < max_entry; i++) {
-                       phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
-                                       get_pcie_gen_support(data->pcie_gen_cap,
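-       /* msg_id packs the target group, block and supported CG/LS states;
-        * the PP_*_MASK/PP_*_SHIFT pairs below decode it
-        */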
-                                                       pcie_table->entries[i].gen_speed),
-                                       get_pcie_lane_support(data->pcie_lane_cap,
-                                                       pcie_table->entries[i].lane_width));
-               }
-               data->dpm_table.pcie_speed_table.count = max_entry - 1;
-
-               /* Setup BIF_SCLK levels */
-               for (i = 0; i < max_entry; i++)
-                       data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
-       } else {
-               /* Hardcode Pcie Table */
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Min_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Min_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-                               get_pcie_gen_support(data->pcie_gen_cap,
-                                               PP_Max_PCIEGen),
-                               get_pcie_lane_support(data->pcie_lane_cap,
-                                               PP_Max_PCIELane));
-
-               data->dpm_table.pcie_speed_table.count = 6;
-       }
-       /* Populate last level for boot PCIE level, but do not increment count. */
-       phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-                       data->dpm_table.pcie_speed_table.count,
-                       get_pcie_gen_support(data->pcie_gen_cap,
-                                       PP_Min_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap,
-                                       PP_Max_PCIELane));
-
-       return 0;
-}
-
-/*
- * This function initializes all DPM state tables
- * for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD request, etc.
- */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i;
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                       table_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
-                       "SCLK dependency table is missing. This table is mandatory",
-                       return -EINVAL);
-       PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-                       "SCLK dependency table has to have at least one entry. "
-                       "This table is mandatory",
-                       return -EINVAL);
-
-       PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
-                       "MCLK dependency table is missing. This table is mandatory",
-                       return -EINVAL);
-       PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-                       "MCLK dependency table has to have at least one entry. "
-                       "This table is mandatory",
-                       return -EINVAL);
-
-       /* clear the state table to reset everything to default */
-       phm_reset_single_dpm_table(
-                       &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
-       phm_reset_single_dpm_table(
-                       &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
-
-
-       /* Initialize Sclk DPM table based on allowed Sclk values */
-       data->dpm_table.sclk_table.count = 0;
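-       /* collapse duplicate clock entries; only the first level starts
-        * out enabled
-        */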
-       for (i = 0; i < dep_sclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
-                                               dep_sclk_table->entries[i].clk) {
-
-                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
-                                       dep_sclk_table->entries[i].clk;
-
-                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
-                                       (i == 0);
-                       data->dpm_table.sclk_table.count++;
-               }
-       }
-
-       /* Initialize Mclk DPM table based on allowed Mclk values */
-       data->dpm_table.mclk_table.count = 0;
-       for (i = 0; i < dep_mclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.mclk_table.dpm_levels
-                               [data->dpm_table.mclk_table.count - 1].value !=
-                                               dep_mclk_table->entries[i].clk) {
-                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
-                                                       dep_mclk_table->entries[i].clk;
-                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
-                                                       (i == 0);
-                       data->dpm_table.mclk_table.count++;
-               }
-       }
-
-       /* setup PCIE gen speed levels */
-       polaris10_setup_default_pcie_table(hwmgr);
-
-       /* save a copy of the default DPM table */
-       memcpy(&(data->golden_dpm_table), &(data->dpm_table),
-                       sizeof(struct polaris10_dpm_table));
-
-       return 0;
-}
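-
-/*
- * The two loops above collapse consecutive duplicate clocks from the
- * (ascending) dependency tables into unique DPM levels, with only level 0
- * enabled by default. A minimal sketch of that pass, using hypothetical
- * demo_* names that do not exist in the driver:
- *
- *     struct demo_level { uint32_t value; bool enabled; };
- *
- *     static unsigned int demo_dedup(const uint32_t *clk, unsigned int n,
- *                                    struct demo_level *out)
- *     {
- *             unsigned int count = 0, i;
- *
- *             for (i = 0; i < n; i++) {
- *                     if (i == 0 || out[count - 1].value != clk[i]) {
- *                             out[count].value = clk[i];
- *                             out[count].enabled = (i == 0);
- *                             count++;
- *                     }
- *             }
- *             return count;
- *     }
- *
- * e.g. clocks {30000, 30000, 60000, 90000} produce 3 DPM levels.
- */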
-
-uint8_t convert_to_vid(uint16_t vddc)
-{
-       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
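-
-/*
- * Worked example for convert_to_vid(), assuming VOLTAGE_SCALE == 4 and
- * voltages in mV (consistent with how the callers below scale voltages):
- *
- *     vddc = 1000 mV  ->  (6200 - 1000 * 4) / 25 = 2200 / 25 = VID 88
- *     vddc = 1100 mV  ->  (6200 - 1100 * 4) / 25 = 1800 / 25 = VID 72
- *
- * so each VID step corresponds to 6.25 mV, and higher VIDs encode lower
- * voltages.
- */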
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    hwmgr   the address of the hardware manager
- * @param    table   the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU74_Discrete_DpmTable *table)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t count, level;
-
-       if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               count = data->mvdd_voltage_table.count;
-               if (count > SMU_MAX_SMIO_LEVELS)
-                       count = SMU_MAX_SMIO_LEVELS;
-               for (level = 0; level < count; level++) {
-                       table->SmioTable2.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-                       table->SmioTable2.Pattern[level].Smio =
-                               (uint8_t) level;
-                       table->Smio[level] |=
-                               data->mvdd_voltage_table.entries[level].smio_low;
-               }
-               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-               table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
-                                       struct SMU74_Discrete_DpmTable *table)
-{
-       uint32_t count, level;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       count = data->vddci_voltage_table.count;
-
-       if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               if (count > SMU_MAX_SMIO_LEVELS)
-                       count = SMU_MAX_SMIO_LEVELS;
-               for (level = 0; level < count; ++level) {
-                       table->SmioTable1.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
-                       table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
-                       table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
-               }
-       }
-
-       table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
-       return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-                       table_info->vddc_lookup_table;
-       /* The table is already swapped, so in order to use a value from
-        * it, we need to swap it back.
-        * We are populating vddc CAC data to the BapmVddc table
-        * in both split and merged mode.
-        */
-       for (count = 0; count < lookup_table->count; count++) {
-               index = phm_get_voltage_index(lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
-               table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
-               table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
-       }
-
-       return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always  0
-*/
-
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       polaris10_populate_smc_vddci_table(hwmgr, table);
-       polaris10_populate_smc_mvdd_table(hwmgr, table);
-       polaris10_populate_cac_table(hwmgr, table);
-
-       return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_Ulv *state)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-       state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-       return 0;
-}
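-
-/*
- * The CONVERT_FROM_HOST_TO_SMC_UL/_US macros used above byte-swap fields
- * in place, because the SMC consumes big-endian data (the PP_HOST_TO_SMC_*
- * helpers in pp_endian.h wrap cpu_to_be32()/cpu_to_be16()). A minimal
- * sketch, assuming a little-endian host:
- *
- *     uint32_t v = 0x00001234;
- *     v = cpu_to_be32(v);    bytes in memory are now 00 00 12 34, MSB first
- */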
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-       int i;
-
-       /* Index (dpm_table->pcie_speed_table.count)
-        * is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-       }
-
-       data->smc_state_table.LinkLevelCount =
-                       (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
-{
-       uint32_t reference_clock, tmp;
-       struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
-
-       info.mode_info = &mode_info;
-
-       tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
-       if (tmp)
-               return TCLK;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       reference_clock = mode_info.ref_clock;
-
-       tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
-       if (0 != tmp)
-               return reference_clock / 4;
-
-       return reference_clock;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    sclk_setting  the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
-       const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
-       struct pp_atomctrl_clock_dividers_ai dividers;
-
-       uint32_t ref_clock;
-       uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
-       uint8_t i;
-       int result;
-       uint64_t temp;
-
-       sclk_setting->SclkFrequency = clock;
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
-       if (result == 0) {
-               sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
-               sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
-               sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
-               sclk_setting->PllRange = dividers.ucSclkPllRange;
-               sclk_setting->Sclk_slew_rate = 0x400;
-               sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
-               sclk_setting->Pcc_down_slew_rate = 0xffff;
-               sclk_setting->SSc_En = dividers.ucSscEnable;
-               sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
-               sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
-               sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
-               return result;
-       }
-
-       ref_clock = polaris10_get_xclk(hwmgr);
-
-       for (i = 0; i < NUM_SCLK_RANGE; i++) {
-               if (clock > data->range_table[i].trans_lower_frequency
-               && clock <= data->range_table[i].trans_upper_frequency) {
-                       sclk_setting->PllRange = i;
-                       break;
-               }
-       }
-
-       sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-       temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
-       temp <<= 0x10;
-       do_div(temp, ref_clock);
-       sclk_setting->Fcw_frac = temp & 0xffff;
-
-       pcc_target_percent = 10; /*  Hardcode 10% for now. */
-       pcc_target_freq = clock - (clock * pcc_target_percent / 100);
-       sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
-       ss_target_percent = 2; /*  Hardcode 2% for now. */
-       sclk_setting->SSc_En = 0;
-       if (ss_target_percent) {
-               sclk_setting->SSc_En = 1;
-               ss_target_freq = clock - (clock * ss_target_percent / 100);
-               sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-               temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
-               temp <<= 0x10;
-               do_div(temp, ref_clock);
-               sclk_setting->Fcw1_frac = temp & 0xffff;
-       }
-
-       return 0;
-}
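-
-/*
- * The Fcw fields computed above form a 16.16 fixed-point frequency control
- * word, Fcw = (clock << postdiv) / ref_clock. A worked example with
- * illustrative numbers (not taken from real hardware):
- *
- *     clock = 81250, postdiv = 1, ref_clock = 25000
- *     Fcw_int  = (81250 << 1) / 25000                    = 6
- *     Fcw_frac = (((81250 << 1) << 16) / 25000) & 0xffff
- *              = 425984 & 0xffff                         = 32768 (0.5 * 2^16)
- *
- * i.e. the PLL is programmed for 6.5 reference-clock multiples.
- */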
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
-               uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-       uint32_t i;
-       uint16_t vddci;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       *voltage = *mvdd = 0;
-
-       /* the clock-voltage dependency table is empty */
-       if (dep_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < dep_table->count; i++) {
-               /* find the first sclk at or above the request */
-               if (dep_table->entries[i].clk >= clock) {
-                       *voltage |= (dep_table->entries[i].vddc *
-                                       VOLTAGE_SCALE) << VDDC_SHIFT;
-                       if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
-                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else if (dep_table->entries[i].vddci)
-                               *voltage |= (dep_table->entries[i].vddci *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else {
-                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               (dep_table->entries[i].vddc -
-                                                               (uint16_t)data->vddc_vddci_delta));
-                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       }
-
-                       if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
-                                       VOLTAGE_SCALE;
-                       else if (dep_table->entries[i].mvdd)
-                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
-                                       VOLTAGE_SCALE;
-
-                       *voltage |= 1 << PHASES_SHIFT;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than the max sclk in the dependency table */
-       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-       if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-       else if (dep_table->entries[i-1].vddci) {
-               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                               (dep_table->entries[i - 1].vddc -
-                                               (uint16_t)data->vddc_vddci_delta));
-               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-       }
-
-       if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-       else if (dep_table->entries[i - 1].mvdd)
-               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-       return 0;
-}
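-
-/*
- * The *voltage word built above packs the vddc level, the vddci level
- * (both in VOLTAGE_SCALE units) and a phase count into separate bit
- * fields selected by VDDC_SHIFT, VDDCI_SHIFT and PHASES_SHIFT. Sketch
- * only -- the shift values below are invented for illustration and are
- * not the driver's definitions:
- *
- *     assume VDDC_SHIFT = 0, VDDCI_SHIFT = 15, PHASES_SHIFT = 30,
- *            vddc = 1000 mV, vddci = 950 mV, VOLTAGE_SCALE = 4
- *
- *     voltage = (1000 * 4) << 0     = 0x00000fa0
- *             | ( 950 * 4) << 15    = 0x076c0000
- *             | (       1) << 30    = 0x40000000
- *                                   = 0x476c0fa0
- */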
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
-{ {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
-  {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t i, ref_clk;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
-       struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
-       ref_clk = polaris10_get_xclk(hwmgr);
-
-       if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
-               for (i = 0; i < NUM_SCLK_RANGE; i++) {
-                       table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
-                       table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
-                       table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
-                       table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
-                       table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
-                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
-                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
-                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
-               }
-               return;
-       }
-
-       for (i = 0; i < NUM_SCLK_RANGE; i++) {
-
-               data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
-               data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
-               table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
-               table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
-               table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
-               table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
-               table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
-               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
-               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
-               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
-       }
-}
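-
-/*
- * In the fallback path above, each PLL range is bounded by converting the
- * table's FCW limits back into frequencies:
- *
- *     trans_freq = (ref_clk * fcw_trans) >> postdiv
- *
- * e.g. with ref_clk = 25000 and the Range_Table row
- * {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, and assuming
- * POSTDIV_DIV_BY_16 encodes a right shift of 4:
- *
- *     trans_lower = (25000 * 112) >> 4 = 175000
- *     trans_upper = (25000 * 160) >> 4 = 250000
- *
- * polaris10_calculate_sclk_params() then picks the first range whose
- * (lower, upper] interval contains the requested clock.
- */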
-
-/**
-* Populates a single SMC SCLK structure using the provided engine clock
-*
-* @param    hwmgr              the address of the hardware manager
-* @param    clock              the engine clock used to populate the structure
-* @param    sclk_al_threshold  the activity threshold for this level
-* @param    level              the SMC graphics level structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, uint16_t sclk_al_threshold,
-               struct SMU74_Discrete_GraphicsLevel *level)
-{
-       int result, i, temp;
-       /* PP_Clocks minClocks; */
-       uint32_t mvdd;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       SMU_SclkSetting curr_sclk_setting = { 0 };
-
-       result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
-       /* populate graphics levels */
-       result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_sclk, clock,
-                       &level->MinVoltage, &mvdd);
-
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find VDDC voltage value for "
-                       "VDDC engine clock dependency table",
-                       return result);
-       level->ActivityLevel = sclk_al_threshold;
-
-       level->CcPwrDynRm = 0;
-       level->CcPwrDynRm1 = 0;
-       level->EnabledForActivity = 0;
-       level->EnabledForThrottle = 1;
-       level->UpHyst = 10;
-       level->DownHyst = 0;
-       level->VoltageDownHyst = 0;
-       level->PowerThrottle = 0;
-
-       /*
-       * TODO: get minimum clocks from DAL configuration
-       * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
-       */
-       /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
-
-       /* get level->DeepSleepDivId
-       if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-               level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
-       */
-       PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
-       for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
-               temp = clock >> i;
-
-               if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
-                       break;
-       }
-
-       level->DeepSleepDivId = i;
-
-       /* Default to slow, highest DPM level will be
-        * set to PPSMC_DISPLAY_WATERMARK_LOW later.
-        */
-       if (data->update_up_hyst)
-               level->UpHyst = (uint8_t)data->up_hyst;
-       if (data->update_down_hyst)
-               level->DownHyst = (uint8_t)data->down_hyst;
-
-       level->SclkSetting = curr_sclk_setting;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
-       return 0;
-}
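-
-/*
- * The DeepSleepDivId loop above selects the largest power-of-two divider
- * that keeps the divided clock at or above POLARIS10_MINIMUM_ENGINE_CLOCK.
- * Worked example with illustrative values (minimum clock 2500, maximum
- * divider ID 5):
- *
- *     clock = 30000:  30000 >> 5 =  937  (below the minimum, keep going)
- *                     30000 >> 4 = 1875  (below the minimum, keep going)
- *                     30000 >> 3 = 3750  (ok)  ->  DeepSleepDivId = 3
- */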
-
-/**
-* Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
-       int result = 0;
-       uint32_t array = data->dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
-                       SMU74_MAX_LEVELS_GRAPHICS;
-       struct SMU74_Discrete_GraphicsLevel *levels =
-                       data->smc_state_table.GraphicsLevel;
-       uint32_t i, max_entry;
-       uint8_t highest_pcie_level_enabled = 0,
-               lowest_pcie_level_enabled = 0,
-               mid_pcie_level_enabled = 0,
-               count = 0;
-
-       polaris10_get_sclk_range_table(hwmgr);
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
-               result = polaris10_populate_single_graphic_level(hwmgr,
-                               dpm_table->sclk_table.dpm_levels[i].value,
-                               (uint16_t)data->activity_target[i],
-                               &(data->smc_state_table.GraphicsLevel[i]));
-               if (result)
-                       return result;
-
-               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-               if (i > 1)
-                       levels[i].DeepSleepDivId = 0;
-       }
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_SPLLShutdownSupport))
-               data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
-       data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-       data->smc_state_table.GraphicsDpmLevelCount =
-                       (uint8_t)dpm_table->sclk_table.count;
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
-       if (pcie_table != NULL) {
-               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
-                               "There must be 1 or more PCIE levels defined in PPTable.",
-                               return -EINVAL);
-               max_entry = pcie_entry_cnt - 1;
-               for (i = 0; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel =
-                                       (uint8_t) ((i < max_entry) ? i : max_entry);
-       } else {
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (highest_pcie_level_enabled + 1))) != 0))
-                       highest_pcie_level_enabled++;
-
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << lowest_pcie_level_enabled)) == 0))
-                       lowest_pcie_level_enabled++;
-
-               while ((count < highest_pcie_level_enabled) &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-                       count++;
-
-               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-                               highest_pcie_level_enabled ?
-                                               (lowest_pcie_level_enabled + 1 + count) :
-                                               highest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to highest_pcie_level_enabled */
-               for (i = 2; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel = highest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to lowest_pcie_level_enabled */
-               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to mid_pcie_level_enabled */
-               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-       }
-       /* The level count is sent to the SMC once at SMC table init and never changes. */
-       result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, data->sram_end);
-
-       return result;
-}
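-
-/*
- * The mask walk above derives three representative PCIe levels from
- * pcie_dpm_enable_mask. Worked example with an enable mask of 0b0110
- * (PCIe levels 1 and 2 enabled):
- *
- *     lowest_pcie_level_enabled  = 1  (first set bit)
- *     highest_pcie_level_enabled = 2  (last bit of the run starting at 1)
- *     mid_pcie_level_enabled     = 2  (first set bit above the lowest,
- *                                      capped at the highest)
- *
- * so levels[0] gets PCIe level 1 and levels[1..n-1] get PCIe level 2.
- */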
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result = 0;
-       struct cgs_display_info info = {0, 0, NULL};
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (table_info->vdd_dep_on_mclk) {
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk, clock,
-                               &mem_level->MinVoltage, &mem_level->MinMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find MinVddc voltage value from memory "
-                               "VDDC voltage dependency table", return result);
-       }
-
-       mem_level->MclkFrequency = clock;
-       mem_level->EnabledForThrottle = 1;
-       mem_level->EnabledForActivity = 0;
-       mem_level->UpHyst = 0;
-       mem_level->DownHyst = 100;
-       mem_level->VoltageDownHyst = 0;
-       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       mem_level->StutterEnable = false;
-       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       data->display_timing.num_existing_displays = info.display_count;
-
-       if ((data->mclk_stutter_mode_threshold) &&
-               (clock <= data->mclk_stutter_mode_threshold) &&
-               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-                               STUTTER_ENABLE) & 0x1))
-               mem_level->StutterEnable = true;
-
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-       }
-       return result;
-}
-
-/**
-* Populates all SMC MCLK level structures based on the trimmed allowed DPM memory clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t array = data->dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
-       uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
-                       SMU74_MAX_LEVELS_MEMORY;
-       struct SMU74_Discrete_MemoryLevel *levels =
-                       data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                               "can not populate memory level as memory clock is zero",
-                               return -EINVAL);
-               result = polaris10_populate_single_memory_level(hwmgr,
-                               dpm_table->mclk_table.dpm_levels[i].value,
-                               &levels[i]);
-               if (i == dpm_table->mclk_table.count - 1) {
-                       levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-                       levels[i].EnabledForActivity = 1;
-               }
-               if (result)
-                       return result;
-       }
-
-       /* In order to prevent MC activity in stutter mode from pushing
-        * DPM up, the UVD change complements this by putting the MCLK in
-        * a higher state by default, such that we are not affected by the
-        * up threshold or MCLK DPM latency.
-        */
-       levels[0].ActivityLevel = 0x1f;
-       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-       data->smc_state_table.MemoryDpmLevelCount =
-                       (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
-       /* The level count is sent to the SMC once at SMC table init and never changes. */
-       result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, data->sram_end);
-
-       return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk        the MCLK value used to decide whether MVDD should be high or low
-* @param    smio_pat    the SMIO pattern structure to be populated
-*/
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-               uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-       const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find the first mvdd entry whose clock is at or above the request */
-               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-                               "MVDD Voltage is outside the supported range.",
-                               return -EINVAL);
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-               SMU74_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint32_t sclk_frequency;
-       const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       SMIO_Pattern vol_level;
-       uint32_t mvdd;
-       uint16_t us_mvdd;
-
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-
-       /* Get MinVoltage and Frequency from DPM0,
-        * already converted to SMC_UL */
-       sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
-       result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_sclk,
-                       sclk_frequency,
-                       &table->ACPILevel.MinVoltage, &mvdd);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Cannot find ACPI VDDC voltage value "
-                       "in Clock Dependency Table",
-                       );
-
-
-       result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
-       PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       table->ACPILevel.DeepSleepDivId = 0;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
-       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-       table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
-       result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_mclk,
-                       table->MemoryACPILevel.MclkFrequency,
-                       &table->MemoryACPILevel.MinVoltage, &mvdd);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Cannot find ACPI VDDCI voltage value "
-                       "in Clock Dependency Table",
-                       );
-
-       us_mvdd = 0;
-       if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-                       (data->mclk_dpm_key_disabled))
-               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-       else {
-               if (!polaris10_populate_mvdd_value(hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[0].value,
-                               &vol_level))
-                       us_mvdd = vol_level.Voltage;
-       }
-
-       if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
-               table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
-       else
-               table->MemoryACPILevel.MinMvdd = 0;
-
-       table->MemoryACPILevel.StutterEnable = false;
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpHyst = 0;
-       table->MemoryACPILevel.DownHyst = 100;
-       table->MemoryACPILevel.VoltageDownHyst = 0;
-       table->MemoryACPILevel.ActivityLevel =
-                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
-       return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->VceLevelCount = (uint8_t)(mm_table->count);
-       table->VceBootLevel = 0;
-
-       for (count = 0; count < table->VceLevelCount; count++) {
-               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
-               table->VceLevel[count].MinVoltage = 0;
-               table->VceLevel[count].MinVoltage |=
-                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-
-               table->VceLevel[count].MinVoltage |=
-                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->VceLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for VCE engine clock",
-                               return result);
-
-               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-               int32_t eng_clock, int32_t mem_clock,
-               SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-       uint32_t dram_timing;
-       uint32_t dram_timing2;
-       uint32_t burst_time;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                       eng_clock, mem_clock);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-       arb_regs->McArbBurstTime   = (uint8_t)burst_time;
-
-       return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
-       uint32_t i, j;
-       int result = 0;
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = polaris10_populate_memory_timing_parameters(hwmgr,
-                                       data->dpm_table.sclk_table.dpm_levels[i].value,
-                                       data->dpm_table.mclk_table.dpm_levels[j].value,
-                                       &arb_regs.entries[i][j]);
-                       if (result == 0)
-                               result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
-                       if (result != 0)
-                               return result;
-               }
-       }
-
-       result = polaris10_copy_bytes_to_smc(
-                       hwmgr->smumgr,
-                       data->arb_table_start,
-                       (uint8_t *)&arb_regs,
-                       sizeof(SMU74_Discrete_MCArbDramTimingTable),
-                       data->sram_end);
-       return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->UvdLevelCount = (uint8_t)(mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].MinVoltage = 0;
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value from VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].VclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Vclk clock", return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Dclk clock", return result);
-
-               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-       }
-
-       return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table */
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(table->GraphicsBootLevel));
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-                       data->vbios_boot_state.mclk_bootup_value,
-                       (uint32_t *)&(table->MemoryBootLevel));
-
-       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-                       VOLTAGE_SCALE;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return 0;
-}
-
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint8_t count, level;
-
-       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
-                               data->vbios_boot_state.sclk_bootup_value) {
-                       data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
-                               data->vbios_boot_state.mclk_bootup_value) {
-                       data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint8_t i, stretch_amount, volt_offset = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-       /* Read the SMU efuse to calculate RO and determine whether the
-        * part is SS or FF. If RO >= 1660MHz, the part is FF.
-        */
-       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (67 * 4));
-       efuse &= 0xFF000000;
-       efuse = efuse >> 24;
-
-       if (hwmgr->chip_id == CHIP_POLARIS10) {
-               min = 1000;
-               max = 2300;
-       } else {
-               min = 1100;
-               max = 2100;
-       }
-
-       ro = efuse * (max - min) / 255 + min;
-
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-       for (i = 0; i < sclk_table->count; i++) {
-               data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-                               sclk_table->entries[i].cks_enable << i;
-               if (hwmgr->chip_id == CHIP_POLARIS10) {
-                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
-                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
-                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
-               } else {
-                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
-                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
-                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
-                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
-               }
-
-               if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
-               data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-       }
-
-       data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
-       /* Populate CKS Lookup Table */
-       if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
-                       stretch_amount != 4 && stretch_amount != 5) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-               PP_ASSERT_WITH_CODE(false,
-                               "Stretch Amount in PPTable not supported\n",
-                               return -EINVAL);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-       value &= 0xFFFFFFFE;
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-       return 0;
-}
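-
-/*
- * The volt_offset computation above is a round-up conversion of a
- * millivolt delta into 6.25 mV VID steps:
- *
- *     (x * 100 + 624) / 625  ==  ceil(x / 6.25)
- *
- * e.g. x = 13 mV  ->  (1300 + 624) / 625 = 3 steps, since 13 / 6.25 = 2.08
- * rounds up to 3; this matches the 6.25 mV VID granularity used by
- * convert_to_vid().
- */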
-
-/**
-* Populates the SMC VRConfig field in the DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint16_t config;
-
-       config = VR_MERGED_WITH_VDDC;
-       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-       /* Set Vddc Voltage Controller */
-       if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= config;
-       } else {
-               PP_ASSERT_WITH_CODE(false,
-                               "VDDC should be on SVI2 control in merged mode!",
-                               );
-       }
-       /* Set Vddci Voltage Controller */
-       if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       }
-       /* Set Mvdd Voltage Controller */
-       if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               config = VR_SVI2_PLANE_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
-                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
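-
-/*
- * VRConfig packs one regulator-type code per rail (VDDGFX, VDDC, VDDCI,
- * MVDD) into a single word, each at its VRCONF_*_SHIFT position. Sketch
- * only -- the field positions below are invented for illustration and are
- * not the driver's definitions:
- *
- *     assume VDDC at bits [3:0], VDDGFX at [7:4], VDDCI at [11:8] and
- *     MVDD at [15:12]; then, for the merged-mode SVI2 case above:
- *
- *     VRConfig = VR_SVI2_PLANE_1              (VDDC  on SVI2 plane 1)
- *              | VR_MERGED_WITH_VDDC << 4     (VDDGFX merged with VDDC)
- *              | VR_SVI2_PLANE_2     << 8     (VDDCI on SVI2 plane 2)
- *              | VR_STATIC_VOLTAGE   << 12    (MVDD not switched)
- */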
-
-
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
-       int result = 0;
-       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
-       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
-       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
-       uint32_t tmp, i;
-       struct pp_smumgr *smumgr = hwmgr->smumgr;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-
-       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-               return result;
-
-       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
-       if (0 == result) {
-               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
-               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
-               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
-               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
-               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
-               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
-               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
-               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
-               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
-               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
-               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
-               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
-               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
-               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
-               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
-               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
-               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
-               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
-               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
-               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
-               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
-               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
-               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
-               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
-               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
-                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
-                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
-               }
-
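-               /* Look up the SRAM addresses of the AVFS tables from the
-                * firmware header, then copy the host-side structures into
-                * SMC memory.
-                */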
-               result = polaris10_read_smc_sram_dword(smumgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
-                               &tmp, data->sram_end);
-
-               polaris10_copy_bytes_to_smc(smumgr,
-                                       tmp,
-                                       (uint8_t *)&AVFS_meanNsigma,
-                                       sizeof(AVFS_meanNsigma_t),
-                                       data->sram_end);
-
-               result = polaris10_read_smc_sram_dword(smumgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
-                               &tmp, data->sram_end);
-               polaris10_copy_bytes_to_smc(smumgr,
-                                       tmp,
-                                       (uint8_t *)&AVFS_SclkOffset,
-                                       sizeof(AVFS_Sclk_Offset_t),
-                                       data->sram_end);
-
-               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
-               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
-       }
-       return result;
-}
-
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
-       const struct polaris10_ulv_parm *ulv = &(data->ulv);
-       uint8_t i;
-       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-       pp_atomctrl_clock_dividers_vi dividers;
-
-       result = polaris10_setup_default_dpm_tables(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to setup default DPM tables!", return result);
-
-       if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
-               polaris10_populate_smc_voltage_tables(hwmgr, table);
-
-       table->SystemFlags = 0;
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-       if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
-               result = polaris10_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to initialize ULV state!", return result);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
-       }
-
-       result = polaris10_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Link Level!", return result);
-
-       result = polaris10_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Graphics Level!", return result);
-
-       result = polaris10_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Memory Level!", return result);
-
-       result = polaris10_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACPI Level!", return result);
-
-       result = polaris10_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize VCE Level!", return result);
-
-       result = polaris10_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
-       /* Since only the initial state is completely set up at this point
-        * (the other states are just copies of the boot state) we only
-        * need to populate the ARB settings for the initial state.
-        */
-       result = polaris10_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to Write ARB settings for the initial state.", return result);
-
-       result = polaris10_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize UVD Level!", return result);
-
-       result = polaris10_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot Level!", return result);
-
-       result = polaris10_populate_smc_initailial_state(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot State!", return result);
-
-       result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate BAPM Parameters!", return result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = polaris10_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to populate Clock Stretcher Data Table!",
-                               return result);
-       }
-
-       result = polaris10_populate_avfs_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
-
-       table->CurrSclkPllRange = 0xff;
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-                       table_info->cac_dtp_table->usTargetOperatingTemp *
-                       POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow  =
-                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-                       POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable = 1;
-       table->MemoryInterval = 1;
-       table->VoltageResponseTime = 0;
-       table->PhaseResponseTime = 0;
-       table->MemoryThermThrottleEnable = 1;
-       table->PCIeBootLinkLevel = 0;
-       table->PCIeGenInterval = 1;
-       table->VRConfig = 0;
-
-       result = polaris10_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate VRConfig setting!", return result);
-
-       table->ThermGpio = 17;
-       table->SclkStepSize = 0x4000;
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-       } else {
-               table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                       &gpio_pin)) {
-               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       /* Thermal Output GPIO */
-       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-                       &gpio_pin)) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-               /* For polarity, read GPIOPAD_A for the assigned GPIO pin:
-                * the VBIOS programs this register to the 'inactive state',
-                * so the driver can derive the 'active state' from it and
-                * program the SMU with the correct polarity.
-                */
-               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
-                                       & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
-               && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-       } else {
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       /* Populate BIF_SCLK levels into SMC DPM table */
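-       /* Entry 0 feeds the ULV state; entries 1..count map onto
-        * LinkLevel[0..count-1].
-        */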
-       for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
-               PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
-               if (i == 0)
-                       table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-               else
-                       table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-       }
-
-       for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
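-       /* Note that the upload size excludes the trailing three
-        * SMU74_PIDController blocks, presumably at the tail of the table.
-        */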
-       result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
-                       data->dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
-                       data->sram_end);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload dpm data to SMC memory!", return result);
-
-       return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t tmp;
-       int result;
-
-       /* This is a read-modify-write on the first byte of the ARB table.
-        * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
-        * is the field 'current'.
-        * This solution is ugly, but we never write the whole table, only
-        * individual fields in it.
-        * In reality this field should not be in that structure
-        * but in a soft register.
-        */
-       result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       data->arb_table_start, &tmp, data->sram_end);
-
-       if (result)
-               return result;
-
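-       /* Preserve the other three bytes and set the byte holding the
-        * 'current' field to MC_CG_ARB_FREQ_F1.
-        */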
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return polaris10_write_smc_sram_dword(hwmgr->smumgr,
-                       data->arb_table_start, tmp, data->sram_end);
-}
-
-static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
-       return 0;
-}
-
-static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       SCLK_PWRMGT_OFF, 0);
-       return 0;
-}
-
-static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_ulv_parm *ulv = &(data->ulv);
-
-       if (ulv->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
-       return 0;
-}
-
-static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_ulv_parm *ulv = &(data->ulv);
-
-       if (ulv->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
-       return 0;
-}
-
-static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to enable Master Deep Sleep switch failed!",
-                                       return -1);
-       } else {
-               if (smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to disable Master Deep Sleep switch failed!",
-                                       return -1);
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to disable Master Deep Sleep switch failed!",
-                                       return -1);
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t soft_register_value = 0;
-       uint32_t handshake_disables_offset = data->soft_regs_start
-                               + offsetof(SMU74_SoftRegisters, HandshakeDisables);
-
-       /* enable SCLK dpm */
-       if (!data->sclk_dpm_key_disabled)
-               PP_ASSERT_WITH_CODE(
-               (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
-               "Failed to enable SCLK DPM during DPM Start Function!",
-               return -1);
-
-       /* enable MCLK dpm */
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* Disable UVD - SMU handshake for MCLK. */
-               soft_register_value = cgs_read_ind_register(hwmgr->device,
-                                       CGS_IND_REG__SMC, handshake_disables_offset);
-               soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               handshake_disables_offset, soft_register_value);
-
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_Enable)),
-                               "Failed to enable MCLK DPM during DPM Start Function!",
-                               return -1);
-
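-               /* Enable memory-controller CAC, then program the local CAC
-                * (LCAC) control registers; the magic values are assumed to
-                * be the required hardware bring-up sequence.
-                */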
-               PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
-               udelay(10);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
-       }
-
-       return 0;
-}
-
-static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* enable general power management */
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                       GLOBAL_PWRMGT_EN, 1);
-
-       /* enable sclk deep sleep */
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       DYNAMIC_PM_EN, 1);
-
-       /* prepare for PCIE DPM */
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       data->soft_regs_start + offsetof(SMU74_SoftRegisters,
-                                       VoltageChangeTimeout), 0x1000);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-                       SWRST_COMMAND_1, RESETLC, 0x0);
-/*
-       PP_ASSERT_WITH_CODE(
-                       (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                       PPSMC_MSG_Voltage_Cntl_Enable)),
-                       "Failed to enable voltage DPM during DPM Start Function!",
-                       return -1);
-*/
-
-       if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
-               printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
-               return -1;
-       }
-
-       /* enable PCIE dpm */
-       if (0 == data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Enable)),
-                               "Failed to enable pcie DPM during DPM Start Function!",
-                               return -1);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_Falcon_QuickTransition)) {
-               PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_EnableACDCGPIOInterrupt)),
-                               "Failed to enable AC DC GPIO Interrupt!",
-                               );
-       }
-
-       return 0;
-}
-
-static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* disable SCLK dpm */
-       if (!data->sclk_dpm_key_disabled)
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_DPM_Disable) == 0),
-                               "Failed to disable SCLK DPM!",
-                               return -1);
-
-       /* disable MCLK dpm */
-       if (!data->mclk_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
-                               "Failed to disable MCLK DPM!",
-                               return -1);
-       }
-
-       return 0;
-}
-
-static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* disable general power management */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                       GLOBAL_PWRMGT_EN, 0);
-       /* disable sclk deep sleep */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-                       DYNAMIC_PM_EN, 0);
-
-       /* disable PCIE dpm */
-       if (!data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
-                               "Failed to disable pcie DPM during DPM Stop Function!",
-                               return -1);
-       }
-
-       if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
-               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
-               return -1;
-       }
-
-       return 0;
-}
-
-static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
-{
-       bool protection;
-       enum DPM_EVENT_SRC src;
-
-       switch (sources) {
-       default:
-               printk(KERN_ERR "Unknown throttling event sources.\n");
-               /* fall through */
-       case 0:
-               protection = false;
-               /* src is unused */
-               break;
-       case (1 << PHM_AutoThrottleSource_Thermal):
-               protection = true;
-               src = DPM_EVENT_SRC_DIGITAL;
-               break;
-       case (1 << PHM_AutoThrottleSource_External):
-               protection = true;
-               src = DPM_EVENT_SRC_EXTERNAL;
-               break;
-       case (1 << PHM_AutoThrottleSource_External) |
-                       (1 << PHM_AutoThrottleSource_Thermal):
-               protection = true;
-               src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
-               break;
-       }
-       /* Order matters - don't enable thermal protection for the wrong source. */
-       if (protection) {
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
-                               DPM_EVENT_SRC, src);
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                               THERMAL_PROTECTION_DIS,
-                               !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_ThermalController));
-       } else
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-                               THERMAL_PROTECTION_DIS, 1);
-}
-
-static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-               PHM_AutoThrottleSource source)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!(data->active_auto_throttle_sources & (1 << source))) {
-               data->active_auto_throttle_sources |= 1 << source;
-               polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-       }
-       return 0;
-}
-
-static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-       return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-               PHM_AutoThrottleSource source)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->active_auto_throttle_sources & (1 << source)) {
-               data->active_auto_throttle_sources &= ~(1 << source);
-               polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-       }
-       return 0;
-}
-
-static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-       return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       data->pcie_performance_request = true;
-
-       return 0;
-}
-
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-       PP_ASSERT_WITH_CODE(tmp_result == 0,
-                       "DPM is already running right now, no need to enable DPM!",
-                       return 0);
-
-       if (polaris10_voltage_control(hwmgr)) {
-               tmp_result = polaris10_enable_voltage_control(hwmgr);
-               PP_ASSERT_WITH_CODE(tmp_result == 0,
-                               "Failed to enable voltage control!",
-                               result = tmp_result);
-
-               tmp_result = polaris10_construct_voltage_tables(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to construct voltage tables!",
-                               result = tmp_result);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EngineSpreadSpectrumSupport))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalController))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
-       tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program static screen threshold parameters!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_enable_display_gap(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable display gap!", result = tmp_result);
-
-       tmp_result = polaris10_program_voting_clients(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program voting clients!", result = tmp_result);
-
-       tmp_result = polaris10_process_firmware_header(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to process firmware header!", result = tmp_result);
-
-       tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize switch from ArbF0 to F1!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_init_smc_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize SMC table!", result = tmp_result);
-
-       tmp_result = polaris10_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to initialize ARB table index!", result = tmp_result);
-
-       tmp_result = polaris10_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to populate PM fuses!", result = tmp_result);
-
-       tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
-
-       tmp_result = polaris10_enable_sclk_control(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable SCLK control!", result = tmp_result);
-
-       tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable voltage control!", result = tmp_result);
-
-       tmp_result = polaris10_enable_ulv(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable ULV!", result = tmp_result);
-
-       tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable deep sleep master switch!", result = tmp_result);
-
-       tmp_result = polaris10_enable_didt_config(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to enable DIDT config!", result = tmp_result);
-
-       tmp_result = polaris10_start_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to start DPM!", result = tmp_result);
-
-       tmp_result = polaris10_enable_smc_cac(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable SMC CAC!", result = tmp_result);
-
-       tmp_result = polaris10_enable_power_containment(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable power containment!", result = tmp_result);
-
-       tmp_result = polaris10_power_control_set_level(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to power control set level!", result = tmp_result);
-
-       tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable thermal auto throttle!", result = tmp_result);
-
-       tmp_result = polaris10_pcie_performance_request(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "PCIe performance request failed!", result = tmp_result);
-
-       return result;
-}
-
-int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-       PP_ASSERT_WITH_CODE(tmp_result == 0,
-                       "DPM is not running right now, no need to disable DPM!",
-                       return 0);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalController))
-               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
-       tmp_result = polaris10_disable_power_containment(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable power containment!", result = tmp_result);
-
-       tmp_result = polaris10_disable_smc_cac(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable SMC CAC!", result = tmp_result);
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
-       tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable thermal auto throttle!", result = tmp_result);
-
-       tmp_result = polaris10_stop_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to stop DPM!", result = tmp_result);
-
-       tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable deep sleep master switch!", result = tmp_result);
-
-       tmp_result = polaris10_disable_ulv(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to disable ULV!", result = tmp_result);
-
-       tmp_result = polaris10_clear_voting_clients(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to clear voting clients!", result = tmp_result);
-
-       tmp_result = polaris10_reset_to_default(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to reset to default!", result = tmp_result);
-
-       tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
-       PP_ASSERT_WITH_CODE((tmp_result == 0),
-                       "Failed to force to switch arbf0!", result = tmp_result);
-
-       return result;
-}
-
-int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
-       return 0;
-}
-
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-       return phm_hwmgr_backend_fini(hwmgr);
-}
-
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_DynamicPatchPowerState);
-
-       if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_EnableMVDDControl);
-
-       if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ControlVDDCI);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                        PHM_PlatformCaps_TablelessHardwareInterface);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableSMU7ThermalManagement);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DynamicPowerManagement);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_UnTabledHardwareInterface);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_TablelessHardwareInterface);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_SMC);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_NonABMSupportInPPLib);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_DynamicUVDState);
-
-       /* Power tune caps: assume disabled */
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_SQRamping);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_DBRamping);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_TDRamping);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_TCPRamping);
-
-       if (hwmgr->powercontainment_enabled)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                           PHM_PlatformCaps_PowerContainment);
-       else
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                           PHM_PlatformCaps_PowerContainment);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                                       PHM_PlatformCaps_CAC);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_RegulatorHot);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_AutomaticDCTransition);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_ODFuzzyFanControlSupport);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
-       if (hwmgr->chip_id == CHIP_POLARIS11)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_SPLLShutdownSupport);
-       return 0;
-}
-
-static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       polaris10_initialize_power_tune_defaults(hwmgr);
-
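-       /* PCIe gen/lane bounds are seeded at opposite extremes (max at the
-        * lowest value, min at the highest), presumably so later code can
-        * widen them while accumulating limits.
-        */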
-       data->pcie_gen_performance.max = PP_PCIEGen1;
-       data->pcie_gen_performance.min = PP_PCIEGen3;
-       data->pcie_gen_power_saving.max = PP_PCIEGen1;
-       data->pcie_gen_power_saving.min = PP_PCIEGen3;
-       data->pcie_lane_performance.max = 0;
-       data->pcie_lane_performance.min = 16;
-       data->pcie_lane_power_saving.max = 0;
-       data->pcie_lane_power_saving.min = 16;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint16_t vv_id;
-       uint32_t vddc = 0;
-       uint16_t i, j;
-       uint32_t sclk = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       int result;
-
-       for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
-               vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-               if (!phm_get_sclk_for_voltage_evv(hwmgr,
-                               table_info->vddc_lookup_table, vv_id, &sclk)) {
-                       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_ClockStretcher)) {
-                               for (j = 1; j < sclk_table->count; j++) {
-                                       if (sclk_table->entries[j].clk == sclk &&
-                                                       sclk_table->entries[j].cks_enable == 0) {
-                                               sclk += 5000;
-                                               break;
-                                       }
-                               }
-                       }
-
-                       if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
-                                               VOLTAGE_TYPE_VDDC,
-                                               sclk, vv_id, &vddc) != 0) {
-                               printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
-                               continue;
-                       }
-
-                       /* Make sure vddc is less than 2 V (200000 in units
-                        * of 0.01 mV), or else it could burn the ASIC.
-                        */
-                       PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
-                                       "Invalid VDDC value", result = -EINVAL);
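-                       /* Note: result only records the failure; the loop
-                        * continues and this function still returns 0.
-                        */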
-
-                       /* the voltage should not be zero nor equal to leakage ID */
-                       if (vddc != 0 && vddc != vv_id) {
-                               data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
-                               data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
-                               data->vddc_leakage.count++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-/**
- * Change a virtual leakage voltage to its actual value.
- *
- * @param     hwmgr          the address of the powerplay hardware manager.
- * @param     voltage        pointer to the voltage to patch
- * @param     leakage_table  pointer to the leakage table
- */
-static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-               uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
-{
-       uint32_t index;
-
-       /* search for leakage voltage ID 0xff01 ~ 0xff08 */
-       for (index = 0; index < leakage_table->count; index++) {
-               /* if this voltage matches a leakage voltage ID */
-               /* patch with actual leakage voltage */
-               if (leakage_table->leakage_id[index] == *voltage) {
-                       *voltage = leakage_table->actual_voltage[index];
-                       break;
-               }
-       }
-
-       if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-               printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
-}
-
-/**
-* Patch the voltage lookup table with EVV leakage values.
-*
-* @param     hwmgr          the address of the powerplay hardware manager.
-* @param     lookup_table   pointer to the voltage lookup table
-* @param     leakage_table  pointer to the leakage table
-* @return    always 0
-*/
-static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *lookup_table,
-               struct polaris10_leakage_voltage *leakage_table)
-{
-       uint32_t i;
-
-       for (i = 0; i < lookup_table->count; i++)
-               polaris10_patch_with_vdd_leakage(hwmgr,
-                               &lookup_table->entries[i].us_vdd, leakage_table);
-
-       return 0;
-}
-
-static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
-               struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
-               uint16_t *vddc)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-                       table_info->max_clock_voltage_on_dc.vddc;
-       return 0;
-}
-
-static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
-               struct pp_hwmgr *hwmgr)
-{
-       uint8_t entryId;
-       uint8_t voltageId;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
-                       table_info->vdd_dep_on_mclk;
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-               voltageId = sclk_table->entries[entryId].vddInd;
-               sclk_table->entries[entryId].vddc =
-                               table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-               voltageId = mclk_table->entries[entryId].vddInd;
-               mclk_table->entries[entryId].vddc =
-                       table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       for (entryId = 0; entryId < mm_table->count; ++entryId) {
-               voltageId = mm_table->entries[entryId].vddcInd;
-               mm_table->entries[entryId].vddc =
-                       table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       return 0;
-
-}
-
-static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       /* Need to determine if we need calculated voltage. */
-       return 0;
-}
-
-static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-       /* Need to determine if we need calculated voltage from mm table. */
-       return 0;
-}
-
-static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-       uint32_t table_size, i, j;
-       struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-       table_size = lookup_table->count;
-
-       PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-               "Lookup table is empty", return -EINVAL);
-
-       /* Sort entries in ascending order of us_vdd */
-       for (i = 0; i < table_size - 1; i++) {
-               for (j = i + 1; j > 0; j--) {
-                       if (lookup_table->entries[j].us_vdd <
-                                       lookup_table->entries[j - 1].us_vdd) {
-                               tmp_voltage_lookup_record = lookup_table->entries[j - 1];
-                               lookup_table->entries[j - 1] = lookup_table->entries[j];
-                               lookup_table->entries[j] = tmp_voltage_lookup_record;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       int tmp_result;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
-                       table_info->vddc_lookup_table, &(data->vddc_leakage));
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
-                       &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
-       if (tmp_result)
-               result = tmp_result;
-
-       tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
-       if (tmp_result)
-               result = tmp_result;
-
-       return result;
-}
-
-static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-                                               table_info->vdd_dep_on_sclk;
-       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-                                               table_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-               "VDD dependency on SCLK table is missing.       \
-               This table is mandatory", return -EINVAL);
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-               "VDD dependency on SCLK table has to have at least 1 entry.     \
-               This table is mandatory", return -EINVAL);
-
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-               "VDD dependency on MCLK table is missing.       \
-               This table is mandatory", return -EINVAL);
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-               "VDD dependency on MCLK table has to have at least 1 entry.     \
-               This table is mandatory", return -EINVAL);
-
-       table_info->max_clock_voltage_on_ac.sclk =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-       table_info->max_clock_voltage_on_ac.mclk =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-       table_info->max_clock_voltage_on_ac.vddc =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-       table_info->max_clock_voltage_on_ac.vddci =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-       hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
-
-       return 0;
-}
-
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *table_info =
-                      (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                       table_info->vdd_dep_on_mclk;
-       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-                       table_info->vddc_lookup_table;
-       uint32_t i;
-
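-       /* Workaround for Polaris10 rev 0xC7: ensure the top MCLK DPM entry
-        * points at a VDDC of at least 1000 (1.0 V, reading us_vdd as
-        * millivolts per the /100 leakage conversion above); otherwise
-        * repoint it at the first real lookup entry at or above that floor.
-        */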
-       if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
-               if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
-                       return 0;
-
-               for (i = 0; i < lookup_table->count; i++) {
-                       if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
-                               dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
-                               return 0;
-                       }
-               }
-       }
-       return 0;
-}
-
-
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data;
-       struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-       uint32_t temp_reg;
-       int result;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
-
-       data->dll_default_on = false;
-       data->sram_end = SMC_RAM_END;
-       data->mclk_dpm0_activity_target = 0xa;
-       data->disable_dpm_mask = 0xFF;
-       data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
-       data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
-       data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-       data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-
-       data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
-       data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
-       data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
-       data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
-       data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
-       data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
-       data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
-       data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
-
-       data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
-       data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
-
-       /* need to set voltage control types before EVV patching */
-       data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-       data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-       data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-
-       data->enable_tdc_limit_feature = true;
-       data->enable_pkg_pwr_tracking_feature = true;
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-       data->mclk_stutter_mode_threshold = 40000;
-
-       if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                       VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
-               data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableMVDDControl)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
-                       data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
-               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
-                       data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ControlVDDCI)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-                       data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
-               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-                       data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-       }
-
-       if (table_info->cac_dtp_table->usClockStretchAmount != 0)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_ClockStretcher);
-
-       polaris10_set_features_platform_caps(hwmgr);
-
-       polaris10_patch_voltage_workaround(hwmgr);
-       polaris10_init_dpm_defaults(hwmgr);
-
-       /* Get leakage voltage based on leakage ID. */
-       result = polaris10_get_evv_voltages(hwmgr);
-
-       if (result) {
-               printk("Get EVV Voltage Failed.  Abort Driver loading!\n");
-               return -1;
-       }
-
-       polaris10_complete_dependency_tables(hwmgr);
-       polaris10_set_private_data_based_on_pptable(hwmgr);
-
-       /* Initialize Dynamic State Adjustment Rule Settings */
-       result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
-       if (0 == result) {
-               struct cgs_system_info sys_info = {0};
-
-               data->is_tlu_enabled = false;
-
-               hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-                                                       POLARIS10_MAX_HARDWARE_POWERLEVELS;
-               hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-               hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
-               if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
-                       temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-                       switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
-                       case 0:
-                               temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
-                               break;
-                       case 1:
-                               temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
-                               break;
-                       case 2:
-                               temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
-                               break;
-                       case 3:
-                               temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
-                               break;
-                       case 4:
-                               temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
-                               break;
-                       default:
-                               PP_ASSERT_WITH_CODE(0,
-                               "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
-                               );
-                               break;
-                       }
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
-               }
-
-               if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
-                       hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
-                               (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
-                               (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
-                               (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
-
-                       table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
-                                                                       (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
-
-                       table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-                       table_info->cac_dtp_table->usOperatingTempStep = 1;
-                       table_info->cac_dtp_table->usOperatingTempHyst = 1;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-                                      hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
-                       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-                                      hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
-                                      table_info->cac_dtp_table->usOperatingTempMinLimit;
-
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
-                                      table_info->cac_dtp_table->usOperatingTempMaxLimit;
-
-                       hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
-                                      table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-
-                       hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
-                                      table_info->cac_dtp_table->usOperatingTempStep;
-
-                       hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
-                                      table_info->cac_dtp_table->usTargetOperatingTemp;
-               }
-
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-               else
-                       data->pcie_gen_cap = (uint32_t)sys_info.value;
-               if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-                       data->pcie_spc_cap = 20;
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-               else
-                       data->pcie_lane_cap = (uint32_t)sys_info.value;
-
-               hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-               /* The true clock step depends on the frequency; it is
-                * typically 4.5 or 9 MHz. Here we use 5 MHz.
-                */
-               hwmgr->platform_descriptor.clockStep.engineClock = 500;
-               hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-       } else {
-               /* Ignore return value in here, we are cleaning up a mess. */
-               polaris10_hwmgr_backend_fini(hwmgr);
-       }
-
-       return 0;
-}
-
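-/*
- * Force the highest enabled DPM level for PCIe, SCLK and MCLK. For each
- * domain, the index of the highest set bit in the enable mask is found by
- * shifting the mask right until it reaches zero; that level is then sent
- * to the SMC.
- */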
-static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t level, tmp;
-
-       if (!data->pcie_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_ForceLevel, level);
-               }
-       }
-
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                               (1 << level));
-               }
-       }
-
-       if (!data->mclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (level)
-                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                               (1 << level));
-               }
-       }
-
-       return 0;
-}
-
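-/*
- * Re-apply the DAL minimum voltage request, then push the current SCLK and
- * MCLK DPM level enable masks down to the SMC.
- */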
-static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       phm_apply_dal_min_voltage_request(hwmgr);
-
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-       }
-
-       if (!data->mclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-       }
-
-       return 0;
-}
-
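-/* Release any forced PCIe level and restore the full DPM level enable masks. */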
-static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!polaris10_is_dpm_running(hwmgr))
-               return -EINVAL;
-
-       if (!data->pcie_dpm_key_disabled) {
-               smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_PCIeDPM_UnForceLevel);
-       }
-
-       return polaris10_upload_dpm_level_enable_mask(hwmgr);
-}
-
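-/*
- * Force the lowest enabled DPM level: SCLK and MCLK are pinned by sending
- * an enable mask with only the lowest enabled bit set, while the PCIe
- * level is forced explicitly.
- */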
-static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data =
-                       (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t level;
-
-       if (!data->sclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       level = phm_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                                           (1 << level));
-               }
-       }
-
-       if (!data->mclk_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       level = phm_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                                           (1 << level));
-               }
-       }
-
-       if (!data->pcie_dpm_key_disabled) {
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-                       level = phm_get_lowest_enabled_level(hwmgr,
-                                                             data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                           PPSMC_MSG_PCIeDPM_ForceLevel,
-                                                           (level));
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
-                               enum amd_dpm_forced_level level)
-{
-       int ret = 0;
-
-       switch (level) {
-       case AMD_DPM_FORCED_LEVEL_HIGH:
-               ret = polaris10_force_dpm_highest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_LOW:
-               ret = polaris10_force_dpm_lowest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_AUTO:
-               ret = polaris10_unforce_dpm_levels(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               break;
-       }
-
-       hwmgr->dpm_level = level;
-
-       return ret;
-}
-
-static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-       return sizeof(struct polaris10_power_state);
-}
-
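-/*
- * Adjust the requested power state: cap clocks at the DC limits when on DC
- * power, honour stable-pstate and arbiter minimum-clock requests, apply
- * overdrive limits, and disable MCLK switching when more than one display
- * is active or frame lock requires it.
- */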
-static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-                               struct pp_power_state *request_ps,
-                               const struct pp_power_state *current_ps)
-{
-       struct polaris10_power_state *polaris10_ps =
-                               cast_phw_polaris10_power_state(&request_ps->hardware);
-       uint32_t sclk;
-       uint32_t mclk;
-       struct PP_Clocks minimum_clocks = {0};
-       bool disable_mclk_switching;
-       bool disable_mclk_switching_for_frame_lock;
-       struct cgs_display_info info = {0};
-       const struct phm_clock_and_voltage_limits *max_limits;
-       uint32_t i;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int32_t count;
-       int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-       data->battery_state = (PP_StateUILabel_Battery ==
-                       request_ps->classification.ui_label);
-
-       PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
-                                "VI should always have 2 performance levels",
-                               );
-
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-                       &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-                       &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-       /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
-               for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-                       if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
-                               polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
-                       if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
-                               polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
-               }
-       }
-
-       polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-       polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
-       /* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState)) {
-               max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-               stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-               for (count = table_info->vdd_dep_on_sclk->count - 1;
-                               count >= 0; count--) {
-                       if (stable_pstate_sclk >=
-                                       table_info->vdd_dep_on_sclk->entries[count].clk) {
-                               stable_pstate_sclk =
-                                               table_info->vdd_dep_on_sclk->entries[count].clk;
-                               break;
-                       }
-               }
-
-               if (count < 0)
-                       stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
-               stable_pstate_mclk = max_limits->mclk;
-
-               minimum_clocks.engineClock = stable_pstate_sclk;
-               minimum_clocks.memoryClock = stable_pstate_mclk;
-       }
-
-       if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-               minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-       if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-               minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-       polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-       if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-                               hwmgr->platform_descriptor.overdriveLimit.engineClock),
-                               "Overdrive sclk exceeds limit",
-                               hwmgr->gfx_arbiter.sclk_over_drive =
-                                               hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-               if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-                       polaris10_ps->performance_levels[1].engine_clock =
-                                       hwmgr->gfx_arbiter.sclk_over_drive;
-       }
-
-       if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-                               hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-                               "Overdrive mclk exceeds limit",
-                               hwmgr->gfx_arbiter.mclk_over_drive =
-                                               hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-               if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-                       polaris10_ps->performance_levels[1].memory_clock =
-                                       hwmgr->gfx_arbiter.mclk_over_drive;
-       }
-
-       disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-                                   hwmgr->platform_descriptor.platformCaps,
-                                   PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
-
-       sclk = polaris10_ps->performance_levels[0].engine_clock;
-       mclk = polaris10_ps->performance_levels[0].memory_clock;
-
-       if (disable_mclk_switching)
-               mclk = polaris10_ps->performance_levels
-               [polaris10_ps->performance_level_count - 1].memory_clock;
-
-       if (sclk < minimum_clocks.engineClock)
-               sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
-                               max_limits->sclk : minimum_clocks.engineClock;
-
-       if (mclk < minimum_clocks.memoryClock)
-               mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
-                               max_limits->mclk : minimum_clocks.memoryClock;
-
-       polaris10_ps->performance_levels[0].engine_clock = sclk;
-       polaris10_ps->performance_levels[0].memory_clock = mclk;
-
-       polaris10_ps->performance_levels[1].engine_clock =
-               (polaris10_ps->performance_levels[1].engine_clock >=
-                               polaris10_ps->performance_levels[0].engine_clock) ?
-                                               polaris10_ps->performance_levels[1].engine_clock :
-                                               polaris10_ps->performance_levels[0].engine_clock;
-
-       if (disable_mclk_switching) {
-               if (mclk < polaris10_ps->performance_levels[1].memory_clock)
-                       mclk = polaris10_ps->performance_levels[1].memory_clock;
-
-               polaris10_ps->performance_levels[0].memory_clock = mclk;
-               polaris10_ps->performance_levels[1].memory_clock = mclk;
-       } else {
-               if (polaris10_ps->performance_levels[1].memory_clock <
-                               polaris10_ps->performance_levels[0].memory_clock)
-                       polaris10_ps->performance_levels[1].memory_clock =
-                                       polaris10_ps->performance_levels[0].memory_clock;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState)) {
-               for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-                       polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-                       polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-                       polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-                       polaris10_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
-               }
-       }
-       return 0;
-}
-
-static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct polaris10_power_state  *polaris10_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-       if (low)
-               return polaris10_ps->performance_levels[0].memory_clock;
-       else
-               return polaris10_ps->performance_levels
-                               [polaris10_ps->performance_level_count-1].memory_clock;
-}
-
-static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct polaris10_power_state  *polaris10_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-       if (low)
-               return polaris10_ps->performance_levels[0].engine_clock;
-       else
-               return polaris10_ps->performance_levels
-                               [polaris10_ps->performance_level_count-1].engine_clock;
-}
-
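-/*
- * Read the VBIOS boot clocks, voltages and PCIe settings from the ATOM
- * firmware info table and patch them into the boot power state.
- */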
-static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-                                       struct pp_hw_power_state *hw_ps)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
-       ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-       uint16_t size;
-       uint8_t frev, crev;
-       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-       /* First retrieve the Boot clocks and VDDC from the firmware info table.
-        * We assume here that fw_info is unchanged if this call fails.
-        */
-       fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-                       hwmgr->device, index,
-                       &size, &frev, &crev);
-       if (!fw_info)
-               /* During a test, there is no firmware info table. */
-               return 0;
-
-       /* Patch the state. */
-       data->vbios_boot_state.sclk_bootup_value =
-                       le32_to_cpu(fw_info->ulDefaultEngineClock);
-       data->vbios_boot_state.mclk_bootup_value =
-                       le32_to_cpu(fw_info->ulDefaultMemoryClock);
-       data->vbios_boot_state.mvdd_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-       data->vbios_boot_state.vddc_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-       data->vbios_boot_state.vddci_bootup_value =
-                       le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-       data->vbios_boot_state.pcie_gen_bootup_value =
-                       phm_get_current_pcie_speed(hwmgr);
-
-       data->vbios_boot_state.pcie_lane_bootup_value =
-                       (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
-
-       /* set boot power state */
-       ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-       ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-       ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-       ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-       return 0;
-}
-
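-/*
- * Callback invoked for each powerplay table entry: translate the ATOM state
- * entry into a low and a high performance level of the polaris10 power
- * state, checking the level count against the SMC and driver limits.
- */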
-static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-               void *state, struct pp_power_state *power_state,
-               void *pp_table, uint32_t classification_flag)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_power_state  *polaris10_power_state =
-                       (struct polaris10_power_state *)(&(power_state->hardware));
-       struct polaris10_performance_level *performance_level;
-       ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-       ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-                       (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-       PPTable_Generic_SubTable_Header *sclk_dep_table =
-                       (PPTable_Generic_SubTable_Header *)
-                       (((unsigned long)powerplay_table) +
-                               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
-       ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-                       (ATOM_Tonga_MCLK_Dependency_Table *)
-                       (((unsigned long)powerplay_table) +
-                               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-       /* The following fields are not initialized here: id, orderedList, allStatesList */
-       power_state->classification.ui_label =
-                       (le16_to_cpu(state_entry->usClassification) &
-                       ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-                       ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-       power_state->classification.flags = classification_flag;
-       /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-       power_state->classification.temporary_state = false;
-       power_state->classification.to_be_deleted = false;
-
-       power_state->validation.disallowOnDC =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-                                       ATOM_Tonga_DISALLOW_ON_DC));
-
-       power_state->pcie.lanes = 0;
-
-       power_state->display.disableFrameModulation = false;
-       power_state->display.limitRefreshrate = false;
-       power_state->display.enableVariBright =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-                                       ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-       power_state->validation.supportedPowerLevels = 0;
-       power_state->uvd_clocks.VCLK = 0;
-       power_state->uvd_clocks.DCLK = 0;
-       power_state->temperatures.min = 0;
-       power_state->temperatures.max = 0;
-
-       performance_level = &(polaris10_power_state->performance_levels
-                       [polaris10_power_state->performance_level_count++]);
-
-       PP_ASSERT_WITH_CODE(
-                       (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
-                       "Performance levels exceeds SMC limit!",
-                       return -1);
-
-       PP_ASSERT_WITH_CODE(
-                       (polaris10_power_state->performance_level_count <=
-                                       hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-                       "Performance levels exceeds Driver limit!",
-                       return -1);
-
-       /* Performance levels are arranged from low to high. */
-       performance_level->memory_clock = mclk_dep_table->entries
-                       [state_entry->ucMemoryClockIndexLow].ulMclk;
-       if (sclk_dep_table->ucRevId == 0)
-               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
-                       [state_entry->ucEngineClockIndexLow].ulSclk;
-       else if (sclk_dep_table->ucRevId == 1)
-               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
-                       [state_entry->ucEngineClockIndexLow].ulSclk;
-       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-                       state_entry->ucPCIEGenLow);
-       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-                       state_entry->ucPCIELaneLow);
-
-       performance_level = &(polaris10_power_state->performance_levels
-                       [polaris10_power_state->performance_level_count++]);
-       performance_level->memory_clock = mclk_dep_table->entries
-                       [state_entry->ucMemoryClockIndexHigh].ulMclk;
-
-       if (sclk_dep_table->ucRevId == 0)
-               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
-                       [state_entry->ucEngineClockIndexHigh].ulSclk;
-       else if (sclk_dep_table->ucRevId == 1)
-               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
-                       [state_entry->ucEngineClockIndexHigh].ulSclk;
-
-       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-                       state_entry->ucPCIEGenHigh);
-       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-                       state_entry->ucPCIELaneHigh);
-
-       return 0;
-}
-
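-/*
- * Build one pp_power_state from powerplay table entry entry_index, then
- * sanity-check single-level MCLK dependency tables against the VBIOS boot
- * values and track the PCIe gen/lane ranges used by the performance and
- * battery UI states.
- */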
-static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-               unsigned long entry_index, struct pp_power_state *state)
-{
-       int result;
-       struct polaris10_power_state *ps;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                       table_info->vdd_dep_on_mclk;
-
-       state->hardware.magic = PHM_VIslands_Magic;
-
-       ps = (struct polaris10_power_state *)(&state->hardware);
-
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
-                       polaris10_get_pp_table_entry_callback_func);
-
-       /* This is the earliest time we have both the dependency tables and the
-        * VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry retrieves
-        * the VBIOS boot state. If there is only one VDDCI/MCLK level, check
-        * that it matches the VBIOS boot state.
-        */
-       if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-               if (dep_mclk_table->entries[0].clk !=
-                               data->vbios_boot_state.mclk_bootup_value)
-                       printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot MCLK level");
-               if (dep_mclk_table->entries[0].vddci !=
-                               data->vbios_boot_state.vddci_bootup_value)
-                       printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot VDDCI level");
-       }
-
-       /* set DC compatible flag if this state supports DC */
-       if (!state->validation.disallowOnDC)
-               ps->dc_compatible = true;
-
-       if (state->classification.flags & PP_StateClassificationFlag_ACPI)
-               data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
-       ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
-       ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
-       if (!result) {
-               uint32_t i;
-
-               switch (state->classification.ui_label) {
-               case PP_StateUILabel_Performance:
-                       data->use_pcie_performance_levels = true;
-                       for (i = 0; i < ps->performance_level_count; i++) {
-                               if (data->pcie_gen_performance.max <
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.max =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_performance.min >
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.min =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_performance.max <
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.max =
-                                                       ps->performance_levels[i].pcie_lane;
-                               if (data->pcie_lane_performance.min >
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.min =
-                                                       ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               case PP_StateUILabel_Battery:
-                       data->use_pcie_power_saving_levels = true;
-
-                       for (i = 0; i < ps->performance_level_count; i++) {
-                               if (data->pcie_gen_power_saving.max <
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.max =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_power_saving.min >
-                                               ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.min =
-                                                       ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_power_saving.max <
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.max =
-                                                       ps->performance_levels[i].pcie_lane;
-
-                               if (data->pcie_lane_power_saving.min >
-                                               ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.min =
-                                                       ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
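-/*
- * seq_file helper: query the SMC for the current SCLK/MCLK frequencies,
- * read the average graphics activity from the SMU soft registers, and
- * print them together with the UVD/VCE power-gating state.
- */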
-static void
-polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-       uint32_t sclk, mclk, activity_percent;
-       uint32_t offset;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
-       sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
-       mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-       seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
-                       mclk / 100, sclk / 100);
-
-       offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
-       activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
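-       /* Round the 8.8 fixed-point activity value to an integer percentage. */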
-       activity_percent += 0x80;
-       activity_percent >>= 8;
-
-       seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-       seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-       seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
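-/*
- * Compare the new state's top SCLK/MCLK (and the display count) against the
- * current DPM tables and record the required updates in
- * need_update_smu7_dpm_table.
- */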
-static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       const struct polaris10_power_state *polaris10_ps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       uint32_t sclk = polaris10_ps->performance_levels
-                       [polaris10_ps->performance_level_count - 1].engine_clock;
-       struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       uint32_t mclk = polaris10_ps->performance_levels
-                       [polaris10_ps->performance_level_count - 1].memory_clock;
-       struct PP_Clocks min_clocks = {0};
-       uint32_t i;
-       struct cgs_display_info info = {0};
-
-       data->need_update_smu7_dpm_table = 0;
-
-       for (i = 0; i < sclk_table->count; i++) {
-               if (sclk == sclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= sclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-       else {
-               /* TODO: Check SCLK in DAL's minimum clocks
-                * in case DeepSleep divider update is required.
-                */
-               if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
-                       (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
-                               data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
-                       data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-       }
-
-       for (i = 0; i < mclk_table->count; i++) {
-               if (mclk == mclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= mclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-       return 0;
-}
-
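-/*
- * Return the PCIe speed that corresponds to the highest engine clock of the
- * given power state, falling back to the top of the PCIe speed table.
- */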
-static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
-               const struct polaris10_power_state *polaris10_ps)
-{
-       uint32_t i;
-       uint32_t sclk, max_sclk = 0;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
-       for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-               sclk = polaris10_ps->performance_levels[i].engine_clock;
-               if (max_sclk < sclk)
-                       max_sclk = sclk;
-       }
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-                       return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
-                                       dpm_table->pcie_speed_table.dpm_levels
-                                       [dpm_table->pcie_speed_table.count - 1].value :
-                                       dpm_table->pcie_speed_table.dpm_levels[i].value);
-       }
-
-       return 0;
-}
-
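-/*
- * If the new state needs a faster PCIe link, ask the platform (via ACPI)
- * for the higher speed before the state switch; remember that a PSPP
- * notification is required if the link should instead be lowered afterwards.
- */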
-static int polaris10_request_link_speed_change_before_state_change(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_power_state *polaris10_nps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-       const struct polaris10_power_state *polaris10_cps =
-                       cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
-       uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
-       uint16_t current_link_speed;
-
-       if (data->force_pcie_gen == PP_PCIEGenInvalid)
-               current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
-       else
-               current_link_speed = data->force_pcie_gen;
-
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-       data->pspp_notify_required = false;
-
-       if (target_link_speed > current_link_speed) {
-               switch (target_link_speed) {
-               case PP_PCIEGen3:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-                               break;
-                       data->force_pcie_gen = PP_PCIEGen2;
-                       if (current_link_speed == PP_PCIEGen2)
-                               break;
-                       /* fall through - Gen3 request failed, try Gen2 */
-               case PP_PCIEGen2:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-                               break;
-                       /* fall through - request failed, force the current speed */
-               default:
-                       data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
-                       break;
-               }
-       } else {
-               if (target_link_speed < current_link_speed)
-                       data->pspp_notify_required = true;
-       }
-
-       return 0;
-}
-
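-/*
- * Freeze SCLK/MCLK DPM in the SMC before the corresponding DPM tables are
- * rebuilt, so that the firmware does not switch levels mid-update.
- */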
-static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-                                   "Trying to freeze SCLK DPM when DPM is disabled",
-                               );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SCLKDPM_FreezeLevel),
-                               "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-                               return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-                DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-                                   "Trying to freeze MCLK DPM when DPM is disabled",
-                               );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MCLKDPM_FreezeLevel),
-                               "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-                               return -1);
-       }
-
-       return 0;
-}
-
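-/*
- * Rebuild the SCLK/MCLK DPM tables for an overdriven state: intermediate
- * levels are rescaled from the golden (default) table by the same
- * percentage as the top level, then the tables are re-uploaded to the SMC.
- */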
-static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result = 0;
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       const struct polaris10_power_state *polaris10_ps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t sclk = polaris10_ps->performance_levels
-                       [polaris10_ps->performance_level_count - 1].engine_clock;
-       uint32_t mclk = polaris10_ps->performance_levels
-                       [polaris10_ps->performance_level_count - 1].memory_clock;
-       struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
-       struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
-       uint32_t dpm_count, clock_percent;
-       uint32_t i;
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-               dpm_table->sclk_table.dpm_levels
-               [dpm_table->sclk_table.count - 1].value = sclk;
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-                       /* Need to do calculation based on the golden DPM table
-                        * as the Heatmap GPU Clock axis is also based on the
-                        * default values
-                        */
-                       PP_ASSERT_WITH_CODE(
-                               (golden_dpm_table->sclk_table.dpm_levels
-                                               [golden_dpm_table->sclk_table.count - 1].value != 0),
-                               "Divide by 0!",
-                               return -1);
-                       dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
-                       for (i = dpm_count; i > 1; i--) {
-                               if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
-                                       clock_percent =
-                                             ((sclk
-                                               - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
-                                               ) * 100)
-                                               / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value +
-                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
-                                                               clock_percent)/100;
-
-                               } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
-                                       clock_percent =
-                                               ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
-                                               - sclk) * 100)
-                                               / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value -
-                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
-                                                                       clock_percent) / 100;
-                               } else
-                                       dpm_table->sclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->sclk_table.dpm_levels[i].value;
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-               dpm_table->mclk_table.dpm_levels
-                       [dpm_table->mclk_table.count - 1].value = mclk;
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-                       PP_ASSERT_WITH_CODE(
-                                       (golden_dpm_table->mclk_table.dpm_levels
-                                               [golden_dpm_table->mclk_table.count-1].value != 0),
-                                       "Divide by 0!",
-                                       return -1);
-                       dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
-                       for (i = dpm_count; i > 1; i--) {
-                               if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
-                                       clock_percent = ((mclk -
-                                       golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
-                                       / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value +
-                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
-                                                       clock_percent) / 100;
-
-                               } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
-                                       clock_percent = (
-                                        (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
-                                       * 100)
-                                       / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value -
-                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
-                                                                       clock_percent) / 100;
-                               } else
-                                       dpm_table->mclk_table.dpm_levels[i].value =
-                                                       golden_dpm_table->mclk_table.dpm_levels[i].value;
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
-               result = polaris10_populate_all_graphic_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-                               return result);
-       }
-
-       if (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
-               /* Populate the MCLK DPM table to the SMU7. */
-               result = polaris10_populate_all_memory_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-                               return result);
-       }
-
-       return result;
-}
-
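-/* Enable only the DPM levels whose value lies within [low_limit, high_limit]. */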
-static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-                       struct polaris10_single_dpm_table *dpm_table,
-                       uint32_t low_limit, uint32_t high_limit)
-{
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->count; i++) {
-               if ((dpm_table->dpm_levels[i].value < low_limit)
-               || (dpm_table->dpm_levels[i].value > high_limit))
-                       dpm_table->dpm_levels[i].enabled = false;
-               else
-                       dpm_table->dpm_levels[i].enabled = true;
-       }
-
-       return 0;
-}
-
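-/* Restrict the SCLK and MCLK DPM tables to the clock range of the given state. */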
-static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
-               const struct polaris10_power_state *polaris10_ps)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t high_limit_count;
-
-       PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
-                       "power state did not have any performance level",
-                       return -1);
-
-       high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
-
-       polaris10_trim_single_dpm_states(hwmgr,
-                       &(data->dpm_table.sclk_table),
-                       polaris10_ps->performance_levels[0].engine_clock,
-                       polaris10_ps->performance_levels[high_limit_count].engine_clock);
-
-       polaris10_trim_single_dpm_states(hwmgr,
-                       &(data->dpm_table.mclk_table),
-                       polaris10_ps->performance_levels[0].memory_clock,
-                       polaris10_ps->performance_levels[high_limit_count].memory_clock);
-
-       return 0;
-}
-
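-/* Trim the DPM tables to the new state and recompute the SCLK/MCLK/PCIe enable masks. */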
-static int polaris10_generate_dpm_level_enable_mask(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result;
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_power_state *polaris10_ps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-
-       result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
-       if (result)
-               return result;
-
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-       return 0;
-}
-
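-/* Simple SMC message wrappers to enable or disable UVD, VCE and SAMU DPM. */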
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_UVDDPM_Enable :
-                       PPSMC_MSG_UVDDPM_Disable);
-}
-
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_VCEDPM_Enable :
-                       PPSMC_MSG_VCEDPM_Disable);
-}
-
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                       PPSMC_MSG_SAMUDPM_Enable :
-                       PPSMC_MSG_SAMUDPM_Disable);
-}
-
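-/*
- * Program the UVD boot level (top byte of the word at UvdBootLevel in the
- * SMU DPM table); when UVD DPM is unsupported or stable-pstate is active,
- * pin the UVD enabled mask to that level, then gate or ungate UVD DPM.
- */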
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               data->smc_state_table.UvdBootLevel = 0;
-               if (table_info->mm_dep_table->count > 0)
-                       data->smc_state_table.UvdBootLevel =
-                                       (uint8_t) (table_info->mm_dep_table->count - 1);
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
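-               /* round the offset down to a 32-bit boundary; the SMC indirect interface is dword-addressed */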
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
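-               /* UvdBootLevel occupies bits 31:24 of this dword: clear the top byte, then OR in the new level */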
-               mm_boot_level_value &= 0x00FFFFFF;
-               mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_UVDDPM) ||
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_UVDDPM_SetEnabledMask,
-                                       (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-       }
-
-       return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                               PHM_PlatformCaps_StablePState))
-                       data->smc_state_table.VceBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-               else
-                       data->smc_state_table.VceBootLevel = 0;
-
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFF00FFFF;
-               mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_VCEDPM_SetEnabledMask,
-                                       (uint32_t)1 << data->smc_state_table.VceBootLevel);
-       }
-
-       polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
-
-       return 0;
-}
-
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-       if (!bgate) {
-               data->smc_state_table.SamuBootLevel = 0;
-               mm_boot_level_offset = data->dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFFFFFF00;
-               mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
-               cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                                       (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
-       }
-
-       return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
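-               /* convert to the SMC's byte order (big-endian) before copying the value into SMC SRAM */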
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = polaris10_copy_bytes_to_smc(
-                               hwmgr->smumgr,
-                               data->dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               data->sram_end);
-       }
-
-       return result;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
-               return polaris10_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-
-               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
-                               );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-                       "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-                       return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
-                               );
-               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
-                   "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-                   return -1);
-       }
-
-       data->need_update_smu7_dpm_table = 0;
-
-       return 0;
-}
-
-static int polaris10_notify_link_speed_change_after_state_change(
-               struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_power_state *polaris10_ps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-       uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
-       uint8_t  request;
-
-       if (data->pspp_notify_required) {
-               if (target_link_speed == PP_PCIEGen3)
-                       request = PCIE_PERF_REQ_GEN3;
-               else if (target_link_speed == PP_PCIEGen2)
-                       request = PCIE_PERF_REQ_GEN2;
-               else
-                       request = PCIE_PERF_REQ_GEN1;
-
-               if (request == PCIE_PERF_REQ_GEN1 &&
-                               phm_get_current_pcie_speed(hwmgr) > 0)
-                       return 0;
-
-               if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-                       if (PP_PCIEGen2 == target_link_speed)
-                               printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
-                       else
-                               printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-               (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
-       return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
-}
-
-static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-       int tmp_result, result = 0;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to find DPM states clocks in DPM table!",
-                       result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result =
-                       polaris10_request_link_speed_change_before_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to request link speed change before state change!",
-                               result = tmp_result);
-       }
-
-       tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-       tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to populate and upload SCLK MCLK DPM levels!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to generate DPM level enabled mask!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_update_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to update SCLK threshold!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to program memory timing parameters!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_notify_smc_display(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to notify smc display settings!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to unfreeze SCLK MCLK DPM!",
-                       result = tmp_result);
-
-       tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to upload DPM level enabled mask!",
-                       result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result =
-                       polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                               "Failed to notify link speed change after state change!",
-                               result = tmp_result);
-       }
-       data->apply_optimized_settings = false;
-       return result;
-}
-
-static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-                       us_max_fan_pwm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-       PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
-       return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
-}
-
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
-       uint32_t num_active_displays = 0;
-       struct cgs_display_info info = {0};
-       info.mode_info = NULL;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       num_active_displays = info.display_count;
-
-       if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
-               polaris10_notify_smc_display_change(hwmgr, false);
-
-       return 0;
-}
-
-/**
- * Programs the display gap.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t num_active_displays = 0;
-       uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-       uint32_t display_gap2;
-       uint32_t pre_vbi_time_in_us;
-       uint32_t frame_time_in_us;
-       uint32_t ref_clock;
-       uint32_t refresh_rate = 0;
-       struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
-
-       info.mode_info = &mode_info;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       num_active_displays = info.display_count;
-
-       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-       ref_clock = mode_info.ref_clock;
-       refresh_rate = mode_info.refresh_rate;
-
-       if (0 == refresh_rate)
-               refresh_rate = 60;
-
-       frame_time_in_us = 1000000 / refresh_rate;
-
-       pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-       data->frame_time_x2 = frame_time_in_us * 2 / 100;
-
-       display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-       return 0;
-}
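-
-/*
- * Worked example for the timing math above, with hypothetical display
- * parameters (60 Hz refresh, 500 us of vblank; the numbers are illustrative):
- *
- *   frame_time_in_us   = 1000000 / 60      = 16666 us per frame
- *   pre_vbi_time_in_us = 16666 - 200 - 500 = 15966 us
- *   frame_time_x2      = 16666 * 2 / 100   = 333 (two frames, in 100 us units)
- */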
-
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-       return polaris10_program_display_gap(hwmgr);
-}
-
-/**
- * Set the maximum target operating fan output RPM.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    us_max_fan_rpm  maximum operating fan RPM value.
- * @return   the response from the SMC, or 0 if hardware access is blocked.
- */
-static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-                       us_max_fan_rpm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-                                       const void *thermal_interrupt_info)
-{
-       return 0;
-}
-
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       bool is_update_required = false;
-       struct cgs_display_info info = {0, 0, NULL};
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
-       if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-               cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
-               if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
-                       (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
-                               data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
-                       is_update_required = true;
-*/
-       return is_update_required;
-}
-
-static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
-                                                          const struct polaris10_performance_level *pl2)
-{
-       return ((pl1->memory_clock == pl2->memory_clock) &&
-                 (pl1->engine_clock == pl2->engine_clock) &&
-                 (pl1->pcie_gen == pl2->pcie_gen) &&
-                 (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
-       const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
-       const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
-       int i;
-
-       if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
-               return -EINVAL;
-
-       /* If the two states don't even have the same number of performance levels they cannot be the same state. */
-       if (psa->performance_level_count != psb->performance_level_count) {
-               *equal = false;
-               return 0;
-       }
-
-       for (i = 0; i < psa->performance_level_count; i++) {
-               if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
-                       /* If we have found even one performance level pair that is different, the states are different. */
-                       *equal = false;
-                       return 0;
-               }
-       }
-
-       /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
-       *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
-       *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
-       *equal &= (psa->sclk_threshold == psb->sclk_threshold);
-
-       return 0;
-}
-
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       uint32_t vbios_version;
-
-       /*  Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
-
-       phm_get_mc_microcode_version(hwmgr);
-       vbios_version = hwmgr->microcode_version_info.MC & 0xf;
-       /*  Full version of MC ucode has already been loaded. */
-       if (vbios_version == 0) {
-               data->need_long_memory_training = false;
-               return 0;
-       }
-
-       data->need_long_memory_training = false;
-
-/*
- *     PPMCME_FirmwareDescriptorEntry *pfd = NULL;
-       pfd = &tonga_mcmeFirmware;
-       if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
-               polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
-                                       pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
-                                       pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
-*/
-       return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
-                                               CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
-                                               & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
-
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
-                                               CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
-                                               & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
-
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
-                                               CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
-                                               & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
-
-       return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t temp;
-
-       temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-       data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-                       ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-                        MC_SEQ_MISC0_GDDR5_SHIFT));
-
-       return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-       return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-       data->samu_power_gated = false;
-
-       return 0;
-}
-
-static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       data->low_sclk_interrupt_threshold = 0;
-
-       return 0;
-}
-
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       polaris10_upload_mc_firmware(hwmgr);
-
-       tmp_result = polaris10_read_clock_registers(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to read clock registers!", result = tmp_result);
-
-       tmp_result = polaris10_get_memory_type(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to get memory type!", result = tmp_result);
-
-       tmp_result = polaris10_enable_acpi_power_management(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable ACPI power management!", result = tmp_result);
-
-       tmp_result = polaris10_init_power_gate_state(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to init power gate state!", result = tmp_result);
-
-       tmp_result = phm_get_mc_microcode_version(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to get MC microcode version!", result = tmp_result);
-
-       tmp_result = polaris10_init_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to init sclk threshold!", result = tmp_result);
-
-       return result;
-}
-
-static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, uint32_t mask)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-               return -EINVAL;
-
-       switch (type) {
-       case PP_SCLK:
-               if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-               break;
-       case PP_MCLK:
-               if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-               break;
-       case PP_PCIE:
-       {
-               uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-               uint32_t level = 0;
-
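-               /* collapse the permitted-level mask to the index of its highest set bit; PPSMC_MSG_PCIeDPM_ForceLevel takes a level, not a mask */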
-               while (tmp >>= 1)
-                       level++;
-
-               if (!data->pcie_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_PCIeDPM_ForceLevel,
-                                       level);
-               break;
-       }
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-       uint32_t speedCntl = 0;
-
-       /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
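-       /* LC_CURRENT_DATA_RATE reports the link speed as a level index (0 = Gen1, 1 = Gen2, 2 = Gen3), matching the pcie_table values used in polaris10_print_clock_levels() */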
-       speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-                       ixPCIE_LC_SPEED_CNTL);
-       return((uint16_t)PHM_GET_FIELD(speedCntl,
-                       PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, char *buf)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-       int i, now, size = 0;
-       uint32_t clock, pcie_speed;
-
-       switch (type) {
-       case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < sclk_table->count; i++) {
-                       if (clock > sclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < sclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, sclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < mclk_table->count; i++) {
-                       if (clock > mclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, mclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_PCIE:
-               pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
-               for (i = 0; i < pcie_table->count; i++) {
-                       if (pcie_speed != pcie_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < pcie_table->count; i++)
-                       size += sprintf(buf + size, "%d: %s %s\n", i,
-                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
-                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
-                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
-                                       (i == now) ? "*" : "");
-               break;
-       default:
-               break;
-       }
-       return size;
-}
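-
-/*
- * Sample PP_SCLK output from the function above (hypothetical clocks;
- * dpm_levels[].value is in 10 kHz units, hence the division by 100, and
- * '*' marks the current level):
- *
- *   0: 300MHz
- *   1: 608MHz *
- *   2: 1077MHz
- */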
-
-static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-       if (mode) {
-               /* stop auto-manage */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl))
-                       polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-               polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
-       } else {
-               /* restart auto-manage */
-               polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-       }
-
-       return 0;
-}
-
-static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-       if (hwmgr->fan_ctrl_is_in_default_mode)
-               return hwmgr->fan_ctrl_default_mode;
-       else
-               return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct polaris10_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       int value;
-
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return value;
-}
-
-static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       struct pp_power_state  *ps;
-       struct polaris10_power_state  *polaris10_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-                       value / 100 +
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return 0;
-}
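-
-/*
- * The sclk OD getter and setter above are inverses; with a hypothetical
- * golden top level of 120000 (1200 MHz, in 10 kHz units):
- *
- *   set_sclk_od(hwmgr, 10): top engine_clock = 120000 * 10 / 100 + 120000 = 132000
- *   get_sclk_od() then returns (132000 - 120000) * 100 / 120000 = 10
- */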
-
-static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct polaris10_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       int value;
-
-       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return value;
-}
-
-static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct polaris10_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       struct pp_power_state  *ps;
-       struct polaris10_power_state  *polaris10_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-                       value / 100 +
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return 0;
-}
-
-static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
-       .backend_init = &polaris10_hwmgr_backend_init,
-       .backend_fini = &polaris10_hwmgr_backend_fini,
-       .asic_setup = &polaris10_setup_asic_task,
-       .dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
-       .apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
-       .force_dpm_level = &polaris10_force_dpm_level,
-       .power_state_set = polaris10_set_power_state_tasks,
-       .get_power_state_size = polaris10_get_power_state_size,
-       .get_mclk = polaris10_dpm_get_mclk,
-       .get_sclk = polaris10_dpm_get_sclk,
-       .patch_boot_state = polaris10_dpm_patch_boot_state,
-       .get_pp_table_entry = polaris10_get_pp_table_entry,
-       .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
-       .print_current_perforce_level = polaris10_print_current_perforce_level,
-       .powerdown_uvd = polaris10_phm_powerdown_uvd,
-       .powergate_uvd = polaris10_phm_powergate_uvd,
-       .powergate_vce = polaris10_phm_powergate_vce,
-       .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
-       .update_clock_gatings = polaris10_phm_update_clock_gatings,
-       .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
-       .display_config_changed = polaris10_display_configuration_changed_task,
-       .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
-       .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
-       .get_temperature = polaris10_thermal_get_temperature,
-       .stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
-       .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
-       .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
-       .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
-       .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
-       .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
-       .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
-       .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
-       .check_states_equal = polaris10_check_states_equal,
-       .set_fan_control_mode = polaris10_set_fan_control_mode,
-       .get_fan_control_mode = polaris10_get_fan_control_mode,
-       .force_clock_level = polaris10_force_clock_level,
-       .print_clock_levels = polaris10_print_clock_levels,
-       .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
-       .get_sclk_od = polaris10_get_sclk_od,
-       .set_sclk_od = polaris10_set_sclk_od,
-       .get_mclk_od = polaris10_get_mclk_od,
-       .set_mclk_od = polaris10_set_mclk_od,
-};
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-       hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
-       pp_polaris10_thermal_initialize(hwmgr);
-
-       return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
deleted file mode 100644 (file)
index 33c3394..0000000
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_HWMGR_H
-#define POLARIS10_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu74.h"
-#include "smu74_discrete.h"
-#include "ppatomctrl.h"
-#include "polaris10_ppsmc.h"
-#include "polaris10_powertune.h"
-
-#define POLARIS10_MAX_HARDWARE_POWERLEVELS     2
-
-#define POLARIS10_VOLTAGE_CONTROL_NONE                   0x0
-#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define POLARIS10_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-struct polaris10_performance_level {
-       uint32_t  memory_clock;
-       uint32_t  engine_clock;
-       uint16_t  pcie_gen;
-       uint16_t  pcie_lane;
-};
-
-struct polaris10_uvd_clocks {
-       uint32_t  vclk;
-       uint32_t  dclk;
-};
-
-struct polaris10_vce_clocks {
-       uint32_t  evclk;
-       uint32_t  ecclk;
-};
-
-struct polaris10_power_state {
-       uint32_t                  magic;
-       struct polaris10_uvd_clocks    uvd_clks;
-       struct polaris10_vce_clocks    vce_clks;
-       uint32_t                  sam_clk;
-       uint16_t                  performance_level_count;
-       bool                      dc_compatible;
-       uint32_t                  sclk_threshold;
-       struct polaris10_performance_level  performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct polaris10_dpm_level {
-       bool    enabled;
-       uint32_t        value;
-       uint32_t        param1;
-};
-
-#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
-
-struct polaris10_single_dpm_table {
-       uint32_t                count;
-       struct polaris10_dpm_level      dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct polaris10_dpm_table {
-       struct polaris10_single_dpm_table  sclk_table;
-       struct polaris10_single_dpm_table  mclk_table;
-       struct polaris10_single_dpm_table  pcie_speed_table;
-       struct polaris10_single_dpm_table  vddc_table;
-       struct polaris10_single_dpm_table  vddci_table;
-       struct polaris10_single_dpm_table  mvdd_table;
-};
-
-struct polaris10_clock_registers {
-       uint32_t  vCG_SPLL_FUNC_CNTL;
-       uint32_t  vCG_SPLL_FUNC_CNTL_2;
-       uint32_t  vCG_SPLL_FUNC_CNTL_3;
-       uint32_t  vCG_SPLL_FUNC_CNTL_4;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t  vDLL_CNTL;
-       uint32_t  vMCLK_PWRMGT_CNTL;
-       uint32_t  vMPLL_AD_FUNC_CNTL;
-       uint32_t  vMPLL_DQ_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL_1;
-       uint32_t  vMPLL_FUNC_CNTL_2;
-       uint32_t  vMPLL_SS1;
-       uint32_t  vMPLL_SS2;
-};
-
-#define DISABLE_MC_LOADMICROCODE   1
-#define DISABLE_MC_CFGPROGRAMMING  2
-
-struct polaris10_voltage_smio_registers {
-       uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define POLARIS10_MAX_LEAKAGE_COUNT  8
-
-struct polaris10_leakage_voltage {
-       uint16_t  count;
-       uint16_t  leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
-       uint16_t  actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
-};
-
-struct polaris10_vbios_boot_state {
-       uint16_t    mvdd_bootup_value;
-       uint16_t    vddc_bootup_value;
-       uint16_t    vddci_bootup_value;
-       uint32_t    sclk_bootup_value;
-       uint32_t    mclk_bootup_value;
-       uint16_t    pcie_gen_bootup_value;
-       uint16_t    pcie_lane_bootup_value;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct polaris10_ulv_parm {
-       bool                           ulv_supported;
-       uint32_t                       cg_ulv_parameter;
-       uint32_t                       ulv_volt_change_delay;
-       struct polaris10_performance_level  ulv_power_level;
-};
-
-struct polaris10_display_timing {
-       uint32_t  min_clock_in_sr;
-       uint32_t  num_existing_displays;
-};
-
-struct polaris10_dpmlevel_enable_mask {
-       uint32_t  uvd_dpm_enable_mask;
-       uint32_t  vce_dpm_enable_mask;
-       uint32_t  acp_dpm_enable_mask;
-       uint32_t  samu_dpm_enable_mask;
-       uint32_t  sclk_dpm_enable_mask;
-       uint32_t  mclk_dpm_enable_mask;
-       uint32_t  pcie_dpm_enable_mask;
-};
-
-struct polaris10_pcie_perf_range {
-       uint16_t  max;
-       uint16_t  min;
-};
-
-struct polaris10_range_table {
-       uint32_t trans_lower_frequency; /* in 10khz */
-       uint32_t trans_upper_frequency;
-};
-
-struct polaris10_hwmgr {
-       struct polaris10_dpm_table                      dpm_table;
-       struct polaris10_dpm_table                      golden_dpm_table;
-       SMU74_Discrete_DpmTable                         smc_state_table;
-       struct SMU74_Discrete_Ulv            ulv_setting;
-
-       struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
-       uint32_t                                                voting_rights_clients0;
-       uint32_t                                                voting_rights_clients1;
-       uint32_t                                                voting_rights_clients2;
-       uint32_t                                                voting_rights_clients3;
-       uint32_t                                                voting_rights_clients4;
-       uint32_t                                                voting_rights_clients5;
-       uint32_t                                                voting_rights_clients6;
-       uint32_t                                                voting_rights_clients7;
-       uint32_t                                                static_screen_threshold_unit;
-       uint32_t                                                static_screen_threshold;
-       uint32_t                                                voltage_control;
-       uint32_t                                                vddc_vddci_delta;
-
-       uint32_t                                                active_auto_throttle_sources;
-
-       struct polaris10_clock_registers            clock_registers;
-       struct polaris10_voltage_smio_registers      voltage_smio_registers;
-
-       bool                           is_memory_gddr5;
-       uint16_t                       acpi_vddc;
-       bool                           pspp_notify_required;
-       uint16_t                       force_pcie_gen;
-       uint16_t                       acpi_pcie_gen;
-       uint32_t                       pcie_gen_cap;
-       uint32_t                       pcie_lane_cap;
-       uint32_t                       pcie_spc_cap;
-       struct polaris10_leakage_voltage          vddc_leakage;
-       struct polaris10_leakage_voltage          Vddci_leakage;
-
-       uint32_t                             mvdd_control;
-       uint32_t                             vddc_mask_low;
-       uint32_t                             mvdd_mask_low;
-       uint16_t                            max_vddc_in_pptable;
-       uint16_t                            min_vddc_in_pptable;
-       uint16_t                            max_vddci_in_pptable;
-       uint16_t                            min_vddci_in_pptable;
-       uint32_t                             mclk_strobe_mode_threshold;
-       uint32_t                             mclk_stutter_mode_threshold;
-       uint32_t                             mclk_edc_enable_threshold;
-       uint32_t                             mclk_edcwr_enable_threshold;
-       bool                                is_uvd_enabled;
-       struct polaris10_vbios_boot_state        vbios_boot_state;
-
-       bool                           pcie_performance_request;
-       bool                           battery_state;
-       bool                           is_tlu_enabled;
-
-       /* ---- SMC SRAM Address of firmware header tables ---- */
-       uint32_t                             sram_end;
-       uint32_t                             dpm_table_start;
-       uint32_t                             soft_regs_start;
-       uint32_t                             mc_reg_table_start;
-       uint32_t                             fan_table_start;
-       uint32_t                             arb_table_start;
-
-       /* ---- Stuff originally coming from Evergreen ---- */
-       uint32_t                             vddci_control;
-       struct pp_atomctrl_voltage_table     vddc_voltage_table;
-       struct pp_atomctrl_voltage_table     vddci_voltage_table;
-       struct pp_atomctrl_voltage_table     mvdd_voltage_table;
-
-       uint32_t                             mgcg_cgtt_local2;
-       uint32_t                             mgcg_cgtt_local3;
-       uint32_t                             gpio_debug;
-       uint32_t                             mc_micro_code_feature;
-       uint32_t                             highest_mclk;
-       uint16_t                             acpi_vddci;
-       uint8_t                              mvdd_high_index;
-       uint8_t                              mvdd_low_index;
-       bool                                 dll_default_on;
-       bool                                 performance_request_registered;
-
-       /* ---- Low Power Features ---- */
-       struct polaris10_ulv_parm                 ulv;
-
-       /* ---- CAC Stuff ---- */
-       uint32_t                       cac_table_start;
-       bool                           cac_configuration_required;
-       bool                           driver_calculate_cac_leakage;
-       bool                           cac_enabled;
-
-       /* ---- DPM2 Parameters ---- */
-       uint32_t                       power_containment_features;
-       bool                           enable_dte_feature;
-       bool                           enable_tdc_limit_feature;
-       bool                           enable_pkg_pwr_tracking_feature;
-       bool                           disable_uvd_power_tune_feature;
-       const struct polaris10_pt_defaults       *power_tune_defaults;
-       struct SMU74_Discrete_PmFuses  power_tune_table;
-       uint32_t                       dte_tj_offset;
-       uint32_t                       fast_watermark_threshold;
-
-       /* ---- Phase Shedding ---- */
-       bool                           vddc_phase_shed_control;
-
-       /* ---- DI/DT ---- */
-       struct polaris10_display_timing        display_timing;
-       uint32_t                      bif_sclk_table[SMU74_MAX_LEVELS_LINK];
-
-       /* ---- Thermal Temperature Setting ---- */
-       struct polaris10_dpmlevel_enable_mask     dpm_level_enable_mask;
-       uint32_t                                  need_update_smu7_dpm_table;
-       uint32_t                                  sclk_dpm_key_disabled;
-       uint32_t                                  mclk_dpm_key_disabled;
-       uint32_t                                  pcie_dpm_key_disabled;
-       uint32_t                                  min_engine_clocks;
-       struct polaris10_pcie_perf_range          pcie_gen_performance;
-       struct polaris10_pcie_perf_range          pcie_lane_performance;
-       struct polaris10_pcie_perf_range          pcie_gen_power_saving;
-       struct polaris10_pcie_perf_range          pcie_lane_power_saving;
-       bool                                      use_pcie_performance_levels;
-       bool                                      use_pcie_power_saving_levels;
-       uint32_t                                  activity_target[SMU74_MAX_LEVELS_GRAPHICS];
-       uint32_t                                  mclk_activity_target;
-       uint32_t                                  mclk_dpm0_activity_target;
-       uint32_t                                  low_sclk_interrupt_threshold;
-       uint32_t                                  last_mclk_dpm_enable_mask;
-       bool                                      uvd_enabled;
-
-       /* ---- Power Gating States ---- */
-       bool                           uvd_power_gated;
-       bool                           vce_power_gated;
-       bool                           samu_power_gated;
-       bool                           need_long_memory_training;
-
-       /* Application power optimization parameters */
-       bool                               update_up_hyst;
-       bool                               update_down_hyst;
-       uint32_t                           down_hyst;
-       uint32_t                           up_hyst;
-       uint32_t disable_dpm_mask;
-       bool apply_optimized_settings;
-       uint32_t                              avfs_vdroop_override_setting;
-       bool                                  apply_avfs_cks_off_voltage;
-       uint32_t                              frame_time_x2;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT             256
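-/*
- * Q8.8 stores the integer part in the high byte and the fraction in 1/256
- * steps in the low byte, so converting is a multiply by 256, e.g.:
- *   (uint16_t)(1.5 * POLARIS10_Q88_FORMAT_CONVERSION_UNIT) == 384 == 0x0180
- */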
-
-enum Polaris10_I2CLineID {
-       Polaris10_I2CLineID_DDC1 = 0x90,
-       Polaris10_I2CLineID_DDC2 = 0x91,
-       Polaris10_I2CLineID_DDC3 = 0x92,
-       Polaris10_I2CLineID_DDC4 = 0x93,
-       Polaris10_I2CLineID_DDC5 = 0x94,
-       Polaris10_I2CLineID_DDC6 = 0x95,
-       Polaris10_I2CLineID_SCLSDA = 0x96,
-       Polaris10_I2CLineID_DDCVGA = 0x97
-};
-
-#define POLARIS10_I2C_DDC1DATA          0
-#define POLARIS10_I2C_DDC1CLK           1
-#define POLARIS10_I2C_DDC2DATA          2
-#define POLARIS10_I2C_DDC2CLK           3
-#define POLARIS10_I2C_DDC3DATA          4
-#define POLARIS10_I2C_DDC3CLK           5
-#define POLARIS10_I2C_SDA               40
-#define POLARIS10_I2C_SCL               41
-#define POLARIS10_I2C_DDC4DATA          65
-#define POLARIS10_I2C_DDC4CLK           66
-#define POLARIS10_I2C_DDC5DATA          0x48
-#define POLARIS10_I2C_DDC5CLK           0x49
-#define POLARIS10_I2C_DDC6DATA          0x4a
-#define POLARIS10_I2C_DDC6CLK           0x4b
-#define POLARIS10_I2C_DDCVGADATA        0x4c
-#define POLARIS10_I2C_DDCVGACLK         0x4d
-
-#define POLARIS10_UNUSED_GPIO_PIN       0x7F
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
deleted file mode 100644 (file)
index b9cb240..0000000
+++ /dev/null
@@ -1,988 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_smumgr.h"
-#include "smu74_discrete.h"
-#include "pp_debug.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "oss/oss_3_0_sh_mask.h"
-
-#define VOLTAGE_SCALE  4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-
-uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
-
-struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                Shift                                               Value       Type
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   0xFFFFFFFF  }
-};
-
-struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                Shift                                               Value       Type
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
-       {   0xFFFFFFFF  }
-};
-
-struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                Shift                                               Value       Type
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   0xFFFFFFFF  }
-};
-
-struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                Shift                                               Value       Type
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-       {   0xFFFFFFFF  }
-};
-
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-       /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
-        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
-       { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
-       { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
-       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (table_info &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID)
-               polaris10_hwmgr->power_tune_defaults =
-                               &polaris10_power_tune_data_set_array
-                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-       else
-               polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-}
-
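-/* Scale a fan-gain percentage (0..100) onto the SMU's 0..4096 fixed-point range. */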
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-       uint32_t tmp;
-       tmp = raw_setting * 4096 / 100;
-       return (uint16_t)tmp;
-}
-
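-/* Fill the BAPM fields of the SMC DPM table; TDP (x128) and temperature limits (x256)
- * are converted into the firmware's fixed-point units before the host-to-SMC byte swap. */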
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-       SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-       struct pp_advance_fan_control_parameters *fan_table =
-                       &hwmgr->thermal_controller.advanceFanControlParameters;
-       int i, j, k;
-       const uint16_t *pdef1;
-       const uint16_t *pdef2;
-
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-       dpm_table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
-       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-                               "Target Operating Temp is out of Range!",
-                               );
-
-       dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTargetOperatingTemp * 256);
-       dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitHotspot * 256);
-       dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainEdge));
-       dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
-       pdef1 = defaults->BAPMTI_R;
-       pdef2 = defaults->BAPMTI_RC;
-
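-       /* Copy the flat BAPMTI_R/BAPMTI_RC defaults into the SMC table's
-        * 3-D iteration/source/sink layout. */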
-       for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
-               for (j = 0; j < SMU74_DTE_SOURCES; j++) {
-                       for (k = 0; k < SMU74_DTE_SINKS; k++) {
-                               dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
-                               dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
-                               pdef1++;
-                               pdef2++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
-       data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-       data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-       data->power_tune_table.SviLoadLineTrimVddC = 3;
-       data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
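-/* Program the package TDC limit (pptable value x128) and the throttle-release defaults. */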
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
-       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-       data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-       data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-       return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-       uint32_t temp;
-
-       if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                       fuse_table_offset +
-                       offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, data->sram_end))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.TdcWaterfallCtl (DW8) from SMC Failed!",
-                               return -EINVAL);
-       else {
-               data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-               data->power_tune_table.LPMLTemperatureMin =
-                               (uint8_t)((temp >> 16) & 0xff);
-               data->power_tune_table.LPMLTemperatureMax =
-                               (uint8_t)((temp >> 8) & 0xff);
-               data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-       }
-       return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-       return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
-               || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
-                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
-                               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
-       return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
-       return 0;
-}
-
-static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
-{
-
-       uint32_t en = enable ? 1 : 0;
-       int32_t result = 0;
-       uint32_t data;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
-               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
-               data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
-               data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
-               DIDTBlock_Info &= ~SQ_Enable_MASK;
-               DIDTBlock_Info |= en << SQ_Enable_SHIFT;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
-               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
-               data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
-               data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
-               DIDTBlock_Info &= ~DB_Enable_MASK;
-               DIDTBlock_Info |= en << DB_Enable_SHIFT;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
-               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
-               data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
-               data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
-               DIDTBlock_Info &= ~TD_Enable_MASK;
-               DIDTBlock_Info |= en << TD_Enable_SHIFT;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
-               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
-               data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
-               data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
-               DIDTBlock_Info &= ~TCP_Enable_MASK;
-               DIDTBlock_Info |= en << TCP_Enable_SHIFT;
-       }
-
-       if (enable)
-               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
-
-       return result;
-}
-
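/* Each capability-gated block in polaris10_enable_didt() above repeats the
 * same read-modify-write of a DIDT control register. A minimal sketch of that
 * pattern as a hypothetical helper (didt_rmw_enable() is illustrative only,
 * not part of the driver):
 */
static void didt_rmw_enable(struct pp_hwmgr *hwmgr, uint32_t reg,
		uint32_t mask, uint32_t shift, uint32_t en)
{
	uint32_t data = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__DIDT, reg);

	data &= ~mask;                    /* clear the enable field */
	data |= (en << shift) & mask;     /* write 0 or 1 back into it */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, reg, data);
}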
-static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
-                               struct polaris10_pt_config_reg *cac_config_regs)
-{
-       struct polaris10_pt_config_reg *config_regs = cac_config_regs;
-       uint32_t cache = 0;
-       uint32_t data = 0;
-
-       PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
-
-       while (config_regs->offset != 0xFFFFFFFF) {
-               if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
-                       cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
-               else {
-                       switch (config_regs->type) {
-                       case POLARIS10_CONFIGREG_SMC_IND:
-                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
-                               break;
-
-                       case POLARIS10_CONFIGREG_DIDT_IND:
-                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
-                               break;
-
-                       case POLARIS10_CONFIGREG_GC_CAC_IND:
-                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
-                               break;
-
-                       default:
-                               data = cgs_read_register(hwmgr->device, config_regs->offset);
-                               break;
-                       }
-
-                       data &= ~config_regs->mask;
-                       data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
-                       data |= cache;
-
-                       switch (config_regs->type) {
-                       case POLARIS10_CONFIGREG_SMC_IND:
-                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
-                               break;
-
-                       case POLARIS10_CONFIGREG_DIDT_IND:
-                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
-                               break;
-
-                       case POLARIS10_CONFIGREG_GC_CAC_IND:
-                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
-                               break;
-
-                       default:
-                               cgs_write_register(hwmgr->device, config_regs->offset, data);
-                               break;
-                       }
-                       cache = 0;
-               }
-
-               config_regs++;
-       }
-
-       return 0;
-}
-
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       uint32_t num_se = 0;
-       uint32_t count, value, value2;
-       struct cgs_system_info sys_info = {0};
-
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
-       result = cgs_query_system_info(hwmgr->device, &sys_info);
-
-
-       if (result == 0)
-               num_se = sys_info.value;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
-
-               /* TODO: disable clock gating before programming DIDT */
-               value = 0;
-               value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
-               for (count = 0; count < num_se; count++) {
-                       value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
-                               | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
-                               | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
-                       cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
-
-                       if (hwmgr->chip_id == CHIP_POLARIS10) {
-                               result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
-                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                               result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
-                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
-                               result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
-                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                               result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
-                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                       }
-               }
-               cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
-
-               result = polaris10_enable_didt(hwmgr, true);
-               PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
-
-               /* TODO: re-enable clock gating once DIDT is enabled */
-       }
-
-       return 0;
-}
-
-int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
-               /* TODO: disable clock gating before touching DIDT */
-
-               result = polaris10_enable_didt(hwmgr, false);
-               PP_ASSERT_WITH_CODE((result == 0), "Disable DIDT failed.", return result);
-               /* TODO: re-enable clock gating afterwards */
-       }
-
-       return 0;
-}
-
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
-       data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
-       return 0;
-}
-
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint32_t pm_fuse_table_offset;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU74_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, data->sram_end))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to get pm_fuse_table_offset Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate SviLoadLine Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
-               if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TdcWaterfallCtl, "
-                                       "LPMLTemperature Min and Max Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_temperature_scaler(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate LPMLTemperatureScaler Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_fuzzy_fan(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Min and Max Vid Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-                                       "Sidd Failed!", return -EINVAL);
-
-               if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
-                               (uint8_t *)&data->power_tune_table,
-                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed!",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CAC)) {
-               int smc_result;
-               smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                               (uint16_t)(PPSMC_MSG_EnableCac));
-               PP_ASSERT_WITH_CODE((0 == smc_result),
-                               "Failed to enable CAC in SMC.", result = -1);
-
-               data->cac_enabled = (smc_result == 0);
-       }
-       return result;
-}
-
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
-               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                               (uint16_t)(PPSMC_MSG_DisableCac));
-               PP_ASSERT_WITH_CODE((smc_result == 0),
-                               "Failed to disable CAC in SMC.", result = -1);
-
-               data->cac_enabled = false;
-       }
-       return result;
-}
-
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->power_containment_features &
-                       POWERCONTAINMENT_FEATURE_PkgPwrLimit)
-               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_PkgPwrSetLimit, n);
-       return 0;
-}
-
-static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
-       return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
-                       PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int smc_result;
-       int result = 0;
-
-       data->power_containment_features = 0;
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-
-               if (data->enable_tdc_limit_feature) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable));
-                       PP_ASSERT_WITH_CODE((0 == smc_result),
-                                       "Failed to enable TDCLimit in SMC.", result = -1;);
-                       if (0 == smc_result)
-                               data->power_containment_features |=
-                                               POWERCONTAINMENT_FEATURE_TDCLimit;
-               }
-
-               if (data->enable_pkg_pwr_tracking_feature) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
-                       PP_ASSERT_WITH_CODE((0 == smc_result),
-                                       "Failed to enable PkgPwrTracking in SMC.", result = -1;);
-                       if (0 == smc_result) {
-                               struct phm_cac_tdp_table *cac_table =
-                                               table_info->cac_dtp_table;
-                               uint32_t default_limit =
-                                       (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
-                               data->power_containment_features |=
-                                               POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
-                               if (polaris10_set_power_limit(hwmgr, default_limit))
-                                       printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
-                       }
-               }
-       }
-       return result;
-}
-
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment) &&
-                       data->power_containment_features) {
-               int smc_result;
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_TDCLimit) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable TDCLimit in SMC.",
-                                       result = smc_result);
-               }
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_DTE) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_DisableDTE));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable DTE in SMC.",
-                                       result = smc_result);
-               }
-
-               if (data->power_containment_features &
-                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
-                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
-                       PP_ASSERT_WITH_CODE((smc_result == 0),
-                                       "Failed to disable PkgPwrTracking in SMC.",
-                                       result = smc_result);
-               }
-               data->power_containment_features = 0;
-       }
-
-       return result;
-}
-
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-       int adjust_percent, target_tdp;
-       int result = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               /* adjustment percentage has already been validated */
-               adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
-                               hwmgr->platform_descriptor.TDPAdjustment :
-                               (-1 * hwmgr->platform_descriptor.TDPAdjustment);
-               /* The SMC expects target_tdp as a 7-bit fraction in the DPM
-                * table, but as an 8-bit fraction in messages.
-                */
-               target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
-               result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
-       }
-
-       return result;
-}
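The TDP plumbing above is all fixed point: the DPM table stores watts as a
7-bit fraction (usTDP * 128) while the OverDriveSetTargetTdp message takes an
8-bit fraction (usTDP * 256). A worked example, assuming a hypothetical 120 W
board limit and a validated +5% adjustment (the endian swap via
PP_HOST_TO_SMC_US is omitted here):

	dpm_table->DefaultTdp = 120 * 128;              /* 15360, U.7 format */
	target_tdp = ((100 + 5) * (120 * 256)) / 100;   /* 32256, U.8 format */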
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
deleted file mode 100644 (file)
index bc78e28..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef POLARIS10_POWERTUNE_H
-#define POLARIS10_POWERTUNE_H
-
-enum polaris10_pt_config_reg_type {
-       POLARIS10_CONFIGREG_MMR = 0,
-       POLARIS10_CONFIGREG_SMC_IND,
-       POLARIS10_CONFIGREG_DIDT_IND,
-       POLARIS10_CONFIGREG_GC_CAC_IND,
-       POLARIS10_CONFIGREG_CACHE,
-       POLARIS10_CONFIGREG_MAX
-};
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
-#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
-
-#define ixGC_CAC_CNTL 0x0000
-#define ixDIDT_SQ_STALL_CTRL 0x0004
-#define ixDIDT_SQ_TUNING_CTRL 0x0005
-#define ixDIDT_TD_STALL_CTRL 0x0044
-#define ixDIDT_TD_TUNING_CTRL 0x0045
-#define ixDIDT_TCP_STALL_CTRL 0x0064
-#define ixDIDT_TCP_TUNING_CTRL 0x0065
-
-struct polaris10_pt_config_reg {
-       uint32_t                           offset;
-       uint32_t                           mask;
-       uint32_t                           shift;
-       uint32_t                           value;
-       enum polaris10_pt_config_reg_type       type;
-};
-
-struct polaris10_pt_defaults {
-       uint8_t   SviLoadLineEn;
-       uint8_t   SviLoadLineVddC;
-       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-       uint8_t   TDC_MAWt;
-       uint8_t   TdcWaterfallCtl;
-       uint8_t   DTEAmbientTempBase;
-
-       uint32_t  DisplayCac;
-       uint32_t  BAPM_TEMP_GRADIENT;
-       uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-       uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
-
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
-#endif  /* POLARIS10_POWERTUNE_H */
-
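The header above defines the table format consumed by
polaris10_program_pt_config_registers(): entries are applied in order until an
offset of 0xFFFFFFFF, and POLARIS10_CONFIGREG_CACHE entries only accumulate
field bits that get OR-ed into the next real register write. A minimal sketch
of such a table (the mask/shift/value numbers are made up for illustration):

	static const struct polaris10_pt_config_reg didt_example[] = {
	/*        offset                 mask        shift  value   type                         */
		{ ixDIDT_SQ_TUNING_CTRL, 0x0000ffff, 0,     0x3853, POLARIS10_CONFIGREG_DIDT_IND },
		{ ixDIDT_TD_TUNING_CTRL, 0x0000ffff, 0,     0x0dde, POLARIS10_CONFIGREG_CACHE    },
		{ ixDIDT_TD_TUNING_CTRL, 0xffff0000, 16,    0x3853, POLARIS10_CONFIGREG_DIDT_IND },
		{ 0xFFFFFFFF }  /* sentinel terminates the walk */
	};

Here the cached low half of TD_TUNING_CTRL is staged by the second entry and
folded into the single indirect write performed by the third.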
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644 (file)
index b206632..0000000
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <asm/div64.h>
-#include "polaris10_thermal.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_smumgr.h"
-#include "polaris10_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
-               struct phm_fan_speed_info *fan_speed_info)
-{
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       fan_speed_info->supports_percent_read = true;
-       fan_speed_info->supports_percent_write = true;
-       fan_speed_info->min_percent = 0;
-       fan_speed_info->max_percent = 100;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
-               hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-               fan_speed_info->supports_rpm_read = true;
-               fan_speed_info->supports_rpm_write = true;
-               fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
-               fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
-       } else {
-               fan_speed_info->min_rpm = 0;
-               fan_speed_info->max_rpm = 0;
-       }
-
-       return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
-               uint32_t *speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-       duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
-       if (duty100 == 0)
-               return -EINVAL;
-
-
-       tmp64 = (uint64_t)duty * 100;
-       do_div(tmp64, duty100);
-       *speed = (uint32_t)tmp64;
-
-       if (*speed > 100)
-               *speed = 100;
-
-       return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-       uint32_t tach_period;
-       uint32_t crystal_clock_freq;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-                       (hwmgr->thermal_controller.fanInfo.
-                               ucTachometerPulsesPerRevolution == 0))
-               return 0;
-
-       tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_TACH_STATUS, TACH_PERIOD);
-
-       if (tach_period == 0)
-               return -EINVAL;
-
-       crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-       *speed = 60 * crystal_clock_freq * 10000 / tach_period;
-
-       return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    mode   the fan control mode: 0 = default, 1 = by percent, 5 = by RPM
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
-       if (hwmgr->fan_ctrl_is_in_default_mode) {
-               hwmgr->fan_ctrl_default_mode =
-                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,     CGS_IND_REG__SMC,
-                                               CG_FDO_CTRL2, FDO_PWM_MODE);
-               hwmgr->tmin =
-                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                               CG_FDO_CTRL2, TMIN);
-               hwmgr->fan_ctrl_is_in_default_mode = false;
-       }
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, TMIN, 0);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
-       return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->fan_ctrl_is_in_default_mode) {
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, TMIN, hwmgr->tmin);
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       }
-
-       return 0;
-}
-
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_FanSpeedInTableIsRPM))
-                       hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
-                                       hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usMaxFanRPM);
-               else
-                       hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
-                                       hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usMaxFanPWM);
-
-       } else {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-       }
-
-       if (!result && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ucTargetTemperature)
-               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanTemperatureTarget,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucTargetTemperature);
-
-       return result;
-}
-
-
-int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
-}
-
-/**
-* Set Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% duty setting reads back as 0.
-*/
-int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
-               uint32_t speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       if (speed > 100)
-               speed = 100;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl))
-               polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (duty100 == 0)
-               return -EINVAL;
-
-       tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
-       duty = (uint32_t)tmp64;
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
-       return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl)) {
-               result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-               if (!result)
-                       result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
-       } else
-               result = polaris10_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the RPM value (between min and max RPM) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-       uint32_t tach_period;
-       uint32_t crystal_clock_freq;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-                       (hwmgr->thermal_controller.fanInfo.
-                       ucTachometerPulsesPerRevolution == 0) ||
-                       (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
-                       (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
-               return 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl))
-               polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-       crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-       tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_TACH_STATUS, TACH_PERIOD, tach_period);
-
-       return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reads the remote temperature from the thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-       int temp;
-
-       temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-       /* Bit 9 means the reading is lower than the lowest usable value. */
-       if (temp & 0x200)
-               temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
-       else
-               temp = temp & 0x1ff;
-
-       temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       return temp;
-}
-
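/* A worked decode of CTF_TEMP as read above, assuming a raw reading of 0x05c
 * and PP_TEMPERATURE_UNITS_PER_CENTIGRADES == 1000 (i.e. millidegrees):
 *
 *     0x05c & 0x200 == 0     the reading is usable (not under-range)
 *     0x05c & 0x1ff == 92    degrees Celsius
 *     92 * 1000  == 92000    value returned, in millidegrees
 */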
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param    hwmgr The address of the hardware manager.
-* @param    low_temp, high_temp  Temperature range to be programmed for the low and high alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-               uint32_t low_temp, uint32_t high_temp)
-{
-       uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
-                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
-                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       if (low < low_temp)
-               low = low_temp;
-       if (high > high_temp)
-               high = high_temp;
-
-       if (low > high)
-       struct pp_advance_fan_control_parameters *fan_table =
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, DIG_THERM_INTH,
-                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, DIG_THERM_INTL,
-                       (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_CTRL, DIG_THERM_DPM,
-                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
-       return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_TACH_CTRL, EDGE_PER_REV,
-                               hwmgr->thermal_controller.fanInfo.
-                               ucTachometerPulsesPerRevolution - 1);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
-       return 0;
-}
-
-/**
-* Enable thermal alerts on the thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK);
-       alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to enable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
-}
-
-/**
-* Disable thermal alerts on the thermal controller.
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK);
-       alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to disable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param    hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       int result = polaris10_thermal_disable_alert(hwmgr);
-
-       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-               polaris10_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from setting up the fan table
-*/
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (data->fan_table_start == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (duty100 == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
-                       usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->
-                       thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = tonga_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
-                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
-                       reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
-                       hwmgr->device, CGS_IND_REG__SMC,
-                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
-                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
-                       data->sram_end);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ucMinimumPWMLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanMinPwm,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               PPSMC_MSG_SetFanSclkTarget,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
-       if (res)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-
-       return 0;
-}
-
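/* A worked example of the slope math above, with made-up table values:
 * usTMin = 4500, usTMed = 6500 (hundredths of a degree), usPWMMin = 3000,
 * usPWMMed = 5000 (hundredths of a percent) and duty100 = 255:
 *
 *     t_diff1 = 2000, pwm_diff1 = 2000
 *     slope1  = (50 + (16 * 255 * 2000) / 2000) / 100 = 41
 *
 * i.e. 41/16 ~= 2.6 duty counts per degree, matching the 51-count PWM rise
 * spread over the 20-degree TMin..TMed window (the +50 is rounding).
 */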
-/**
-* Start the fan control on the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from starting SMC fan control
-*/
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-/* If the fan table setup has failed we could have disabled
- * PHM_PlatformCaps_MicrocodeFanControl even after
- * this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
- */
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl)) {
-               polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
-               polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-       }
-
-       return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
-       if (range == NULL)
-               return -EINVAL;
-
-       return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from initialize thermal controller routine
-*/
-int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return polaris10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from enable alert routine
-*/
-int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return polaris10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from disable alert routine
-*/
-static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       return polaris10_thermal_disable_alert(hwmgr);
-}
-
-static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
-               void *input, void *output, void *storage, int result)
-{
-       int ret;
-       struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-               return 0;
-
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-       if (ret)
-               return ret;
-
-       ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 0 : -1;
-
-       if (!ret)
-               /* If this param is not changed, this function could fire unnecessarily */
-               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-       return ret;
-}
-
-static const struct phm_master_table_item
-polaris10_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_polaris10_thermal_initialize},
-       {NULL, tf_polaris10_thermal_set_temperature_range},
-       {NULL, tf_polaris10_thermal_enable_alert},
-       {NULL, tf_polaris10_thermal_avfs_enable},
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this
- * so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
-       {NULL, tf_polaris10_thermal_setup_fan_table},
-       {NULL, tf_polaris10_thermal_start_smc_fan_control},
-       {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_start_thermal_controller_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       polaris10_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-polaris10_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_polaris10_thermal_disable_alert},
-       {NULL, tf_polaris10_thermal_set_temperature_range},
-       {NULL, tf_polaris10_thermal_enable_alert},
-       {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_set_temperature_range_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       polaris10_thermal_set_temperature_range_master_list
-};
-
-int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-               polaris10_fan_ctrl_set_default_mode(hwmgr);
-       return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param    hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       result = phm_construct_table(hwmgr,
-                       &polaris10_thermal_set_temperature_range_master,
-                       &(hwmgr->set_temperature_range));
-
-       if (!result) {
-               result = phm_construct_table(hwmgr,
-                               &polaris10_thermal_start_thermal_controller_master,
-                               &(hwmgr->start_thermal_controller));
-               if (result)
-                       phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
-       }
-
-       if (!result)
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       return result;
-}
-
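The fan paths in the deleted polaris10_thermal.c are symmetric fixed-point
conversions. A condensed sketch of both directions, assuming tonga_get_xclk()
returns the reference clock in 10 kHz units (so xclk * 10000 is Hz), as the
formulas above imply; 64-bit intermediates guard against overflow:

	/* sketch only, not driver code */
	static uint32_t duty_from_percent(uint32_t percent, uint32_t duty100)
	{
		return percent * duty100 / 100;       /* inverse: duty * 100 / duty100 */
	}

	static uint32_t rpm_from_tach(uint32_t xclk, uint32_t tach_period)
	{
		return (uint32_t)(60ULL * xclk * 10000 / tach_period);
	}

	static uint32_t tach_from_rpm(uint32_t xclk, uint32_t rpm)
	{
		return (uint32_t)(60ULL * xclk * 10000 / (8 * rpm));  /* 8 pulses/rev */
	}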
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644 (file)
index 62f8cbc..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _POLARIS10_THERMAL_H_
-#define _POLARIS10_THERMAL_H_
-
-#include "hwmgr.h"
-
-#define POLARIS10_THERMAL_HIGH_ALERT_MASK         0x1
-#define POLARIS10_THERMAL_LOW_ALERT_MASK          0x2
-
-#define POLARIS10_THERMAL_MINIMUM_TEMP_READING    -256
-#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP      0
-#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
index 26f3e30d0fefde6a77628b1429ea4f952cb900ba..1126bd4f74dcc61d6e48c452ec281723b2fe81ec 100644 (file)
@@ -22,7 +22,6 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
 
 #include "ppatomctrl.h"
 #include "atombios.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
new file mode 100644 (file)
index 0000000..1e870f5
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef TONGA_PPTABLE_H
+#define TONGA_PPTABLE_H
+
+/** \file
+ * This is a PowerPlay table header file
+ */
+#pragma pack(push, 1)
+
+#include "hwmgr.h"
+
+#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN                                 0x80    /* No fan is connected to this controller. */
+
+#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163   17
+#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA     21
+#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI      22
+
+/*
+ * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+ * We probably should reserve the bit 0x80 for this use.
+ * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+ * The driver can pick the correct internal controller based on the ASIC.
+ */
+
+#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    /* ADT7473 Fan Control + Internal Thermal Controller */
+#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    /* EMC2103 Fan Control + Internal Thermal Controller */
+
+/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */
+#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL              0x1            /* This cap indicates whether vddgfx will be a separated power rail. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY                   0x2            /* This cap indicates whether this is a mobile part and CCC need to show Powerplay page. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE            0x4            /* This cap indicates whether power source notification is done by SBIOS directly. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND      0x8            /* Enable the option to overwrite voltage island feature to be disabled, regardless of VddGfx power rail support. */
+#define ____RETIRE16____                                0x10
+#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC                 0x20            /* This cap indicates whether power source notification is done by GPIO directly. */
+#define ____RETIRE64____                                0x40
+#define ____RETIRE128____                               0x80
+#define ____RETIRE256____                              0x100
+#define ____RETIRE512____                              0x200
+#define ____RETIRE1024____                             0x400
+#define ____RETIRE2048____                             0x800
+#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL             0x1000            /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */
+#define ____RETIRE2000____                            0x2000
+#define ____RETIRE4000____                            0x4000
+#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL            0x8000            /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */
+#define ____RETIRE10000____                          0x10000
+#define ATOM_TONGA_PP_PLATFORM_CAP_BACO                    0x20000            /* Enable to indicate the driver supports BACO state. */
+
+#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17         0x100000     /* Enable to indicate the driver supports thermal2GPIO17. */
+#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL  0x1000000     /* Enable to indicate if thermal and PCC are sharing the same GPIO */
+#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE       0x2000000
+
+/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK               0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT              0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE               0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY            1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED           3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE        5
+/* 2, 4, 6, 7 are reserved */
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                  0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL               0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE    0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                  0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                0x0080
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                  0x1000
+
+/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+
+#define ATOM_Tonga_DISALLOW_ON_DC                       0x00004000
+#define ATOM_Tonga_ENABLE_VARIBRIGHT                    0x00008000
+
+#define ATOM_Tonga_TABLE_REVISION_TONGA                 7
+
+typedef struct _ATOM_Tonga_POWERPLAYTABLE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+
+       UCHAR  ucTableRevision;
+       USHORT usTableSize;                                             /*the size of header structure */
+
+       ULONG   ulGoldenPPID;
+       ULONG   ulGoldenRevision;
+       USHORT  usFormatID;
+
+       USHORT  usVoltageTime;                                   /*in microseconds */
+       ULONG   ulPlatformCaps;                                   /*See ATOM_Tonga_CAPS_* */
+
+       ULONG   ulMaxODEngineClock;                        /*For Overdrive.  */
+       ULONG   ulMaxODMemoryClock;                        /*For Overdrive. */
+
+       USHORT  usPowerControlLimit;
+       USHORT  usUlvVoltageOffset;                               /*in mv units */
+
+       USHORT  usStateArrayOffset;                               /*points to ATOM_Tonga_State_Array */
+       USHORT  usFanTableOffset;                                 /*points to ATOM_Tonga_Fan_Table */
+       USHORT  usThermalControllerOffset;                 /*points to ATOM_Tonga_Thermal_Controller */
+       USHORT  usReserv;                                                  /*CustomThermalPolicy removed for Tonga. Keep this field reserved. */
+
+       USHORT  usMclkDependencyTableOffset;       /*points to ATOM_Tonga_MCLK_Dependency_Table */
+       USHORT  usSclkDependencyTableOffset;       /*points to ATOM_Tonga_SCLK_Dependency_Table */
+       USHORT  usVddcLookupTableOffset;                   /*points to ATOM_Tonga_Voltage_Lookup_Table */
+       USHORT  usVddgfxLookupTableOffset;              /*points to ATOM_Tonga_Voltage_Lookup_Table */
+
+       USHORT  usMMDependencyTableOffset;                /*points to ATOM_Tonga_MM_Dependency_Table */
+
+       USHORT  usVCEStateTableOffset;                     /*points to ATOM_Tonga_VCE_State_Table; */
+
+       USHORT  usPPMTableOffset;                                 /*points to ATOM_Tonga_PPM_Table */
+       USHORT  usPowerTuneTableOffset;                   /*points to ATOM_PowerTune_Table */
+
+       USHORT  usHardLimitTableOffset;                    /*points to ATOM_Tonga_Hard_Limit_Table */
+
+       USHORT  usPCIETableOffset;                                /*points to ATOM_Tonga_PCIE_Table */
+
+       USHORT  usGPIOTableOffset;                                /*points to ATOM_Tonga_GPIO_Table */
+
+       USHORT  usReserved[6];                                     /*TODO: modify reserved size to fit structure alignment */
+} ATOM_Tonga_POWERPLAYTABLE;
+
+typedef struct _ATOM_Tonga_State {
+       UCHAR  ucEngineClockIndexHigh;
+       UCHAR  ucEngineClockIndexLow;
+
+       UCHAR  ucMemoryClockIndexHigh;
+       UCHAR  ucMemoryClockIndexLow;
+
+       UCHAR  ucPCIEGenLow;
+       UCHAR  ucPCIEGenHigh;
+
+       UCHAR  ucPCIELaneLow;
+       UCHAR  ucPCIELaneHigh;
+
+       USHORT usClassification;
+       ULONG ulCapsAndSettings;
+       USHORT usClassification2;
+       UCHAR  ucUnused[4];
+} ATOM_Tonga_State;
+
+typedef struct _ATOM_Tonga_State_Array {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;             /* Number of entries. */
+       ATOM_Tonga_State entries[1];    /* Dynamically allocate entries. */
+} ATOM_Tonga_State_Array;
+
+typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+       UCHAR  ucVddcInd;       /* Vddc voltage */
+       USHORT usVddci;
+       USHORT usVddgfxOffset;  /* Offset relative to Vddc voltage */
+       USHORT usMvdd;
+       ULONG ulMclk;
+       USHORT usReserved;
+} ATOM_Tonga_MCLK_Dependency_Record;
+
+typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_MCLK_Dependency_Record entries[1];                           /* Dynamically allocate entries. */
+} ATOM_Tonga_MCLK_Dependency_Table;
+
+typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                                                    /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+} ATOM_Tonga_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
+} ATOM_Tonga_SCLK_Dependency_Table;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+       ULONG  ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                     /* Number of entries. */
+       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
+typedef struct _ATOM_Tonga_PCIE_Record {
+       UCHAR ucPCIEGenSpeed;
+       UCHAR usPCIELaneWidth;
+       UCHAR ucReserved[2];
+} ATOM_Tonga_PCIE_Record;
+
+typedef struct _ATOM_Tonga_PCIE_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_PCIE_Record entries[1];                                                      /* Dynamically allocate entries. */
+} ATOM_Tonga_PCIE_Table;
+
+typedef struct _ATOM_Polaris10_PCIE_Record {
+       UCHAR ucPCIEGenSpeed;
+       UCHAR usPCIELaneWidth;
+       UCHAR ucReserved[2];
+       ULONG ulPCIE_Sclk;
+} ATOM_Polaris10_PCIE_Record;
+
+typedef struct _ATOM_Polaris10_PCIE_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                         /* Number of entries. */
+       ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
+} ATOM_Polaris10_PCIE_Table;
+
+
+typedef struct _ATOM_Tonga_MM_Dependency_Record {
+       UCHAR   ucVddcInd;                                                                                       /* VDDC voltage */
+       USHORT  usVddgfxOffset;                                                                   /* Offset relative to VDDC voltage */
+       ULONG  ulDClk;                                                                                          /* UVD D-clock */
+       ULONG  ulVClk;                                                                                          /* UVD V-clock */
+       ULONG  ulEClk;                                                                                          /* VCE clock */
+       ULONG  ulAClk;                                                                                          /* ACP clock */
+       ULONG  ulSAMUClk;                                                                                       /* SAMU clock */
+} ATOM_Tonga_MM_Dependency_Record;
+
+typedef struct _ATOM_Tonga_MM_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_MM_Dependency_Record entries[1];                        /* Dynamically allocate entries. */
+} ATOM_Tonga_MM_Dependency_Table;
+
+typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+       USHORT usVdd;                                                                                      /* Base voltage */
+       USHORT usCACLow;
+       USHORT usCACMid;
+       USHORT usCACHigh;
+} ATOM_Tonga_Voltage_Lookup_Record;
+
+typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_Voltage_Lookup_Record entries[1];                            /* Dynamically allocate entries. */
+} ATOM_Tonga_Voltage_Lookup_Table;
+
+typedef struct _ATOM_Tonga_Fan_Table {
+       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
+       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
+       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Centigrade, below which we just run at a minimal PWM. */
+       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
+       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
+       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
+       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
+       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
+       USHORT  usTMax;                                                  /* The max temperature */
+       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
+       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
+       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
+       USHORT  usFanRPMMax;                                      /* The default value in RPM */
+       ULONG  ulMinFanSCLKAcousticLimit;          /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
+       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
+       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
+       USHORT  usReserved;
+} ATOM_Tonga_Fan_Table;
+
+typedef struct _ATOM_Fiji_Fan_Table {
+       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
+       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
+       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Centigrade, below which we just run at a minimal PWM. */
+       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
+       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
+       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
+       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
+       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
+       USHORT  usTMax;                                                  /* The max temperature */
+       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
+       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
+       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
+       USHORT  usFanRPMMax;                                      /* The default value in RPM */
+       ULONG  ulMinFanSCLKAcousticLimit;               /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
+       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
+       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
+       USHORT  usFanGainEdge;
+       USHORT  usFanGainHotspot;
+       USHORT  usFanGainLiquid;
+       USHORT  usFanGainVrVddc;
+       USHORT  usFanGainVrMvdd;
+       USHORT  usFanGainPlx;
+       USHORT  usFanGainHbm;
+       USHORT  usReserved;
+} ATOM_Fiji_Fan_Table;
+
+typedef struct _ATOM_Tonga_Thermal_Controller {
+       UCHAR ucRevId;
+       UCHAR ucType;              /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */
+       UCHAR ucI2cLine;                /* as interpreted by DAL I2C */
+       UCHAR ucI2cAddress;
+       UCHAR ucFanParameters;  /* Fan Control Parameters. */
+       UCHAR ucFanMinRPM;       /* Fan Minimum RPM (hundreds) -- for display purposes only. */
+       UCHAR ucFanMaxRPM;       /* Fan Maximum RPM (hundreds) -- for display purposes only. */
+       UCHAR ucReserved;
+       UCHAR ucFlags;             /* to be defined */
+} ATOM_Tonga_Thermal_Controller;
+
+typedef struct _ATOM_Tonga_VCE_State_Record {
+       UCHAR  ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */
+       UCHAR  ucFlag;          /* 2 bits indicating memory p-states */
+       UCHAR  ucSCLKIndex;             /*index into ATOM_Tonga_SCLK_Dependency_Table */
+       UCHAR  ucMCLKIndex;             /*index into ATOM_Tonga_MCLK_Dependency_Table */
+} ATOM_Tonga_VCE_State_Record;
+
+typedef struct _ATOM_Tonga_VCE_State_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;
+       ATOM_Tonga_VCE_State_Record entries[1];
+} ATOM_Tonga_VCE_State_Table;
+
+typedef struct _ATOM_Tonga_PowerTune_Table {
+       UCHAR  ucRevId;
+       USHORT usTDP;
+       USHORT usConfigurableTDP;
+       USHORT usTDC;
+       USHORT usBatteryPowerLimit;
+       USHORT usSmallPowerLimit;
+       USHORT usLowCACLeakage;
+       USHORT usHighCACLeakage;
+       USHORT usMaximumPowerDeliveryLimit;
+       USHORT usTjMax;
+       USHORT usPowerTuneDataSetID;
+       USHORT usEDCLimit;
+       USHORT usSoftwareShutdownTemp;
+       USHORT usClockStretchAmount;
+       USHORT usReserve[2];
+} ATOM_Tonga_PowerTune_Table;
+
+typedef struct _ATOM_Fiji_PowerTune_Table {
+       UCHAR  ucRevId;
+       USHORT usTDP;
+       USHORT usConfigurableTDP;
+       USHORT usTDC;
+       USHORT usBatteryPowerLimit;
+       USHORT usSmallPowerLimit;
+       USHORT usLowCACLeakage;
+       USHORT usHighCACLeakage;
+       USHORT usMaximumPowerDeliveryLimit;
+       USHORT usTjMax;  /* For Fiji, this is also usTemperatureLimitEdge; */
+       USHORT usPowerTuneDataSetID;
+       USHORT usEDCLimit;
+       USHORT usSoftwareShutdownTemp;
+       USHORT usClockStretchAmount;
+       USHORT usTemperatureLimitHotspot;  /*The following are added for Fiji */
+       USHORT usTemperatureLimitLiquid1;
+       USHORT usTemperatureLimitLiquid2;
+       USHORT usTemperatureLimitVrVddc;
+       USHORT usTemperatureLimitVrMvdd;
+       USHORT usTemperatureLimitPlx;
+       UCHAR  ucLiquid1_I2C_address;  /*Liquid */
+       UCHAR  ucLiquid2_I2C_address;
+       UCHAR  ucLiquid_I2C_Line;
+       UCHAR  ucVr_I2C_address;        /*VR */
+       UCHAR  ucVr_I2C_Line;
+       UCHAR  ucPlx_I2C_address;  /*PLX */
+       UCHAR  ucPlx_I2C_Line;
+       USHORT usReserved;
+} ATOM_Fiji_PowerTune_Table;
+
+#define ATOM_PPM_A_A    1
+#define ATOM_PPM_A_I    2
+typedef struct _ATOM_Tonga_PPM_Table {
+       UCHAR   ucRevId;
+       UCHAR   ucPpmDesign;              /*A+I or A+A */
+       USHORT  usCpuCoreNumber;
+       ULONG  ulPlatformTDP;
+       ULONG  ulSmallACPlatformTDP;
+       ULONG  ulPlatformTDC;
+       ULONG  ulSmallACPlatformTDC;
+       ULONG  ulApuTDP;
+       ULONG  ulDGpuTDP;
+       ULONG  ulDGpuUlvPower;
+       ULONG  ulTjmax;
+} ATOM_Tonga_PPM_Table;
+
+typedef struct _ATOM_Tonga_Hard_Limit_Record {
+       ULONG  ulSCLKLimit;
+       ULONG  ulMCLKLimit;
+       USHORT  usVddcLimit;
+       USHORT  usVddciLimit;
+       USHORT  usVddgfxLimit;
+} ATOM_Tonga_Hard_Limit_Record;
+
+typedef struct _ATOM_Tonga_Hard_Limit_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;
+       ATOM_Tonga_Hard_Limit_Record entries[1];
+} ATOM_Tonga_Hard_Limit_Table;
+
+typedef struct _ATOM_Tonga_GPIO_Table {
+       UCHAR  ucRevId;
+       UCHAR  ucVRHotTriggeredSclkDpmIndex;            /* If VRHot signal is triggered SCLK will be limited to this DPM level */
+       UCHAR  ucReserve[5];
+} ATOM_Tonga_GPIO_Table;
+
+typedef struct _PPTable_Generic_SubTable_Header {
+       UCHAR  ucRevId;
+} PPTable_Generic_SubTable_Header;
+
+
+#pragma pack(pop)
+
+
+#endif
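
The header above lays the PowerPlay table out as one packed blob (note the #pragma pack(push, 1)): the top-level ATOM_Tonga_POWERPLAYTABLE carries USHORT byte offsets to its subtables, and each subtable declares its records as a size-1 array that is really a flexible array bounded by ucNumEntries. A minimal consumer sketch of that offset arithmetic, assuming a little-endian VBIOS image; count_states() is a hypothetical helper, not part of the driver:

    /* Sketch only: resolve a subtable from its byte offset and walk its
     * pre-C99 flexible array (declared as entries[1], indexed by hand). */
    static unsigned int count_states(const ATOM_Tonga_POWERPLAYTABLE *ppt)
    {
            const ATOM_Tonga_State_Array *arr;
            unsigned int i;

            if (ppt->usStateArrayOffset == 0)
                    return 0;

            arr = (const ATOM_Tonga_State_Array *)
                    ((const unsigned char *)ppt +
                     le16_to_cpu(ppt->usStateArrayOffset));

            for (i = 0; i < arr->ucNumEntries; i++) {
                    const ATOM_Tonga_State *s = &arr->entries[i];
                    (void)s;        /* e.g. inspect s->usClassification */
            }
            return arr->ucNumEntries;
    }

The parser added below applies the same pattern to every subtable (state array, fan table, dependency tables, and so on).
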
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
new file mode 100644 (file)
index 0000000..7de701d
--- /dev/null
@@ -0,0 +1,1325 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "process_pptables_v1_0.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pp_debug.h"
+#include "hwmgr.h"
+#include "cgs_common.h"
+#include "pptable_v1_0.h"
+
+/**
+ * Private Function used during initialization.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param setIt A flag indicating whether the capability should be set (TRUE) or reset (FALSE).
+ * @param cap Which capability to set/reset.
+ */
+static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap)
+{
+       if (setIt)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
+}
+
+
+/**
+ * Private Function used during initialization.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_caps the bit array (from BIOS) of capability bits.
+ * @exception the current implementation always returns 0.
+ */
+static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+{
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____),
+               "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____),
+               "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____),
+               "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____),
+               "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____),
+               "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue);
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY),
+                       PHM_PlatformCaps_PowerPlaySupport
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
+                       PHM_PlatformCaps_BiosPowerSourceControl
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC),
+                       PHM_PlatformCaps_AutomaticDCTransition
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL),
+                       PHM_PlatformCaps_EnableMVDDControl
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL),
+                       PHM_PlatformCaps_ControlVDDCI
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL),
+                       PHM_PlatformCaps_ControlVDDGFX
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO),
+                       PHM_PlatformCaps_BACO
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND),
+                       PHM_PlatformCaps_DisableVoltageIsland
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
+                       PHM_PlatformCaps_CombinePCCWithThermalSignal
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
+                       PHM_PlatformCaps_LoadPostProductionFirmware
+                 );
+
+       return 0;
+}
+
+/**
+ * Private Function to get the PowerPlay Table Address.
+ */
+const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+{
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+
+       u16 size;
+       u8 frev, crev;
+       void *table_address = (void *)hwmgr->soft_pp_table;
+
+       if (!table_address) {
+               table_address = (ATOM_Tonga_POWERPLAYTABLE *)
+                               cgs_atom_get_data_table(hwmgr->device,
+                                               index, &size, &frev, &crev);
+               hwmgr->soft_pp_table = table_address;   /*Cache the result in RAM.*/
+               hwmgr->soft_pp_table_size = size;
+       }
+
+       return table_address;
+}
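
The function above fetches the raw PowerPlay table from the VBIOS once, via cgs_atom_get_data_table(), then caches the pointer and size in hwmgr so that later callers (and any soft, i.e. overridden, table) are served from RAM. A sketch of a typical call site; the cast is an assumption that only holds once the table revision has been validated elsewhere:

        /* Sketch: the cast is only safe after sHeader/ucTableRevision
         * have been checked by the table-initialization path. */
        const ATOM_Tonga_POWERPLAYTABLE *ppt =
                (const ATOM_Tonga_POWERPLAYTABLE *)get_powerplay_table(hwmgr);
        if (ppt == NULL)
                return -EINVAL;
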
+
+static int get_vddc_lookup_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_voltage_lookup_table **lookup_table,
+               const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+               uint32_t max_levels
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_voltage_lookup_table *table;
+       phm_ppt_v1_voltage_lookup_record *record;
+       ATOM_Tonga_Voltage_Lookup_Record *atom_record;
+
+       PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
+               "Invalid CAC Leakage PowerPlay Table!", return 1);
+
+       table_size = sizeof(uint32_t) +
+               sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
+
+       table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       memset(table, 0x00, table_size);
+
+       table->count = vddc_lookup_pp_tables->ucNumEntries;
+
+       for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
+               record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_voltage_lookup_record,
+                                       entries, table, i);
+               atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_Voltage_Lookup_Record,
+                                       entries, vddc_lookup_pp_tables, i);
+               record->us_calculated = 0;
+               record->us_vdd = atom_record->usVdd;
+               record->us_cac_low = atom_record->usCACLow;
+               record->us_cac_mid = atom_record->usCACMid;
+               record->us_cac_high = atom_record->usCACHigh;
+       }
+
+       *lookup_table = table;
+
+       return 0;
+}
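
Both the ATOM source records and the driver-side destination records are walked with GET_FLEXIBLE_ARRAY_MEMBER_ADDR, whose definition is not part of this hunk. A plausible definition, consistent with the (record type, flexible-array member, table pointer, index) argument order used here, shown for illustration only:

        /* Illustrative only -- not necessarily the driver's exact macro.
         * Yields the address of the n-th record past the flexible-array
         * member, sidestepping the declared [1] bound. */
        #define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)    \
                ((type *)((char *)&(ptr)->member + sizeof(type) * (n)))
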
+
+/**
+ * Private Function used during initialization.
+ * Initialize Platform Power Management Parameter table
+ * @param hwmgr Pointer to the hardware manager.
+ * @param atom_ppm_table Pointer to PPM table in VBIOS
+ */
+static int get_platform_power_management_table(
+               struct pp_hwmgr *hwmgr,
+               ATOM_Tonga_PPM_Table *atom_ppm_table)
+{
+       struct phm_ppm_table *ptr = kzalloc(sizeof(struct phm_ppm_table), GFP_KERNEL);
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (NULL == ptr)
+               return -ENOMEM;
+
+       ptr->ppm_design
+               = atom_ppm_table->ucPpmDesign;
+       ptr->cpu_core_number
+               = atom_ppm_table->usCpuCoreNumber;
+       ptr->platform_tdp
+               = atom_ppm_table->ulPlatformTDP;
+       ptr->small_ac_platform_tdp
+               = atom_ppm_table->ulSmallACPlatformTDP;
+       ptr->platform_tdc
+               = atom_ppm_table->ulPlatformTDC;
+       ptr->small_ac_platform_tdc
+               = atom_ppm_table->ulSmallACPlatformTDC;
+       ptr->apu_tdp
+               = atom_ppm_table->ulApuTDP;
+       ptr->dgpu_tdp
+               = atom_ppm_table->ulDGpuTDP;
+       ptr->dgpu_ulv_power
+               = atom_ppm_table->ulDGpuUlvPower;
+       ptr->tj_max
+               = atom_ppm_table->ulTjmax;
+
+       pp_table_information->ppm_parameter_table = ptr;
+
+       return 0;
+}
+
+/**
+ * Private Function used during initialization.
+ * Initialize TDP limits for DPM2
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ */
+static int init_dpm_2_parameters(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       int result = 0;
+       struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       ATOM_Tonga_PPM_Table *atom_ppm_table;
+       uint32_t disable_ppm = 0;
+       uint32_t disable_power_control = 0;
+
+       pp_table_information->us_ulv_voltage_offset =
+               le16_to_cpu(powerplay_table->usUlvVoltageOffset);
+
+       pp_table_information->ppm_parameter_table = NULL;
+       pp_table_information->vddc_lookup_table = NULL;
+       pp_table_information->vddgfx_lookup_table = NULL;
+       /* TDP limits */
+       hwmgr->platform_descriptor.TDPODLimit =
+               le16_to_cpu(powerplay_table->usPowerControlLimit);
+       hwmgr->platform_descriptor.TDPAdjustment = 0;
+       hwmgr->platform_descriptor.VidAdjustment = 0;
+       hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
+       hwmgr->platform_descriptor.VidMinLimit = 0;
+       hwmgr->platform_descriptor.VidMaxLimit = 1500000;
+       hwmgr->platform_descriptor.VidStep = 6250;
+
+       disable_power_control = 0;
+       if (0 == disable_power_control) {
+               /* enable TDP overdrive (PowerControl) feature as well if supported */
+               if (hwmgr->platform_descriptor.TDPODLimit != 0)
+                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerControl);
+       }
+
+       if (0 != powerplay_table->usVddcLookupTableOffset) {
+               const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable =
+                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
+                       le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
+
+               result = get_vddc_lookup_table(hwmgr,
+                       &pp_table_information->vddc_lookup_table, pVddcCACTable, 16);
+       }
+
+       if (0 != powerplay_table->usVddgfxLookupTableOffset) {
+               const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable =
+                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
+                       le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset));
+
+               result = get_vddc_lookup_table(hwmgr,
+                       &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16);
+       }
+
+       disable_ppm = 0;
+       if (0 == disable_ppm) {
+               atom_ppm_table = (ATOM_Tonga_PPM_Table *)
+                       (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
+
+               if (0 != powerplay_table->usPPMTableOffset) {
+                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
+                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_EnablePlatformPowerManagement);
+                       }
+               }
+       }
+
+       return result;
+}
+
+static int get_valid_clk(
+               struct pp_hwmgr *hwmgr,
+               struct phm_clock_array **clk_table,
+               phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
+               )
+{
+       uint32_t table_size, i;
+       struct phm_clock_array *table;
+       phm_ppt_v1_clock_voltage_dependency_record *dep_record;
+
+       PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
+               "Invalid PowerPlay Table!", return -1);
+
+       table_size = sizeof(uint32_t) +
+               sizeof(uint32_t) * clk_volt_pp_table->count;
+
+       table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       memset(table, 0x00, table_size);
+
+       table->count = (uint32_t)clk_volt_pp_table->count;
+
+       for (i = 0; i < table->count; i++) {
+               dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                               phm_ppt_v1_clock_voltage_dependency_record,
+                               entries, clk_volt_pp_table, i);
+               table->values[i] = (uint32_t)dep_record->clk;
+       }
+       *clk_table = table;
+
+       return 0;
+}
+
+static int get_hard_limits(
+               struct pp_hwmgr *hwmgr,
+               struct phm_clock_and_voltage_limits *limits,
+               ATOM_Tonga_Hard_Limit_Table const *limitable
+               )
+{
+       PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
+
+       /* currently we always take entries[0] parameters */
+       limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
+       limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
+       limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
+       limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
+       limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
+
+       return 0;
+}
+
+static int get_mclk_voltage_dependency_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
+               ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+       phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+       ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+
+       PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
+
+       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+               * mclk_dep_table->ucNumEntries;
+
+       mclk_table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == mclk_table)
+               return -ENOMEM;
+
+       memset(mclk_table, 0x00, table_size);
+
+       mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
+
+       for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
+               mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, mclk_table, i);
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                               entries, mclk_dep_table, i);
+               mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+               mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+               mclk_table_record->vddci = mclk_dep_record->usVddci;
+               mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+               mclk_table_record->clk = mclk_dep_record->ulMclk;
+       }
+
+       *pp_tonga_mclk_dep_table = mclk_table;
+
+       return 0;
+}
+
+static int get_sclk_voltage_dependency_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
+               PPTable_Generic_SubTable_Header const  *sclk_dep_table
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+       phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
+
+       if (sclk_dep_table->ucRevId < 1) {
+               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+               ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+
+               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * tonga_table->ucNumEntries;
+
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
+
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+               for (i = 0; i < tonga_table->ucNumEntries; i++) {
+                       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_SCLK_Dependency_Record,
+                                               entries, tonga_table, i);
+                       sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, sclk_table, i);
+                       sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+                       sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+                       sclk_table_record->clk = sclk_dep_record->ulSclk;
+                       sclk_table_record->cks_enable =
+                               (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+               }
+       } else {
+               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+               ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
+
+               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * polaris_table->ucNumEntries;
+
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
+
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+               for (i = 0; i < polaris_table->ucNumEntries; i++) {
+                       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Polaris_SCLK_Dependency_Record,
+                                               entries, polaris_table, i);
+                       sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, sclk_table, i);
+                       sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+                       sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+                       sclk_table_record->clk = sclk_dep_record->ulSclk;
+                       sclk_table_record->cks_enable =
+                               (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+                       sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
+               }
+       }
+       *pp_tonga_sclk_dep_table = sclk_table;
+
+       return 0;
+}
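
Both branches above decode ucCKSVOffsetandDisable the same way: bit 7 carries the clock-stretcher disable flag for the SCLK level, and bits 0..6 carry the CKS voltage offset, matching the field's comment in pptable_v1_0.h. A worked example of the decode, with an assumed raw value:

        /* Sketch: raw = 0x85 -> bit 7 set, so the level's CKS is off. */
        uint8_t raw = 0x85;
        uint8_t cks_enable  = (((raw & 0x80) >> 7) == 0) ? 1 : 0;  /* 0 */
        uint8_t cks_voffset = raw & 0x7F;                          /* 5 */
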
+
+static int get_pcie_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
+               PPTable_Generic_SubTable_Header const *ptable
+               )
+{
+       uint32_t table_size, i, pcie_count;
+       phm_ppt_v1_pcie_table *pcie_table;
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_pcie_record *pcie_record;
+
+       if (ptable->ucRevId < 1) {
+               const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+               ATOM_Tonga_PCIE_Record *atom_pcie_record;
+
+               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) +
+                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
+
+               if (pcie_table == NULL)
+                       return -ENOMEM;
+
+               memset(pcie_table, 0x00, table_size);
+
+               /*
+               * Make sure the number of PCIE entries is less than or equal to the number of SCLK DPM levels.
+               * Since the first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+               */
+               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+               else
+                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceeds the number of "
+                              "SCLK Dpm Levels! Disregarding the excess entries...\n");
+
+               pcie_table->count = pcie_count;
+               for (i = 0; i < pcie_count; i++) {
+                       pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_pcie_record,
+                                               entries, pcie_table, i);
+                       atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_PCIE_Record,
+                                               entries, atom_pcie_table, i);
+                       pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+                       pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+               }
+
+               *pp_tonga_pcie_table = pcie_table;
+       } else {
+               /* Polaris10/Polaris11 and newer. */
+               const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+               ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
+               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) +
+                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
+
+               if (pcie_table == NULL)
+                       return -ENOMEM;
+
+               memset(pcie_table, 0x00, table_size);
+
+               /*
+               * Make sure the number of PCIE entries is less than or equal to the number of SCLK DPM levels.
+               * Since the first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+               */
+               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+               else
+                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceeds the number of "
+                              "SCLK Dpm Levels! Disregarding the excess entries...\n");
+
+               pcie_table->count = pcie_count;
+
+               for (i = 0; i < pcie_count; i++) {
+                       pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_pcie_record,
+                                               entries, pcie_table, i);
+                       atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Polaris10_PCIE_Record,
+                                               entries, atom_pcie_table, i);
+                       pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+                       pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+                       pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
+               }
+
+               *pp_tonga_pcie_table = pcie_table;
+       }
+
+       return 0;
+}
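
The clamp in both branches enforces the invariant spelled out in the comments: the first PCIE entry is reserved for ULV, so at most one entry more than the number of SCLK DPM levels is kept, and any excess VBIOS entries are dropped with a warning. A worked example with assumed counts:

        /* Sketch: 7 SCLK DPM levels allow 7 + 1 (ULV) = 8 PCIE entries;
         * a VBIOS table with 10 entries is clamped to 8. */
        uint32_t sclk_levels = 7;                 /* vdd_dep_on_sclk->count */
        uint32_t pcie_cap    = sclk_levels + 1;
        uint32_t in_vbios    = 10;
        uint32_t used = (in_vbios <= pcie_cap) ? in_vbios : pcie_cap; /* 8 */
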
+
+static int get_cac_tdp_table(
+               struct pp_hwmgr *hwmgr,
+               struct phm_cac_tdp_table **cac_tdp_table,
+               const PPTable_Generic_SubTable_Header * table
+               )
+{
+       uint32_t table_size;
+       struct phm_cac_tdp_table *tdp_table;
+
+       table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
+       tdp_table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == tdp_table)
+               return -ENOMEM;
+
+       memset(tdp_table, 0x00, table_size);
+
+       hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+               kfree(tdp_table);
+               return -ENOMEM;
+       }
+
+       memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
+
+       if (table->ucRevId < 3) {
+               const ATOM_Tonga_PowerTune_Table *tonga_table =
+                       (ATOM_Tonga_PowerTune_Table *)table;
+               tdp_table->usTDP = tonga_table->usTDP;
+               tdp_table->usConfigurableTDP =
+                       tonga_table->usConfigurableTDP;
+               tdp_table->usTDC = tonga_table->usTDC;
+               tdp_table->usBatteryPowerLimit =
+                       tonga_table->usBatteryPowerLimit;
+               tdp_table->usSmallPowerLimit =
+                       tonga_table->usSmallPowerLimit;
+               tdp_table->usLowCACLeakage =
+                       tonga_table->usLowCACLeakage;
+               tdp_table->usHighCACLeakage =
+                       tonga_table->usHighCACLeakage;
+               tdp_table->usMaximumPowerDeliveryLimit =
+                       tonga_table->usMaximumPowerDeliveryLimit;
+               tdp_table->usDefaultTargetOperatingTemp =
+                       tonga_table->usTjMax;
+               tdp_table->usTargetOperatingTemp =
+                       tonga_table->usTjMax; /*Set the initial temp to the same as default */
+               tdp_table->usPowerTuneDataSetID =
+                       tonga_table->usPowerTuneDataSetID;
+               tdp_table->usSoftwareShutdownTemp =
+                       tonga_table->usSoftwareShutdownTemp;
+               tdp_table->usClockStretchAmount =
+                       tonga_table->usClockStretchAmount;
+       } else {   /* Fiji and newer */
+               const ATOM_Fiji_PowerTune_Table *fijitable =
+                       (ATOM_Fiji_PowerTune_Table *)table;
+               tdp_table->usTDP = fijitable->usTDP;
+               tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
+               tdp_table->usTDC = fijitable->usTDC;
+               tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
+               tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
+               tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
+               tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
+               tdp_table->usMaximumPowerDeliveryLimit =
+                       fijitable->usMaximumPowerDeliveryLimit;
+               tdp_table->usDefaultTargetOperatingTemp =
+                       fijitable->usTjMax;
+               tdp_table->usTargetOperatingTemp =
+                       fijitable->usTjMax; /*Set the initial temp to the same as default */
+               tdp_table->usPowerTuneDataSetID =
+                       fijitable->usPowerTuneDataSetID;
+               tdp_table->usSoftwareShutdownTemp =
+                       fijitable->usSoftwareShutdownTemp;
+               tdp_table->usClockStretchAmount =
+                       fijitable->usClockStretchAmount;
+               tdp_table->usTemperatureLimitHotspot =
+                       fijitable->usTemperatureLimitHotspot;
+               tdp_table->usTemperatureLimitLiquid1 =
+                       fijitable->usTemperatureLimitLiquid1;
+               tdp_table->usTemperatureLimitLiquid2 =
+                       fijitable->usTemperatureLimitLiquid2;
+               tdp_table->usTemperatureLimitVrVddc =
+                       fijitable->usTemperatureLimitVrVddc;
+               tdp_table->usTemperatureLimitVrMvdd =
+                       fijitable->usTemperatureLimitVrMvdd;
+               tdp_table->usTemperatureLimitPlx =
+                       fijitable->usTemperatureLimitPlx;
+               tdp_table->ucLiquid1_I2C_address =
+                       fijitable->ucLiquid1_I2C_address;
+               tdp_table->ucLiquid2_I2C_address =
+                       fijitable->ucLiquid2_I2C_address;
+               tdp_table->ucLiquid_I2C_Line =
+                       fijitable->ucLiquid_I2C_Line;
+               tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address;
+               tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line;
+               tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address;
+               tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line;
+       }
+
+       *cac_tdp_table = tdp_table;
+
+       return 0;
+}
+
+static int get_mm_clock_voltage_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table,
+               const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table
+               )
+{
+       uint32_t table_size, i;
+       const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+       phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
+
+       PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
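+       /* A phm dependency table is laid out as a 32-bit count followed by a
+        * flexible array of records, hence the sizing below. */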
+       table_size = sizeof(uint32_t) +
+               sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
+               * mm_dependency_table->ucNumEntries;
+       mm_table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == mm_table)
+               return -ENOMEM;
+
+       mm_table->count = mm_dependency_table->ucNumEntries;
+
+       for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
+               mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_MM_Dependency_Record,
+                                               entries, mm_dependency_table, i);
+               mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_mm_clock_voltage_dependency_record,
+                                       entries, mm_table, i);
+               mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+               mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+               mm_table_record->aclk = mm_dependency_record->ulAClk;
+               mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+               mm_table_record->eclk = mm_dependency_record->ulEClk;
+               mm_table_record->vclk = mm_dependency_record->ulVClk;
+               mm_table_record->dclk = mm_dependency_record->ulDClk;
+       }
+
+       *tonga_mm_table = mm_table;
+
+       return 0;
+}
+
+/**
+ * Private Function used during initialization.
+ * Initialize the clock/voltage dependency tables from the PowerPlay table.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ */
+static int init_clock_voltage_dependency(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       int result = 0;
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
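+       /* Every sub-table is located by a 16-bit little-endian offset from the
+        * start of the PowerPlay table; an offset of 0 means the sub-table is
+        * absent, which the checks below use to skip the corresponding parse. */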
+       const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table =
+               (const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usMMDependencyTableOffset));
+       const PPTable_Generic_SubTable_Header *pPowerTuneTable =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usPowerTuneTableOffset));
+       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+               (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+       const PPTable_Generic_SubTable_Header *sclk_dep_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+       const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
+               (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usHardLimitTableOffset));
+       const PPTable_Generic_SubTable_Header *pcie_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usPCIETableOffset));
+
+       pp_table_information->vdd_dep_on_sclk = NULL;
+       pp_table_information->vdd_dep_on_mclk = NULL;
+       pp_table_information->mm_dep_table = NULL;
+       pp_table_information->pcie_table = NULL;
+
+       if (powerplay_table->usMMDependencyTableOffset != 0)
+               result = get_mm_clock_voltage_table(hwmgr,
+               &pp_table_information->mm_dep_table, mm_dependency_table);
+
+       if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0)
+               result = get_cac_tdp_table(hwmgr,
+               &pp_table_information->cac_dtp_table, pPowerTuneTable);
+
+       if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0)
+               result = get_sclk_voltage_dependency_table(hwmgr,
+               &pp_table_information->vdd_dep_on_sclk, sclk_dep_table);
+
+       if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0)
+               result = get_mclk_voltage_dependency_table(hwmgr,
+               &pp_table_information->vdd_dep_on_mclk, mclk_dep_table);
+
+       if (result == 0 && powerplay_table->usPCIETableOffset != 0)
+               result = get_pcie_table(hwmgr,
+               &pp_table_information->pcie_table, pcie_table);
+
+       if (result == 0 && powerplay_table->usHardLimitTableOffset != 0)
+               result = get_hard_limits(hwmgr,
+               &pp_table_information->max_clock_voltage_on_dc, pHardLimits);
+
+       hwmgr->dyn_state.max_clock_voltage_on_dc.sclk =
+               pp_table_information->max_clock_voltage_on_dc.sclk;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.mclk =
+               pp_table_information->max_clock_voltage_on_dc.mclk;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+               pp_table_information->max_clock_voltage_on_dc.vddc;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.vddci =
+               pp_table_information->max_clock_voltage_on_dc.vddci;
+
+       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk)
+               && (0 != pp_table_information->vdd_dep_on_mclk->count))
+               result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values,
+               pp_table_information->vdd_dep_on_mclk);
+
+       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk)
+               && (0 != pp_table_information->vdd_dep_on_sclk->count))
+               result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
+               pp_table_information->vdd_dep_on_sclk);
+
+       return result;
+}
+
+/** Retrieves the Overdrive limits from the VBIOS.
+ * The max engine clock and max memory clock are read from the PowerPlay
+ * table and placed into the platform descriptor.
+ *
+ * @param hwmgr owner of the platform descriptor to be updated.
+ * @param powerplay_table the address of the PowerPlay table.
+ *
+ * @return Always 0.
+ */
+static int init_over_drive_limits(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
+{
+       hwmgr->platform_descriptor.overdriveLimit.engineClock =
+               le32_to_cpu(powerplay_table->ulMaxODEngineClock);
+       hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+               le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
+
+       hwmgr->platform_descriptor.minOverdriveVDDC = 0;
+       hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
+       hwmgr->platform_descriptor.overdriveVDDCStep = 0;
+
+       if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
+               && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ACOverdriveSupport);
+       }
+
+       return 0;
+}
+
+/**
+ * Private Function used during initialization.
+ * Initialize the thermal controller and fan table settings.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ * @return 0 on success, -1 if the thermal controller or fan table is missing or unsupported.
+ */
+static int init_thermal_controller(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       const PPTable_Generic_SubTable_Header *fan_table;
+       ATOM_Tonga_Thermal_Controller *thermal_controller;
+
+       thermal_controller = (ATOM_Tonga_Thermal_Controller *)
+               (((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usThermalControllerOffset));
+       PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset),
+               "Thermal controller table not set!", return -1);
+
+       hwmgr->thermal_controller.ucType = thermal_controller->ucType;
+       hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
+       hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress;
+
+       hwmgr->thermal_controller.fanInfo.bNoFan =
+               (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN));
+
+       hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+               thermal_controller->ucFanParameters &
+               ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+       hwmgr->thermal_controller.fanInfo.ulMinRPM
+               = thermal_controller->ucFanMinRPM * 100UL;
+       hwmgr->thermal_controller.fanInfo.ulMaxRPM
+               = thermal_controller->ucFanMaxRPM * 100UL;
+
+       set_hw_cap(
+                       hwmgr,
+                       ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+                       PHM_PlatformCaps_ThermalController
+                 );
+
+       if (0 == powerplay_table->usFanTableOffset)
+               return 0;
+
+       fan_table = (const PPTable_Generic_SubTable_Header *)
+               (((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usFanTableOffset));
+
+       PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId),
+               "Unsupported fan table format!", return -1);
+
+       hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
+               = 100000;
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_MicrocodeFanControl);
+
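+       /* Fan table revisions below 8 use the Tonga layout; revision 8 and
+        * above use the Fiji layout, which adds the per-domain fan gain
+        * fields copied at the end of the else branch. */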
+       if (fan_table->ucRevId < 8) {
+               const ATOM_Tonga_Fan_Table *tonga_fan_table =
+                       (ATOM_Tonga_Fan_Table *)fan_table;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+                       = tonga_fan_table->ucTHyst;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+                       = tonga_fan_table->usTMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+                       = tonga_fan_table->usTMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+                       = tonga_fan_table->usTHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+                       = tonga_fan_table->usPWMMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+                       = tonga_fan_table->usPWMMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+                       = tonga_fan_table->usPWMHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+                       = tonga_fan_table->usTMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+                       = tonga_fan_table->ucFanControlMode;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+                       = tonga_fan_table->usFanPWMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+                       = 4836;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+                       = tonga_fan_table->usFanOutputSensitivity;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+                       = tonga_fan_table->usFanRPMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+                       = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units (two decimal places of MHz); the SMC wants MHz. */
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+                       = tonga_fan_table->ucTargetTemperature;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+                       = tonga_fan_table->ucMinimumPWMLimit;
+       } else {
+               const ATOM_Fiji_Fan_Table *fiji_fan_table =
+                       (ATOM_Fiji_Fan_Table *)fan_table;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+                       = fiji_fan_table->ucTHyst;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+                       = fiji_fan_table->usTMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+                       = fiji_fan_table->usTMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+                       = fiji_fan_table->usTHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+                       = fiji_fan_table->usPWMMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+                       = fiji_fan_table->usPWMMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+                       = fiji_fan_table->usPWMHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+                       = fiji_fan_table->usTMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+                       = fiji_fan_table->ucFanControlMode;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+                       = fiji_fan_table->usFanPWMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+                       = 4836;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+                       = fiji_fan_table->usFanOutputSensitivity;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+                       = fiji_fan_table->usFanRPMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+                       = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units (two decimal places of MHz); the SMC wants MHz. */
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+                       = fiji_fan_table->ucTargetTemperature;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+                       = fiji_fan_table->ucMinimumPWMLimit;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
+                       = fiji_fan_table->usFanGainEdge;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
+                       = fiji_fan_table->usFanGainHotspot;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
+                       = fiji_fan_table->usFanGainLiquid;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
+                       = fiji_fan_table->usFanGainVrVddc;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
+                       = fiji_fan_table->usFanGainVrMvdd;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
+                       = fiji_fan_table->usFanGainPlx;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
+                       = fiji_fan_table->usFanGainHbm;
+       }
+
+       return 0;
+}
+
+/**
+ * Private Function used during initialization.
+ * Inspect the PowerPlay table for obvious signs of corruption.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ * @return 0 if the table passes inspection, -1 if it is corrupt.
+ */
+static int check_powerplay_tables(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       const ATOM_Tonga_State_Array *state_arrays;
+
+       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usStateArrayOffset));
+
+       PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <=
+               powerplay_table->sHeader.ucTableFormatRevision),
+               "Unsupported PPTable format!", return -1);
+       PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset),
+               "State table is not set!", return -1);
+       PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize),
+               "Invalid PowerPlay Table!", return -1);
+       PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
+
+       return 0;
+}
+
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+{
+       int result = 0;
+       const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
+
+       hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL);
+
+       PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
+                           "Failed to allocate hwmgr->pptable!", return -ENOMEM);
+
+       powerplay_table = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((NULL != powerplay_table),
+               "Missing PowerPlay Table!", return -1);
+
+       result = check_powerplay_tables(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "check_powerplay_tables failed", return result);
+
+       result = set_platform_caps(hwmgr,
+                                  le32_to_cpu(powerplay_table->ulPlatformCaps));
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "set_platform_caps failed", return result);
+
+       result = init_thermal_controller(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_thermal_controller failed", return result);
+
+       result = init_over_drive_limits(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_over_drive_limits failed", return result);
+
+       result = init_clock_voltage_dependency(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_clock_voltage_dependency failed", return result);
+
+       result = init_dpm_2_parameters(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_dpm_2_parameters failed", return result);
+
+       return result;
+}
+
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       kfree(pp_table_information->vdd_dep_on_sclk);
+       pp_table_information->vdd_dep_on_sclk = NULL;
+
+       kfree(pp_table_information->vdd_dep_on_mclk);
+       pp_table_information->vdd_dep_on_mclk = NULL;
+
+       kfree(pp_table_information->valid_mclk_values);
+       pp_table_information->valid_mclk_values = NULL;
+
+       kfree(pp_table_information->valid_sclk_values);
+       pp_table_information->valid_sclk_values = NULL;
+
+       kfree(pp_table_information->vddc_lookup_table);
+       pp_table_information->vddc_lookup_table = NULL;
+
+       kfree(pp_table_information->vddgfx_lookup_table);
+       pp_table_information->vddgfx_lookup_table = NULL;
+
+       kfree(pp_table_information->mm_dep_table);
+       pp_table_information->mm_dep_table = NULL;
+
+       kfree(pp_table_information->cac_dtp_table);
+       pp_table_information->cac_dtp_table = NULL;
+
+       kfree(hwmgr->dyn_state.cac_dtp_table);
+       hwmgr->dyn_state.cac_dtp_table = NULL;
+
+       kfree(pp_table_information->ppm_parameter_table);
+       pp_table_information->ppm_parameter_table = NULL;
+
+       kfree(pp_table_information->pcie_table);
+       pp_table_information->pcie_table = NULL;
+
+       kfree(hwmgr->pptable);
+       hwmgr->pptable = NULL;
+
+       return 0;
+}
+
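+/* Dispatch table through which the rest of PowerPlay initializes and tears
+ * down a v1.0 (Tonga/Fiji-style) PowerPlay table. */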
+const struct pp_table_func pptable_v1_0_funcs = {
+       .pptable_init = pp_tables_v1_0_initialize,
+       .pptable_fini = pp_tables_v1_0_uninitialize,
+};
+
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+       const ATOM_Tonga_State_Array *state_arrays;
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((NULL != pp_table),
+                       "Missing PowerPlay Table!", return -1);
+       PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >=
+                       ATOM_Tonga_TABLE_REVISION_TONGA),
+                       "Incorrect PowerPlay table revision!", return -1);
+
+       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
+                       le16_to_cpu(pp_table->usStateArrayOffset));
+
+       return (uint32_t)(state_arrays->ucNumEntries);
+}
+
+/**
+ * Private function to convert flags stored in the BIOS to software flags in PowerPlay.
+ */
+static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
+               uint16_t classification, uint16_t classification2)
+{
+       uint32_t result = 0;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
+               result |= PP_StateClassificationFlag_Boot;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+               result |= PP_StateClassificationFlag_Thermal;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+               result |= PP_StateClassificationFlag_LimitedPowerSource;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
+               result |= PP_StateClassificationFlag_Rest;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
+               result |= PP_StateClassificationFlag_Forced;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
+               result |= PP_StateClassificationFlag_ACPI;
+
+       if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+               result |= PP_StateClassificationFlag_LimitedPowerSource_2;
+
+       return result;
+}
+
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+       const ATOM_Tonga_VCE_State_Table *vce_state_table;
+
+       /* pp_table + offset is never NULL; check the offset itself so a
+        * missing VCE state table is actually detected. */
+       if (pp_table == NULL || pp_table->usVCEStateTableOffset == 0)
+               return 0;
+
+       vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table)
+                               + le16_to_cpu(pp_table->usVCEStateTableOffset));
+
+       return vce_state_table->ucNumEntries;
+}
+
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+               struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+       const ATOM_Tonga_VCE_State_Record *vce_state_record;
+       ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+       ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+       ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+       const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+       const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usVCEStateTableOffset));
+       const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+       const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+       PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+                        "Requested state entry ID is out of range!",
+                        return -EINVAL);
+
+       vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_VCE_State_Record,
+                                       entries, vce_state_table, i);
+       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_SCLK_Dependency_Record,
+                                       entries, sclk_dep_table,
+                                       vce_state_record->ucSCLKIndex);
+       mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MM_Dependency_Record,
+                                       entries, mm_dep_table,
+                                       vce_state_record->ucVCEClockIndex);
+       *flag = vce_state_record->ucFlag;
+
+       vce_state->evclk = mm_dep_record->ulEClk;
+       vce_state->ecclk = mm_dep_record->ulEClk;
+       vce_state->sclk = sclk_dep_record->ulSclk;
+
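+       /* A VCE state may reference an MCLK level beyond the dependency
+        * table; clamp to the highest defined entry rather than failing. */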
+       if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                       entries, mclk_dep_table,
+                                       mclk_dep_table->ucNumEntries - 1);
+       else
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                       entries, mclk_dep_table,
+                                       vce_state_record->ucMCLKIndex);
+
+       vce_state->mclk = mclk_dep_record->ulMclk;
+       return 0;
+}
+
+/**
+ * Create a Power State out of an entry in the PowerPlay table.
+ * This function is called by the hardware back-end.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param entry_index The index of the entry to be extracted from the table.
+ * @param power_state The address of the PowerState instance being created.
+ * @return -1 if the entry cannot be retrieved.
+ */
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
+               uint32_t entry_index, struct pp_power_state *power_state,
+               int (*call_back_func)(struct pp_hwmgr *, void *,
+                               struct pp_power_state *, void *, uint32_t))
+{
+       int result = 0;
+       const ATOM_Tonga_State_Array *state_arrays;
+       const ATOM_Tonga_State *state_entry;
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+       int i, j;
+       uint32_t flags = 0;
+
+       PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1);
+       power_state->classification.bios_index = entry_index;
+
+       if (pp_table->sHeader.ucTableFormatRevision >=
+                       ATOM_Tonga_TABLE_REVISION_TONGA) {
+               state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
+                               le16_to_cpu(pp_table->usStateArrayOffset));
+
+               PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset),
+                               "Invalid PowerPlay Table State Array Offset.", return -1);
+               PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
+                               "Invalid PowerPlay Table State Array.", return -1);
+               PP_ASSERT_WITH_CODE((entry_index < state_arrays->ucNumEntries),
+                               "Invalid PowerPlay Table State Array Entry.", return -1);
+
+               state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_State, entries,
+                                               state_arrays, entry_index);
+
+               result = call_back_func(hwmgr, (void *)state_entry, power_state,
+                               (void *)pp_table,
+                               make_classification_flags(hwmgr,
+                                       le16_to_cpu(state_entry->usClassification),
+                                       le16_to_cpu(state_entry->usClassification2)));
+       }
+
+       if (!result && (power_state->classification.flags &
+                       PP_StateClassificationFlag_Boot))
+               result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
+
+       hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+       if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+               for (j = 0; j < i; j++)
+                       ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+       }
+
+       return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
new file mode 100644 (file)
index 0000000..b9710ab
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
+
+#include "hwmgr.h"
+
+extern const struct pp_table_func pptable_v1_0_funcs;
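+/* Assumption based on usage elsewhere in PowerPlay (not shown in this hunk):
+ * a hwmgr back-end that consumes v1.0 tables points its pp_table_func at
+ * pptable_v1_0_funcs during initialization. */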
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+               struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
+                               struct pp_power_state *, void *, uint32_t));
+
+#endif
+
index 6c321b0d8a1eb8483c45bf563f79adb000a87865..ccf7ebeaf89267a830c1f2a0d3ecb1f22fe693f6 100644 (file)
@@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries(
 
 int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
                                                        unsigned long i,
-                                                       struct PP_VCEState *vce_state,
+                                                       struct pp_vce_state *vce_state,
                                                        void **clock_info,
                                                        unsigned long *flag)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
new file mode 100644 (file)
index 0000000..6eb6db1
--- /dev/null
@@ -0,0 +1,488 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smu7_hwmgr.h"
+#include "smu7_clockpowergating.h"
+#include "smu7_common.h"
+
+static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+                       PPSMC_MSG_UVDDPM_Enable :
+                       PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+                       PPSMC_MSG_VCEDPM_Enable :
+                       PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+                       PPSMC_MSG_SAMUDPM_Enable :
+                       PPSMC_MSG_SAMUDPM_Disable);
+}
+
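+/* When ungating, refresh the block's SMC table before re-enabling its DPM;
+ * when gating, the update is skipped since the block is about to be powered
+ * down anyway. */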
+static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       if (!bgate)
+               smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
+       return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       if (!bgate)
+               smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
+       return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       if (!bgate)
+               smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
+       return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
+}
+
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cf_want_uvd_power_gating(hwmgr))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_UVDPowerOFF);
+       return 0;
+}
+
+int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cf_want_uvd_power_gating(hwmgr))
+               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_UVDPowerON,
+                               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+
+       return 0;
+}
+
+int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cf_want_vce_power_gating(hwmgr))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_VCEPowerOFF);
+       return 0;
+}
+
+int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cf_want_vce_power_gating(hwmgr))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_VCEPowerON);
+       return 0;
+}
+
+int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SamuPowerGating))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_SAMPowerOFF);
+       return 0;
+}
+
+int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SamuPowerGating))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_SAMPowerON);
+       return 0;
+}
+
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->uvd_power_gated = false;
+       data->vce_power_gated = false;
+       data->samu_power_gated = false;
+
+       smu7_powerup_uvd(hwmgr);
+       smu7_powerup_vce(hwmgr);
+       smu7_powerup_samu(hwmgr);
+
+       return 0;
+}
+
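+/* Ordering matters when gating: clock-gate the block first, then stop its
+ * DPM and cut power. Ungating reverses the sequence so the block is powered
+ * and its DPM restarted before clocks are ungated. */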
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->uvd_power_gated = bgate;
+
+       if (bgate) {
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_CG_STATE_GATE);
+               smu7_update_uvd_dpm(hwmgr, true);
+               smu7_powerdown_uvd(hwmgr);
+       } else {
+               smu7_powerup_uvd(hwmgr);
+               smu7_update_uvd_dpm(hwmgr, false);
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_CG_STATE_UNGATE);
+       }
+
+       return 0;
+}
+
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->vce_power_gated == bgate)
+               return 0;
+
+       data->vce_power_gated = bgate;
+
+       if (bgate) {
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_VCE,
+                               AMD_CG_STATE_GATE);
+               smu7_update_vce_dpm(hwmgr, true);
+               smu7_powerdown_vce(hwmgr);
+       } else {
+               smu7_powerup_vce(hwmgr);
+               smu7_update_vce_dpm(hwmgr, false);
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_VCE,
+                               AMD_CG_STATE_UNGATE);
+       }
+       return 0;
+}
+
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->samu_power_gated == bgate)
+               return 0;
+
+       data->samu_power_gated = bgate;
+
+       if (bgate) {
+               smu7_update_samu_dpm(hwmgr, true);
+               smu7_powerdown_samu(hwmgr);
+       } else {
+               smu7_powerup_samu(hwmgr);
+               smu7_update_samu_dpm(hwmgr, false);
+       }
+
+       return 0;
+}
+
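+/* msg_id packs the target into bit fields: a group (PP_GROUP_MASK/SHIFT), a
+ * block within that group (PP_BLOCK_MASK/SHIFT), the supported transitions
+ * (PP_STATE_SUPPORT_CG/LS) and the requested state (PP_STATE_MASK).
+ * Illustrative example (not part of this patch): enabling coarse clock
+ * gating on the GFX CG block would use a msg_id built roughly as
+ *   (PP_GROUP_GFX << PP_GROUP_SHIFT) | (PP_BLOCK_GFX_CG << PP_BLOCK_SHIFT) |
+ *   PP_STATE_SUPPORT_CG | PP_STATE_CG
+ * which the switch below maps to PPSMC_MSG_EnableClockGatingFeature with
+ * CG_GFX_CGCG_MASK. */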
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+                                       const uint32_t *msg_id)
+{
+       PPSMC_Msg msg;
+       uint32_t value;
+
+       if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
+               return 0;
+
+       switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
+       case PP_GROUP_GFX:
+               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+               case PP_BLOCK_GFX_CG:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_CGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
+                                       ? PPSMC_MSG_EnableClockGatingFeature
+                                       : PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_CGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_GFX_3D:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_3DCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_3DLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_GFX_RLC:
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_RLC_LS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_GFX_CP:
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_GFX_CP_LS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_GFX_MG:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
+                                               CG_GFX_OTHERS_MGCG_MASK);
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+               break;
+
+       case PP_GROUP_SYS:
+               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+               case PP_BLOCK_SYS_BIF:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_BIF_MGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_BIF_MGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_SYS_MC:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_MC_MGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_MC_MGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_SYS_DRM:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_DRM_MGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_DRM_MGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_SYS_HDP:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_HDP_MGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_HDP_MGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_SYS_SDMA:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_SDMA_MGCG_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+
+                       if (PP_STATE_SUPPORT_LS & *msg_id) {
+                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_SDMA_MGLS_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               case PP_BLOCK_SYS_ROM:
+                       if (PP_STATE_SUPPORT_CG & *msg_id) {
+                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+                                               PPSMC_MSG_EnableClockGatingFeature :
+                                               PPSMC_MSG_DisableClockGatingFeature;
+                               value = CG_SYS_ROM_MASK;
+
+                               if (smum_send_msg_to_smc_with_parameter(
+                                               hwmgr->smumgr, msg, value))
+                                       return -EINVAL;
+                       }
+                       break;
+
+               default:
+                       return -EINVAL;
+
+               }
+               break;
+
+       default:
+               return -EINVAL;
+
+       }
+
+       return 0;
+}
+
+/* This function is for Polaris11 only for now.
+ * PowerPlay only controls static per-CU power gating;
+ * dynamic per-CU power gating is handled by the gfx block.
+ */
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+{
+       struct cgs_system_info sys_info = {0};
+       uint32_t active_cus;
+       int result;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
+
+       result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+       if (result)
+               return -EINVAL;
+
+       active_cus = sys_info.value;
+
+       if (enable)
+               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
+       else
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_GFX_CU_PG_DISABLE);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
new file mode 100644 (file)
index 0000000..d52a28c
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_CLOCK_POWER_GATING_H_
+#define _SMU7_CLOCK_POWER_GATING_H_
+
+#include "smu7_hwmgr.h"
+#include "pp_asicblocks.h"
+
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+                                       const uint32_t *msg_id);
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
new file mode 100644 (file)
index 0000000..f967613
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_DYN_DEFAULTS_H
+#define _SMU7_DYN_DEFAULTS_H
+
+
+/* Default values for the SMU7 dynamic state setup. */
+
+
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1              0x000400
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000
+
+
+#define SMU7_THERMALPROTECTCOUNTER_DFLT            0x200
+#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT        0
+#define SMU7_STATICSCREENTHRESHOLD_DFLT            0x00C8
+#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200
+#define SMU7_REFERENCEDIVIDER_DFLT                  4
+
+#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT             1687
+
+#define SMU7_CGULVPARAMETER_DFLT                    0x00040035
+#define SMU7_CGULVCONTROL_DFLT                      0x00007450
+#define SMU7_TARGETACTIVITY_DFLT                     50
+#define SMU7_MCLK_TARGETACTIVITY_DFLT                10
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
new file mode 100644 (file)
index 0000000..a3832f2
--- /dev/null
@@ -0,0 +1,4359 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include <linux/delay.h>
+#include "pp_acpi.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pptable_v1_0.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "process_pptables_v1_0.h"
+#include "cgs_common.h"
+
+#include "smu7_common.h"
+
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
+#include "smu7_dyn_defaults.h"
+#include "smu7_thermal.h"
+#include "smu7_clockpowergating.h"
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+#define SMC_CG_IND_START            0xc0030000
+#define SMC_CG_IND_END              0xc0040000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+
+#define MEM_FREQ_LOW_LATENCY        25000
+#define MEM_FREQ_HIGH_LATENCY       80000
+
+#define MEM_LATENCY_HIGH            45
+#define MEM_LATENCY_LOW             35
+#define MEM_LATENCY_ERR             0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
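+
+/*
+ * TCLK is derived from the PCIe bus clock; it is returned as the
+ * XCLK source when the SMC muxes TCLK to XCLK (see smu7_get_xclk()
+ * below).
+ */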
+
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+       DPM_EVENT_SRC_ANALOG = 0,
+       DPM_EVENT_SRC_EXTERNAL = 1,
+       DPM_EVENT_SRC_DIGITAL = 2,
+       DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+       DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
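+
+/*
+ * The magic value tags smu7 power states so that the cast helpers
+ * below can validate a pp_hw_power_state before downcasting it.
+ */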
+
+struct smu7_power_state *cast_phw_smu7_power_state(
+                                 struct pp_hw_power_state *hw_ps)
+{
+       PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+                               "Invalid Powerstate Type!",
+                                return NULL);
+
+       return (struct smu7_power_state *)hw_ps;
+}
+
+const struct smu7_power_state *cast_const_phw_smu7_power_state(
+                                const struct pp_hw_power_state *hw_ps)
+{
+       PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+                               "Invalid Powerstate Type!",
+                                return NULL);
+
+       return (const struct smu7_power_state *)hw_ps;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
+{
+       cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+       hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+       return 0;
+}
+
+uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+       uint32_t speedCntl = 0;
+
+       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+       speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+                       ixPCIE_LC_SPEED_CNTL);
+       return((uint16_t)PHM_GET_FIELD(speedCntl,
+                       PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+       uint32_t link_width;
+
+       /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+       link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+                       PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+       PP_ASSERT_WITH_CODE((7 >= link_width),
+                       "Invalid PCIe lane width!", return 0);
+
+       return decode_pcie_lane_width(link_width);
+}
+
+/**
+* Enable the SMC voltage controller
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+       if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+
+       return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+*/
+static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+       const struct smu7_hwmgr *data =
+                       (const struct smu7_hwmgr *)(hwmgr->backend);
+
+       return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+       /* enable voltage control */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+       return 0;
+}
+
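+/*
+ * Build a voltage table directly from a V0 clock/voltage dependency
+ * table; used on the SVI2 paths below, where no GPIO lookup table is
+ * available.
+ */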
+static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
+               struct phm_clock_voltage_dependency_table *voltage_dependency_table)
+{
+       uint32_t i;
+
+       PP_ASSERT_WITH_CODE((NULL != voltage_table),
+                       "Voltage Table empty.", return -EINVAL);
+
+       voltage_table->mask_low = 0;
+       voltage_table->phase_delay = 0;
+       voltage_table->count = voltage_dependency_table->count;
+
+       for (i = 0; i < voltage_dependency_table->count; i++) {
+               voltage_table->entries[i].value =
+                       voltage_dependency_table->entries[i].v;
+               voltage_table->entries[i].smio_low = 0;
+       }
+
+       return 0;
+}
+
+
+/**
+* Create Voltage Tables.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       int result = 0;
+       uint32_t tmp;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               result = atomctrl_get_voltage_table_v3(hwmgr,
+                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+                               &(data->mvdd_voltage_table));
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to retrieve MVDD table.",
+                               return result);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+               if (hwmgr->pp_table_version == PP_TABLE_V1)
+                       result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+                                       table_info->vdd_dep_on_mclk);
+               else if (hwmgr->pp_table_version == PP_TABLE_V0)
+                       result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
+                                       hwmgr->dyn_state.mvdd_dependency_on_mclk);
+
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to retrieve SVI2 MVDD table from dependency table.",
+                               return result);
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               result = atomctrl_get_voltage_table_v3(hwmgr,
+                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+                               &(data->vddci_voltage_table));
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to retrieve VDDCI table.",
+                               return result);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               if (hwmgr->pp_table_version == PP_TABLE_V1)
+                       result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+                                       table_info->vdd_dep_on_mclk);
+               else if (hwmgr->pp_table_version == PP_TABLE_V0)
+                       result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
+                                       hwmgr->dyn_state.vddci_dependency_on_mclk);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to retrieve SVI2 VDDCI table from dependency table.",
+                               return result);
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+               /* VDDGFX has only SVI2 voltage control */
+               result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
+                                       table_info->vddgfx_lookup_table);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result);
+       }
+
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+               result = atomctrl_get_voltage_table_v3(hwmgr,
+                                       VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
+                                       &data->vddc_voltage_table);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "Failed to retrieve VDDC table.", return result);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+
+               if (hwmgr->pp_table_version == PP_TABLE_V0)
+                       result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
+                                       hwmgr->dyn_state.vddc_dependency_on_mclk);
+               else if (hwmgr->pp_table_version == PP_TABLE_V1)
+                       result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+                               table_info->vddc_lookup_table);
+
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "Failed to retrieve SVI2 VDDC table from dependency table.", return result);
+       }
+
+       tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+       PP_ASSERT_WITH_CODE(
+                       (data->vddc_voltage_table.count <= tmp),
+               "Too many voltage values for VDDC. Trimming to fit state table.",
+                       phm_trim_voltage_table_to_fit_state_table(tmp,
+                                               &(data->vddc_voltage_table)));
+
+       tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+       PP_ASSERT_WITH_CODE(
+                       (data->vddgfx_voltage_table.count <= tmp),
+               "Too many voltage values for VDDGFX. Trimming to fit state table.",
+                       phm_trim_voltage_table_to_fit_state_table(tmp,
+                                               &(data->vddgfx_voltage_table)));
+
+       tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+       PP_ASSERT_WITH_CODE(
+                       (data->vddci_voltage_table.count <= tmp),
+               "Too many voltage values for VDDCI. Trimming to fit state table.",
+                       phm_trim_voltage_table_to_fit_state_table(tmp,
+                                       &(data->vddci_voltage_table)));
+
+       tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+       PP_ASSERT_WITH_CODE(
+                       (data->mvdd_voltage_table.count <= tmp),
+               "Too many voltage values for MVDD. Trimming to fit state table.",
+                       phm_trim_voltage_table_to_fit_state_table(tmp,
+                                               &(data->mvdd_voltage_table)));
+
+       return 0;
+}
+
+/**
+* Programs static screen detection parameters
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_program_static_screen_threshold_parameters(
+                                                       struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* Set static screen threshold unit */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+                       data->static_screen_threshold_unit);
+       /* Set static screen threshold */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+                       data->static_screen_threshold);
+
+       return 0;
+}
+
+/**
+* Setup display gap for glitch free memory clock switching.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+       uint32_t display_gap =
+                       cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       ixCG_DISPLAY_GAP_CNTL);
+
+       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+                       DISP_GAP, DISPLAY_GAP_IGNORE);
+
+       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+                       DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+       return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* Clear reset for voting clients before enabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+       return 0;
+}
+
+static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       /* Reset voting clients before disabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, 0);
+
+       return 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+               uint32_t arb_src, uint32_t arb_dest)
+{
+       uint32_t mc_arb_dram_timing;
+       uint32_t mc_arb_dram_timing2;
+       uint32_t burst_time;
+       uint32_t mc_cg_config;
+
+       switch (arb_src) {
+       case MC_CG_ARB_FREQ_F0:
+               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (arb_dest) {
+       case MC_CG_ARB_FREQ_F0:
+               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+       mc_cg_config |= 0x0000000F;
+       cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+       PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+       return 0;
+}
+
+static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* This function is to be called from the SetPowerState table.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+       return smu7_copy_and_switch_arb_sets(hwmgr,
+                       MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+       uint32_t tmp;
+
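+       /* Bits 15:8 of SMC_SCRATCH9 track which ARB set is currently active. */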
+       tmp = (cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+                       0x0000ff00) >> 8;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return smu7_copy_and_switch_arb_sets(hwmgr,
+                       tmp, MC_CG_ARB_FREQ_F0);
+}
+
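+/*
+ * Build the PCIe speed DPM table, either from the PPTable's PCIe
+ * entries or, failing that, from a hardcoded six-level gen/lane
+ * fallback; the slot just past 'count' is reserved for the boot level.
+ */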
+static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_pcie_table *pcie_table = NULL;
+
+       uint32_t i, max_entry;
+       uint32_t tmp;
+
+       PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+                       data->use_pcie_power_saving_levels), "No pcie performance levels!",
+                       return -EINVAL);
+
+       if (table_info != NULL)
+               pcie_table = table_info->pcie_table;
+
+       if (data->use_pcie_performance_levels &&
+                       !data->use_pcie_power_saving_levels) {
+               data->pcie_gen_power_saving = data->pcie_gen_performance;
+               data->pcie_lane_power_saving = data->pcie_lane_performance;
+       } else if (!data->use_pcie_performance_levels &&
+                       data->use_pcie_power_saving_levels) {
+               data->pcie_gen_performance = data->pcie_gen_power_saving;
+               data->pcie_lane_performance = data->pcie_lane_power_saving;
+       }
+       tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+       phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+                                       tmp,
+                                       MAX_REGULAR_DPM_NUMBER);
+
+       if (pcie_table != NULL) {
+               /* max_entry is used to make sure we reserve one PCIe
+                * level for the boot level (fix for A+A PSPP issue).
+                * If the PCIe table from the PPTable has a ULV entry
+                * plus 8 entries, ignore the last entry.
+                */
+               max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
+               for (i = 1; i < max_entry; i++) {
+                       phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+                                       get_pcie_gen_support(data->pcie_gen_cap,
+                                                       pcie_table->entries[i].gen_speed),
+                                       get_pcie_lane_support(data->pcie_lane_cap,
+                                                       pcie_table->entries[i].lane_width));
+               }
+               data->dpm_table.pcie_speed_table.count = max_entry - 1;
+               smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
+       } else {
+               /* Hardcoded PCIe table */
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Min_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Min_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Max_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Max_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Max_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+               phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+                               get_pcie_gen_support(data->pcie_gen_cap,
+                                               PP_Max_PCIEGen),
+                               get_pcie_lane_support(data->pcie_lane_cap,
+                                               PP_Max_PCIELane));
+
+               data->dpm_table.pcie_speed_table.count = 6;
+       }
+       /* Populate the last level with the boot PCIe level, but do not increment count. */
+       phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+                       data->dpm_table.pcie_speed_table.count,
+                       get_pcie_gen_support(data->pcie_gen_cap,
+                                       PP_Min_PCIEGen),
+                       get_pcie_lane_support(data->pcie_lane_cap,
+                                       PP_Max_PCIELane));
+
+       return 0;
+}
+
+static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+
+       phm_reset_single_dpm_table(
+                       &data->dpm_table.sclk_table,
+                               smum_get_mac_definition(hwmgr->smumgr,
+                                       SMU_MAX_LEVELS_GRAPHICS),
+                                       MAX_REGULAR_DPM_NUMBER);
+       phm_reset_single_dpm_table(
+                       &data->dpm_table.mclk_table,
+                       smum_get_mac_definition(hwmgr->smumgr,
+                               SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
+
+       phm_reset_single_dpm_table(
+                       &data->dpm_table.vddc_table,
+                               smum_get_mac_definition(hwmgr->smumgr,
+                                       SMU_MAX_LEVELS_VDDC),
+                                       MAX_REGULAR_DPM_NUMBER);
+       phm_reset_single_dpm_table(
+                       &data->dpm_table.vddci_table,
+                       smum_get_mac_definition(hwmgr->smumgr,
+                               SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
+
+       phm_reset_single_dpm_table(
+                       &data->dpm_table.mvdd_table,
+                               smum_get_mac_definition(hwmgr->smumgr,
+                                       SMU_MAX_LEVELS_MVDD),
+                                       MAX_REGULAR_DPM_NUMBER);
+       return 0;
+}
+
+/*
+ * Initialize all DPM state tables for SMU7 based on the dependency
+ * table. The dynamic state patching function will then trim these
+ * state tables to the allowed range based on the power policy or
+ * external client requests, such as UVD requests.
+ */
+static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+               hwmgr->dyn_state.vddc_dependency_on_sclk;
+       struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+               hwmgr->dyn_state.vddc_dependency_on_mclk;
+       struct phm_cac_leakage_table *std_voltage_table =
+               hwmgr->dyn_state.cac_leakage_table;
+       uint32_t i;
+
+       PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+               "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+               "SCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+               "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+               "MCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
+
+
+       /* Initialize SCLK DPM table based on allowed SCLK values */
+       data->dpm_table.sclk_table.count = 0;
+
+       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+               if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+                               allowed_vdd_sclk_table->entries[i].clk) {
+                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+                               allowed_vdd_sclk_table->entries[i].clk;
+                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /* TODO: (i == 0) ? 1 : 0 */
+                       data->dpm_table.sclk_table.count++;
+               }
+       }
+
+       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+               "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+       /* Initialize MCLK DPM table based on allowed MCLK values */
+       data->dpm_table.mclk_table.count = 0;
+       for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+               if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+                       allowed_vdd_mclk_table->entries[i].clk) {
+                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+                               allowed_vdd_mclk_table->entries[i].clk;
+                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /* TODO: (i == 0) ? 1 : 0 */
+                       data->dpm_table.mclk_table.count++;
+               }
+       }
+
+       /* Initialize VDDC DPM table based on allowed VDDC values and populate corresponding std values. */
+       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+               data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_sclk_table->entries[i].v;
+               data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
+               /* param1 is for corresponding std voltage */
+               data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+       }
+
+       data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
+       allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+       if (NULL != allowed_vdd_mclk_table) {
+               /* Initialize VDDCI DPM table based on allowed MCLK values */
+               for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+                       data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+                       data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+               }
+               data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
+       }
+
+       allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
+
+       if (NULL != allowed_vdd_mclk_table) {
+               /*
+                * Initialize MVDD DPM table based on allow Mclk
+                * values
+                */
+               for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+                       data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+                       data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+               }
+               data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
+       }
+
+       return 0;
+}
+
+static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i;
+
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+
+       if (table_info == NULL)
+               return -EINVAL;
+
+       dep_sclk_table = table_info->vdd_dep_on_sclk;
+       dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+       PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+                       "SCLK dependency table is missing.",
+                       return -EINVAL);
+       PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+                       "SCLK dependency table count is 0.",
+                       return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+                       "MCLK dependency table is missing.",
+                       return -EINVAL);
+       PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+                       "MCLK dependency table count is 0",
+                       return -EINVAL);
+
+       /* Initialize SCLK DPM table based on allowed SCLK values */
+       data->dpm_table.sclk_table.count = 0;
+       for (i = 0; i < dep_sclk_table->count; i++) {
+               if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+                                               dep_sclk_table->entries[i].clk) {
+
+                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+                                       dep_sclk_table->entries[i].clk;
+
+                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+                                       (i == 0);
+                       data->dpm_table.sclk_table.count++;
+               }
+       }
+
+       /* Initialize MCLK DPM table based on allowed MCLK values */
+       data->dpm_table.mclk_table.count = 0;
+       for (i = 0; i < dep_mclk_table->count; i++) {
+               if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+                               [data->dpm_table.mclk_table.count - 1].value !=
+                                               dep_mclk_table->entries[i].clk) {
+                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+                                                       dep_mclk_table->entries[i].clk;
+                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+                                                       (i == 0);
+                       data->dpm_table.mclk_table.count++;
+               }
+       }
+
+       return 0;
+}
+
+int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       smu7_reset_dpm_tables(hwmgr);
+
+       if (hwmgr->pp_table_version == PP_TABLE_V1)
+               smu7_setup_dpm_tables_v1(hwmgr);
+       else if (hwmgr->pp_table_version == PP_TABLE_V0)
+               smu7_setup_dpm_tables_v0(hwmgr);
+
+       smu7_setup_default_pcie_table(hwmgr);
+
+       /* save a copy of the default DPM table */
+       memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+                       sizeof(struct smu7_dpm_table));
+       return 0;
+}
+
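+/*
+ * Return the reference clock: TCLK if the SMC muxes TCLK to XCLK,
+ * otherwise the display reference clock, divided by four when
+ * XTALIN_DIVIDE is set.
+ */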
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
+{
+       uint32_t reference_clock, tmp;
+       struct cgs_display_info info = {0};
+       struct cgs_mode_info mode_info;
+
+       info.mode_info = &mode_info;
+
+       tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+       if (tmp)
+               return TCLK;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       reference_clock = mode_info.ref_clock;
+
+       tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+       if (0 != tmp)
+               return reference_clock / 4;
+
+       return reference_clock;
+}
+
+static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot))
+               return smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+       return 0;
+}
+
+static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       SCLK_PWRMGT_OFF, 0);
+       return 0;
+}
+
+static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+       return 0;
+}
+
+static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+       return 0;
+}
+
+static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to enable Master Deep Sleep switch failed!",
+                                       return -EINVAL);
+       } else {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -EINVAL);
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -EINVAL);
+               }
+       }
+
+       return 0;
+}
+
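+/*
+ * Set the UVD MCLK handshake disable bit in the SMU soft registers;
+ * called before enabling MCLK DPM when the UVD handshake feature is
+ * masked off.
+ */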
+static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t soft_register_value = 0;
+       uint32_t handshake_disables_offset = data->soft_regs_start
+                               + smum_get_offsetof(hwmgr->smumgr,
+                                       SMU_SoftRegisters, HandshakeDisables);
+
+       soft_register_value = cgs_read_ind_register(hwmgr->device,
+                               CGS_IND_REG__SMC, handshake_disables_offset);
+       soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+                                       SMU_UVD_MCLK_HANDSHAKE_DISABLE);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       handshake_disables_offset, soft_register_value);
+       return 0;
+}
+
+static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* enable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+               (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+               "Failed to enable SCLK DPM during DPM Start Function!",
+               return -EINVAL);
+
+       /* enable MCLK dpm */
+       if (0 == data->mclk_dpm_key_disabled) {
+               if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
+                       smu7_disable_handshake_uvd(hwmgr);
+               PP_ASSERT_WITH_CODE(
+                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Enable)),
+                               "Failed to enable MCLK DPM during DPM Start Function!",
+                               return -EINVAL);
+
+               PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+               udelay(10);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+       }
+
+       return 0;
+}
+
+static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* enable general power management */
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 1);
+
+       /* enable sclk deep sleep */
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 1);
+
+       /* prepare for PCIE DPM */
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       data->soft_regs_start +
+                       smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+                                               VoltageChangeTimeout), 0x1000);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+                       SWRST_COMMAND_1, RESETLC, 0x0);
+
+       PP_ASSERT_WITH_CODE(
+                       (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                                       PPSMC_MSG_Voltage_Cntl_Enable)),
+                       "Failed to enable voltage DPM during DPM Start Function!",
+                       return -EINVAL);
+
+
+       if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
+               return -EINVAL;
+       }
+
+       /* enable PCIE dpm */
+       if (0 == data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Enable)),
+                               "Failed to enable pcie DPM during DPM Start Function!",
+                               return -EINVAL);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_Falcon_QuickTransition)) {
+               PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_EnableACDCGPIOInterrupt)),
+                               "Failed to enable AC DC GPIO Interrupt!",
+                               );
+       }
+
+       return 0;
+}
+
+static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* disable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_DPM_Disable) == 0),
+                               "Failed to disable SCLK DPM!",
+                               return -EINVAL);
+
+       /* disable MCLK dpm */
+       if (!data->mclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
+                               "Failed to disable MCLK DPM!",
+                               return -EINVAL);
+       }
+
+       return 0;
+}
+
+static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* disable general power management */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 0);
+       /* disable sclk deep sleep */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 0);
+
+       /* disable PCIE dpm */
+       if (!data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                               "Failed to disable pcie DPM during DPM Stop Function!",
+                               return -EINVAL);
+       }
+
+       if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
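+/*
+ * Select the DPM throttling event source from the bitmask of active
+ * auto-throttle sources and enable thermal protection for it; with
+ * no source (or an unknown one) thermal protection is disabled
+ * instead.
+ */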
+static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+       bool protection;
+       enum DPM_EVENT_SRC src;
+
+       switch (sources) {
+       default:
+               printk(KERN_ERR "Unknown throttling event sources.\n");
+               /* fall through */
+       case 0:
+               protection = false;
+               /* src is unused */
+               break;
+       case (1 << PHM_AutoThrottleSource_Thermal):
+               protection = true;
+               src = DPM_EVENT_SRC_DIGITAL;
+               break;
+       case (1 << PHM_AutoThrottleSource_External):
+               protection = true;
+               src = DPM_EVENT_SRC_EXTERNAL;
+               break;
+       case (1 << PHM_AutoThrottleSource_External) |
+                       (1 << PHM_AutoThrottleSource_Thermal):
+               protection = true;
+               src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+               break;
+       }
+       /* Order matters - don't enable thermal protection for the wrong source. */
+       if (protection) {
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+                               DPM_EVENT_SRC, src);
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                               THERMAL_PROTECTION_DIS,
+                               !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_ThermalController));
+       } else {
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                               THERMAL_PROTECTION_DIS, 1);
+       }
+}
+
+static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (!(data->active_auto_throttle_sources & (1 << source))) {
+               data->active_auto_throttle_sources |= 1 << source;
+               smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->pcie_performance_request = true;
+
+       return 0;
+}
+
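+/*
+ * Bring up DPM end to end: voltage control and tables, static screen
+ * and display gap parameters, voting clients, the ARB F0->F1 switch
+ * and the SMC tables, then DPM itself, followed by CAC, power
+ * containment and thermal auto-throttling. Individual failures are
+ * reported via PP_ASSERT_WITH_CODE and collected in 'result'.
+ */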
+int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+       int tmp_result = 0;
+       int result = 0;
+
+       tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is already running right now, no need to enable DPM!",
+                       return 0);
+
+       if (smu7_voltage_control(hwmgr)) {
+               tmp_result = smu7_enable_voltage_control(hwmgr);
+               PP_ASSERT_WITH_CODE(tmp_result == 0,
+                               "Failed to enable voltage control!",
+                               result = tmp_result);
+
+               tmp_result = smu7_construct_voltage_tables(hwmgr);
+               PP_ASSERT_WITH_CODE((0 == tmp_result),
+                               "Failed to construct voltage tables!",
+                               result = tmp_result);
+       }
+       smum_initialize_mc_reg_table(hwmgr);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+       tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to program static screen threshold parameters!",
+                       result = tmp_result);
+
+       tmp_result = smu7_enable_display_gap(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable display gap!", result = tmp_result);
+
+       tmp_result = smu7_program_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to program voting clients!", result = tmp_result);
+
+       tmp_result = smum_process_firmware_header(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to process firmware header!", result = tmp_result);
+
+       tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to initialize switch from ArbF0 to F1!",
+                       result = tmp_result);
+
+       result = smu7_setup_default_dpm_tables(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to setup default DPM tables!", return result);
+
+       tmp_result = smum_init_smc_table(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to initialize SMC table!", result = tmp_result);
+
+       tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
+       tmp_result = smu7_enable_sclk_control(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable SCLK control!", result = tmp_result);
+
+       tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable voltage control!", result = tmp_result);
+
+       tmp_result = smu7_enable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable ULV!", result = tmp_result);
+
+       tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = smu7_enable_didt_config(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to enable DIDT config!", result = tmp_result);
+
+       tmp_result = smu7_start_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to start DPM!", result = tmp_result);
+
+       tmp_result = smu7_enable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable SMC CAC!", result = tmp_result);
+
+       tmp_result = smu7_enable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable power containment!", result = tmp_result);
+
+       tmp_result = smu7_power_control_set_level(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to power control set level!", result = tmp_result);
+
+       tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = smu7_pcie_performance_request(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "pcie performance request failed!", result = tmp_result);
+
+       return 0;
+}
+
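+/*
+ * Tear down DPM in roughly the reverse order of the enable path,
+ * finishing by resetting the SMC to defaults and forcing the ARB set
+ * back to F0.
+ */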
+int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+       int tmp_result, result = 0;
+
+       tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is not running right now, no need to disable DPM!",
+                       return 0);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+       tmp_result = smu7_disable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable power containment!", result = tmp_result);
+
+       tmp_result = smu7_disable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable SMC CAC!", result = tmp_result);
+
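+       /* Turn off engine clock spread spectrum. */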
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+       tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = smu7_stop_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to stop DPM!", result = tmp_result);
+
+       tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = smu7_disable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable ULV!", result = tmp_result);
+
+       tmp_result = smu7_clear_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to clear voting clients!", result = tmp_result);
+
+       tmp_result = smu7_reset_to_default(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+
+       tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force switch to ArbF0!", result = tmp_result);
+
+       return result;
+}
+
+int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       data->dll_default_on = false;
+       data->mclk_dpm0_activity_target = 0xa;
+       data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
+       data->vddc_vddgfx_delta = 300;
+       data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
+       data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
+       data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+       data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+       data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+       data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+       data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+       data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+       data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+       data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+
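+       /* Individual DPM domains can be switched off via the feature mask. */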
+       data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+       data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+       data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+       /* need to set voltage control types before EVV patching */
+       data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+       data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+       data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
+       data->enable_tdc_limit_feature = true;
+       data->enable_pkg_pwr_tracking_feature = true;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+       data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+
+       data->fast_watermark_threshold = 100;
+       if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                       VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+               data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ControlVDDGFX)) {
+               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                       VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
+                       data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+               }
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EnableMVDDControl)) {
+               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+                       data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                               VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+                       data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ControlVDDGFX);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ControlVDDCI)) {
+               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+                       data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+                       data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+       }
+
+       if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_EnableMVDDControl);
+
+       if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ControlVDDCI);
+
+       if ((hwmgr->pp_table_version != PP_TABLE_V0)
+               && (table_info->cac_dtp_table->usClockStretchAmount != 0))
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_ClockStretcher);
+
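+       /* The performance and power-saving PCIe ranges are seeded inverted
+        * (max = Gen1 / 0 lanes, min = Gen3 / 16 lanes), presumably so that
+        * later min/max accumulation over the power states can only tighten
+        * them. */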
+       data->pcie_gen_performance.max = PP_PCIEGen1;
+       data->pcie_gen_performance.min = PP_PCIEGen3;
+       data->pcie_gen_power_saving.max = PP_PCIEGen1;
+       data->pcie_gen_power_saving.min = PP_PCIEGen3;
+       data->pcie_lane_performance.max = 0;
+       data->pcie_lane_performance.min = 16;
+       data->pcie_lane_power_saving.max = 0;
+       data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+ * Get leakage VDDC based on leakage ID.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint16_t vv_id;
+       uint16_t vddc = 0;
+       uint16_t vddgfx = 0;
+       uint16_t i, j;
+       uint32_t sclk = 0;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
+
+       if (table_info != NULL)
+               sclk_table = table_info->vdd_dep_on_sclk;
+
+       for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+               vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+
+               if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+                       if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+                                               table_info->vddgfx_lookup_table, vv_id, &sclk)) {
+                               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                                       PHM_PlatformCaps_ClockStretcher)) {
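+                                       /* Entries with clock stretching
+                                        * disabled get their EVV evaluated
+                                        * slightly above the table clock. */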
+                                       for (j = 1; j < sclk_table->count; j++) {
+                                               if (sclk_table->entries[j].clk == sclk &&
+                                                               sclk_table->entries[j].cks_enable == 0) {
+                                                       sclk += 5000;
+                                                       break;
+                                               }
+                                       }
+                               }
+                               if (0 == atomctrl_get_voltage_evv_on_sclk
+                                   (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+                                    vv_id, &vddgfx)) {
+                                       /* Make sure VDDGFX stays below 2 V; anything higher could damage the ASIC. */
+                                       PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
+
+                                       /* the voltage should not be zero nor equal to leakage ID */
+                                       if (vddgfx != 0 && vddgfx != vv_id) {
+                                               data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+                                               data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
+                                               data->vddcgfx_leakage.count++;
+                                       }
+                               } else {
+                                       printk(KERN_WARNING "Error retrieving EVV voltage value!\n");
+                               }
+                       }
+               } else {
+
+                       if ((hwmgr->pp_table_version == PP_TABLE_V0)
+                               || !phm_get_sclk_for_voltage_evv(hwmgr,
+                                       table_info->vddc_lookup_table, vv_id, &sclk)) {
+                               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_ClockStretcher)) {
+                                       for (j = 1; j < sclk_table->count; j++) {
+                                               if (sclk_table->entries[j].clk == sclk &&
+                                                               sclk_table->entries[j].cks_enable == 0) {
+                                                       sclk += 5000;
+                                                       break;
+                                               }
+                                       }
+                               }
+
+                               if (phm_get_voltage_evv_on_sclk(hwmgr,
+                                                       VOLTAGE_TYPE_VDDC,
+                                                       sclk, vv_id, &vddc) == 0) {
+                                       if (vddc >= 2000 || vddc == 0)
+                                               return -EINVAL;
+                               } else {
+                                       printk(KERN_WARNING "Failed to retrieve EVV voltage!\n");
+                                       continue;
+                               }
+
+                               /* the voltage should not be zero nor equal to leakage ID */
+                               if (vddc != 0 && vddc != vv_id) {
+                                       data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
+                                       data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+                                       data->vddc_leakage.count++;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param     hwmgr  the address of the powerplay hardware manager.
+ * @param     voltage        pointer to the voltage to patch
+ * @param     leakage_table  pointer to the leakage table
+ */
+static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+               uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+       uint32_t index;
+
+       /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+       for (index = 0; index < leakage_table->count; index++) {
+               /* if this voltage matches a leakage voltage ID */
+               /* patch with actual leakage voltage */
+               if (leakage_table->leakage_id[index] == *voltage) {
+                       *voltage = leakage_table->actual_voltage[index];
+                       break;
+               }
+       }
+
+       if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+               printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
+}
+
+/**
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @param     hwmgr          the address of the powerplay hardware manager.
+ * @param     lookup_table   pointer to the voltage lookup table
+ * @param     leakage_table  pointer to the leakage table
+ * @return    always 0
+ */
+static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_voltage_lookup_table *lookup_table,
+               struct smu7_leakage_voltage *leakage_table)
+{
+       uint32_t i;
+
+       for (i = 0; i < lookup_table->count; i++)
+               smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+                               &lookup_table->entries[i].us_vdd, leakage_table);
+
+       return 0;
+}
+
+static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
+               struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
+               uint16_t *vddc)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+                       table_info->max_clock_voltage_on_dc.vddc;
+       return 0;
+}
+
+static int smu7_patch_voltage_dependency_tables_with_lookup_table(
+               struct pp_hwmgr *hwmgr)
+{
+       uint8_t entry_id;
+       uint8_t voltage_id;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+       struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+                       table_info->vdd_dep_on_mclk;
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+                       voltage_id = sclk_table->entries[entry_id].vddInd;
+                       sclk_table->entries[entry_id].vddgfx =
+                               table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
+               }
+       } else {
+               for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+                       voltage_id = sclk_table->entries[entry_id].vddInd;
+                       sclk_table->entries[entry_id].vddc =
+                               table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+               }
+       }
+
+       for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+               voltage_id = mclk_table->entries[entry_id].vddInd;
+               mclk_table->entries[entry_id].vddc =
+                       table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+       }
+
+       for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+               voltage_id = mm_table->entries[entry_id].vddcInd;
+               mm_table->entries[entry_id].vddc =
+                       table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+       }
+
+       return 0;
+}
+
+static int phm_add_voltage(struct pp_hwmgr *hwmgr,
+                       phm_ppt_v1_voltage_lookup_table *look_up_table,
+                       phm_ppt_v1_voltage_lookup_record *record)
+{
+       uint32_t i;
+
+       PP_ASSERT_WITH_CODE((NULL != look_up_table),
+               "Lookup table is NULL.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((0 != look_up_table->count),
+               "Lookup table is empty.", return -EINVAL);
+
+       i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+       PP_ASSERT_WITH_CODE((i >= look_up_table->count),
+               "Lookup Table is full.", return -EINVAL);
+
+       /* This is to avoid entering duplicate calculated records. */
+       for (i = 0; i < look_up_table->count; i++) {
+               if (look_up_table->entries[i].us_vdd == record->us_vdd) {
+                       if (look_up_table->entries[i].us_calculated == 1)
+                               return 0;
+                       break;
+               }
+       }
+
+       look_up_table->entries[i].us_calculated = 1;
+       look_up_table->entries[i].us_vdd = record->us_vdd;
+       look_up_table->entries[i].us_cac_low = record->us_cac_low;
+       look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
+       look_up_table->entries[i].us_cac_high = record->us_cac_high;
+       /* Only increment the count when we're appending, not replacing duplicate entry. */
+       if (i == look_up_table->count)
+               look_up_table->count++;
+
+       return 0;
+}
+
+static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+       uint8_t entry_id;
+       struct phm_ppt_v1_voltage_lookup_record v_record;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
+       phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
+
+       if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
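+               /* vdd_offset is a signed 16-bit delta stored in an unsigned
+                * field: bit 15 set marks a negative value, recovered here by
+                * subtracting 0xFFFF. */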
+               for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+                       if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
+                               v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+                                       sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+                       else
+                               v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+                                       sclk_table->entries[entry_id].vdd_offset;
+
+                       sclk_table->entries[entry_id].vddc =
+                               v_record.us_cac_low = v_record.us_cac_mid =
+                               v_record.us_cac_high = v_record.us_vdd;
+
+                       phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
+               }
+
+               for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+                       if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
+                               v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+                                       mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+                       else
+                               v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+                                       mclk_table->entries[entry_id].vdd_offset;
+
+                       mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+                               v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+                       phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+               }
+       }
+       return 0;
+}
+
+static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+       uint8_t entry_id;
+       struct phm_ppt_v1_voltage_lookup_record v_record;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+       if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
+                       if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
+                               v_record.us_vdd = mm_table->entries[entry_id].vddc +
+                                       mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
+                       else
+                               v_record.us_vdd = mm_table->entries[entry_id].vddc +
+                                       mm_table->entries[entry_id].vddgfx_offset;
+
+                       /* Add the calculated VDDGFX to the VDDGFX lookup table */
+                       mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+                               v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+                       phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+               }
+       }
+       return 0;
+}
+
+static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
+               struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+       uint32_t table_size, i, j;
+       struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+       table_size = lookup_table->count;
+
+       PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+               "Lookup table is empty.", return -EINVAL);
+
+       /* Insertion-sort the entries into ascending us_vdd order. */
+       for (i = 0; i < table_size - 1; i++) {
+               for (j = i + 1; j > 0; j--) {
+                       if (lookup_table->entries[j].us_vdd <
+                                       lookup_table->entries[j - 1].us_vdd) {
+                               tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+                               lookup_table->entries[j - 1] = lookup_table->entries[j];
+                               lookup_table->entries[j] = tmp_voltage_lookup_record;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+       int result = 0;
+       int tmp_result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+                       table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
+               if (tmp_result != 0)
+                       result = tmp_result;
+
+               smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+                       &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
+       } else {
+
+               tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+                               table_info->vddc_lookup_table, &(data->vddc_leakage));
+               if (tmp_result)
+                       result = tmp_result;
+
+               tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+                               &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+               if (tmp_result)
+                       result = tmp_result;
+       }
+
+       tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+       if (tmp_result)
+               result = tmp_result;
+
+       tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
+       if (tmp_result)
+               result = tmp_result;
+
+       tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
+       if (tmp_result)
+               result = tmp_result;
+
+       tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
+       if (tmp_result)
+               result = tmp_result;
+
+       tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+       if (tmp_result)
+               result = tmp_result;
+
+       return result;
+}
+
+static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+                                               table_info->vdd_dep_on_sclk;
+       struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+                                               table_info->vdd_dep_on_mclk;
+
+       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+               "VDD dependency on SCLK table is missing.",
+               return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+               "VDD dependency on SCLK table must have at least one entry.",
+               return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+               "VDD dependency on MCLK table is missing.",
+               return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+               "VDD dependency on MCLK table must have at least one entry.",
+               return -EINVAL);
+
+       table_info->max_clock_voltage_on_ac.sclk =
+               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+       table_info->max_clock_voltage_on_ac.mclk =
+               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+       table_info->max_clock_voltage_on_ac.vddc =
+               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+       table_info->max_clock_voltage_on_ac.vddci =
+               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+       hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+       hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+       hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+       hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+       return 0;
+}
+
+int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *table_info =
+                      (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table;
+       uint32_t i;
+       uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+       struct cgs_system_info sys_info = {0};
+
+       if (table_info != NULL) {
+               dep_mclk_table = table_info->vdd_dep_on_mclk;
+               lookup_table = table_info->vddc_lookup_table;
+       } else
+               return 0;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       hw_revision = (uint32_t)sys_info.value;
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       sub_sys_id = (uint32_t)sys_info.value;
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       sub_vendor_id = (uint32_t)sys_info.value;
+
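+       /* Quirk for specific Polaris10 (hw rev 0xC7) boards: make sure the
+        * highest MCLK DPM level references a VDDC of at least 1000 mV. */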
+       if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+                       ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+                   (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+                   (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
+               if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+                       return 0;
+
+               for (i = 0; i < lookup_table->count; i++) {
+                       if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+                               dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+                               return 0;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
+{
+       struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+       uint32_t temp_reg;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+               temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
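+               /* Route the VDDC PCC GPIO pin to the matching
+                * CNB_PWRMGT_CNTL control field. */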
+               switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+               case 0:
+                       temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+                       break;
+               case 1:
+                       temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+                       break;
+               case 2:
+                       temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+                       break;
+               case 3:
+                       temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+                       break;
+               case 4:
+                       temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+                       break;
+               default:
+                       PP_ASSERT_WITH_CODE(0,
+                       "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+                       );
+                       break;
+               }
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+       }
+
+       if (table_info == NULL)
+               return 0;
+
+       if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+                       (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+                       (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+                       (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+               table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+                                                               (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
+
+               table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+               table_info->cac_dtp_table->usOperatingTempStep = 1;
+               table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+                              hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+                              hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+               hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+                              table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+               hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+                              table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+               hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+                              table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+               hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+                              table_info->cac_dtp_table->usOperatingTempStep;
+
+               hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+                              table_info->cac_dtp_table->usTargetOperatingTemp;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_ODFuzzyFanControlSupport);
+       }
+
+       return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param     hwmgr  the address of the powerplay hardware manager.
+ * @param     voltage        pointer to the voltage to patch
+ * @param     leakage_table  pointer to the leakage table
+ */
+static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+               uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+       uint32_t index;
+
+       /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+       for (index = 0; index < leakage_table->count; index++) {
+               /* if this voltage matches a leakage voltage ID */
+               /* patch with actual leakage voltage */
+               if (leakage_table->leakage_id[index] == *voltage) {
+                       *voltage = leakage_table->actual_voltage[index];
+                       break;
+               }
+       }
+
+       if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+               printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
+}
+
+static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
+                             struct phm_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                               &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
+                              struct phm_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                                       &data->vddci_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
+                                 struct phm_vce_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                                       &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
+                                 struct phm_uvd_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                                       &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
+                                        struct phm_phase_shedding_limits_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
+                                                       &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
+                                  struct phm_samu_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                                       &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
+                                 struct phm_acp_clock_voltage_dependency_table *tab)
+{
+       uint16_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab)
+               for (i = 0; i < tab->count; i++)
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+                                       &data->vddc_leakage);
+
+       return 0;
+}
+
+static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
+                                    struct phm_clock_and_voltage_limits *tab)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab) {
+               smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
+                                                       &data->vddc_leakage);
+               smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
+                                                       &data->vddci_leakage);
+       }
+
+       return 0;
+}
+
+static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
+{
+       uint32_t i;
+       uint32_t vddc;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (tab) {
+               for (i = 0; i < tab->count; i++) {
+                       vddc = (uint32_t)(tab->entries[i].Vddc);
+                       smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
+                       tab->entries[i].Vddc = (uint16_t)vddc;
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
+{
+       int tmp;
+
+       tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+       if (tmp)
+               return -EINVAL;
+
+       tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+       if (tmp)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+       struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+       struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+       PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+               "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+               "VDDC dependency on SCLK table must have at least one entry. This table is mandatory\n", return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+               "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+       PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+               "VDDC dependency on MCLK table must have at least one entry. This table is mandatory\n", return -EINVAL);
+
+       data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+       data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+       hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+               allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+       hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+               allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+       hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+               allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+       if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+               data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+               data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+       }
+
+       if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+               hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+       return 0;
+}
+
+int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data;
+       int result;
+
+       data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
+       smu7_patch_voltage_workaround(hwmgr);
+       smu7_init_dpm_defaults(hwmgr);
+
+       /* Get leakage voltage based on leakage ID. */
+       result = smu7_get_evv_voltages(hwmgr);
+
+       if (result) {
+               printk(KERN_ERR "Failed to get EVV voltage. Aborting driver load!\n");
+               return -EINVAL;
+       }
+
+       if (hwmgr->pp_table_version == PP_TABLE_V1) {
+               smu7_complete_dependency_tables(hwmgr);
+               smu7_set_private_data_based_on_pptable_v1(hwmgr);
+       } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+               smu7_patch_dependency_tables_with_leakage(hwmgr);
+               smu7_set_private_data_based_on_pptable_v0(hwmgr);
+       }
+
+       /* Initialize Dynamic State Adjustment Rule Settings. */
+       result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+       if (0 == result) {
+               struct cgs_system_info sys_info = {0};
+
+               data->is_tlu_enabled = false;
+
+               hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+                                                       SMU7_MAX_HARDWARE_POWERLEVELS;
+               hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+               hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+               sys_info.size = sizeof(struct cgs_system_info);
+               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+               result = cgs_query_system_info(hwmgr->device, &sys_info);
+               if (result)
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+               else
+                       data->pcie_gen_cap = (uint32_t)sys_info.value;
+               if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+                       data->pcie_spc_cap = 20;
+               sys_info.size = sizeof(struct cgs_system_info);
+               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+               result = cgs_query_system_info(hwmgr->device, &sys_info);
+               if (result)
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+               else
+                       data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+               hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+               /* The true clock step depends on the frequency; typically it
+                * is 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */
+               hwmgr->platform_descriptor.clockStep.engineClock = 500;
+               hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+               smu7_thermal_parameter_init(hwmgr);
+       } else {
+               /* Ignore return value in here, we are cleaning up a mess. */
+               phm_hwmgr_backend_fini(hwmgr);
+       }
+
+       return 0;
+}
+
+static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t level, tmp;
+
+       if (!data->pcie_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
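+                       /* The highest enabled DPM level is the index of the
+                        * topmost set bit in the enable mask. */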
+                       level = 0;
+                       tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+                       while (tmp >>= 1)
+                               level++;
+
+                       if (level)
+                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_ForceLevel, level);
+               }
+       }
+
+       if (!data->sclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+                       level = 0;
+                       tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+                       while (tmp >>= 1)
+                               level++;
+
+                       if (level)
+                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                               (1 << level));
+               }
+       }
+
+       if (!data->mclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+                       level = 0;
+                       tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+                       while (tmp >>= 1)
+                               level++;
+
+                       if (level)
+                               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                               (1 << level));
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (hwmgr->pp_table_version == PP_TABLE_V1)
+               phm_apply_dal_min_voltage_request(hwmgr);
+       /* TODO: do the equivalent for PP table v0 (Iceland and CI). */
+
+       if (!data->sclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+       }
+
+       if (!data->mclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+       }
+
+       return 0;
+}
+
+static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (!smum_is_dpm_running(hwmgr))
+               return -EINVAL;
+
+       if (!data->pcie_dpm_key_disabled) {
+               smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_PCIeDPM_UnForceLevel);
+       }
+
+       return smu7_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data =
+                       (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t level;
+
+       if (!data->sclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+                       level = phm_get_lowest_enabled_level(hwmgr,
+                                                             data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                                           PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                                           (1 << level));
+               }
+       }
+
+       if (!data->mclk_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+                       level = phm_get_lowest_enabled_level(hwmgr,
+                                                             data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                                           (1 << level));
+               }
+       }
+
+       if (!data->pcie_dpm_key_disabled) {
+               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+                       level = phm_get_lowest_enabled_level(hwmgr,
+                                                             data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                                           PPSMC_MSG_PCIeDPM_ForceLevel,
+                                                           (level));
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
+                               enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+
+       switch (level) {
+       case AMD_DPM_FORCED_LEVEL_HIGH:
+               ret = smu7_force_dpm_highest(hwmgr);
+               if (ret)
+                       return ret;
+               break;
+       case AMD_DPM_FORCED_LEVEL_LOW:
+               ret = smu7_force_dpm_lowest(hwmgr);
+               if (ret)
+                       return ret;
+               break;
+       case AMD_DPM_FORCED_LEVEL_AUTO:
+               ret = smu7_unforce_dpm_levels(hwmgr);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               break;
+       }
+
+       hwmgr->dpm_level = level;
+
+       return ret;
+}
+
+static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+       return sizeof(struct smu7_power_state);
+}
+
+static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+                               struct pp_power_state *request_ps,
+                       const struct pp_power_state *current_ps)
+{
+       struct smu7_power_state *smu7_ps =
+                               cast_phw_smu7_power_state(&request_ps->hardware);
+       uint32_t sclk;
+       uint32_t mclk;
+       struct PP_Clocks minimum_clocks = {0};
+       bool disable_mclk_switching;
+       bool disable_mclk_switching_for_frame_lock;
+       struct cgs_display_info info = {0};
+       const struct phm_clock_and_voltage_limits *max_limits;
+       uint32_t i;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int32_t count;
+       int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+       data->battery_state = (PP_StateUILabel_Battery ==
+                       request_ps->classification.ui_label);
+
+       PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
+                                "VI should always have 2 performance levels",
+                               );
+
+       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+                       &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+                       &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+       /* Cap clock DPM tables at DC MAX if it is in DC. */
+       if (PP_PowerSource_DC == hwmgr->power_source) {
+               for (i = 0; i < smu7_ps->performance_level_count; i++) {
+                       if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
+                               smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
+                       if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
+                               smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
+               }
+       }
+
+       smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+       smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
+
+       minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
+       minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState)) {
+               max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
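+               /* A stable P-state pins SCLK at 75% of the AC limit, snapped
+                * down to the nearest SCLK dependency table entry, and MCLK
+                * at the AC maximum. */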
+               stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+               for (count = table_info->vdd_dep_on_sclk->count - 1;
+                               count >= 0; count--) {
+                       if (stable_pstate_sclk >=
+                                       table_info->vdd_dep_on_sclk->entries[count].clk) {
+                               stable_pstate_sclk =
+                                               table_info->vdd_dep_on_sclk->entries[count].clk;
+                               break;
+                       }
+               }
+
+               if (count < 0)
+                       stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+               stable_pstate_mclk = max_limits->mclk;
+
+               minimum_clocks.engineClock = stable_pstate_sclk;
+               minimum_clocks.memoryClock = stable_pstate_mclk;
+       }
+
+       if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+               minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+       if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+               minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+       smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+       if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+                               hwmgr->platform_descriptor.overdriveLimit.engineClock),
+                               "Overdrive sclk exceeds limit",
+                               hwmgr->gfx_arbiter.sclk_over_drive =
+                                               hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+               if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+                       smu7_ps->performance_levels[1].engine_clock =
+                                       hwmgr->gfx_arbiter.sclk_over_drive;
+       }
+
+       if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+                               hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+                               "Overdrive mclk exceeds limit",
+                               hwmgr->gfx_arbiter.mclk_over_drive =
+                                               hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+               if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+                       smu7_ps->performance_levels[1].memory_clock =
+                                       hwmgr->gfx_arbiter.mclk_over_drive;
+       }
+
+       disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+                                   hwmgr->platform_descriptor.platformCaps,
+                                   PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
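+       /* Keep a fixed MCLK whenever more than one display is active or frame
+        * lock is in use, since switching the memory clock mid-scanout can
+        * produce visible artifacts.
+        */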
+       disable_mclk_switching = (1 < info.display_count) ||
+                                   disable_mclk_switching_for_frame_lock;
+
+       sclk = smu7_ps->performance_levels[0].engine_clock;
+       mclk = smu7_ps->performance_levels[0].memory_clock;
+
+       if (disable_mclk_switching)
+               mclk = smu7_ps->performance_levels
+               [smu7_ps->performance_level_count - 1].memory_clock;
+
+       if (sclk < minimum_clocks.engineClock)
+               sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+                               max_limits->sclk : minimum_clocks.engineClock;
+
+       if (mclk < minimum_clocks.memoryClock)
+               mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+                               max_limits->mclk : minimum_clocks.memoryClock;
+
+       smu7_ps->performance_levels[0].engine_clock = sclk;
+       smu7_ps->performance_levels[0].memory_clock = mclk;
+
+       smu7_ps->performance_levels[1].engine_clock =
+               (smu7_ps->performance_levels[1].engine_clock >=
+                               smu7_ps->performance_levels[0].engine_clock) ?
+                                               smu7_ps->performance_levels[1].engine_clock :
+                                               smu7_ps->performance_levels[0].engine_clock;
+
+       if (disable_mclk_switching) {
+               if (mclk < smu7_ps->performance_levels[1].memory_clock)
+                       mclk = smu7_ps->performance_levels[1].memory_clock;
+
+               smu7_ps->performance_levels[0].memory_clock = mclk;
+               smu7_ps->performance_levels[1].memory_clock = mclk;
+       } else {
+               if (smu7_ps->performance_levels[1].memory_clock <
+                               smu7_ps->performance_levels[0].memory_clock)
+                       smu7_ps->performance_levels[1].memory_clock =
+                                       smu7_ps->performance_levels[0].memory_clock;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState)) {
+               for (i = 0; i < smu7_ps->performance_level_count; i++) {
+                       smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+                       smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+                       smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+                       smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+               }
+       }
+       return 0;
+}
+
+static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+       struct pp_power_state  *ps;
+       struct smu7_power_state  *smu7_ps;
+
+       if (hwmgr == NULL)
+               return -EINVAL;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+       if (low)
+               return smu7_ps->performance_levels[0].memory_clock;
+       else
+               return smu7_ps->performance_levels
+                               [smu7_ps->performance_level_count-1].memory_clock;
+}
+
+static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+       struct pp_power_state  *ps;
+       struct smu7_power_state  *smu7_ps;
+
+       if (hwmgr == NULL)
+               return -EINVAL;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+       if (low)
+               return smu7_ps->performance_levels[0].engine_clock;
+       else
+               return smu7_ps->performance_levels
+                               [smu7_ps->performance_level_count-1].engine_clock;
+}
+
+static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+                                       struct pp_hw_power_state *hw_ps)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
+       ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+       uint16_t size;
+       uint8_t frev, crev;
+       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+       /* First retrieve the Boot clocks and VDDC from the firmware info table.
+        * We assume here that fw_info is unchanged if this call fails.
+        */
+       fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+                       hwmgr->device, index,
+                       &size, &frev, &crev);
+       if (!fw_info)
+               /* During a test, there is no firmware info table. */
+               return 0;
+
+       /* Patch the state. */
+       data->vbios_boot_state.sclk_bootup_value =
+                       le32_to_cpu(fw_info->ulDefaultEngineClock);
+       data->vbios_boot_state.mclk_bootup_value =
+                       le32_to_cpu(fw_info->ulDefaultMemoryClock);
+       data->vbios_boot_state.mvdd_bootup_value =
+                       le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+       data->vbios_boot_state.vddc_bootup_value =
+                       le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+       data->vbios_boot_state.vddci_bootup_value =
+                       le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+       data->vbios_boot_state.pcie_gen_bootup_value =
+                       smu7_get_current_pcie_speed(hwmgr);
+
+       data->vbios_boot_state.pcie_lane_bootup_value =
+                       (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
+
+       /* set boot power state */
+       ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+       ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+       ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+       ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+       return 0;
+}
+
+static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       unsigned long ret = 0;
+
+       if (hwmgr->pp_table_version == PP_TABLE_V0) {
+               result = pp_tables_get_num_of_entries(hwmgr, &ret);
+               return result ? 0 : ret;
+       } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+               result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
+               return result;
+       }
+       return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
+               void *state, struct pp_power_state *power_state,
+               void *pp_table, uint32_t classification_flag)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_power_state  *smu7_power_state =
+                       (struct smu7_power_state *)(&(power_state->hardware));
+       struct smu7_performance_level *performance_level;
+       ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+       ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+                       (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+       PPTable_Generic_SubTable_Header *sclk_dep_table =
+                       (PPTable_Generic_SubTable_Header *)
+                       (((unsigned long)powerplay_table) +
+                               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
+       ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+                       (ATOM_Tonga_MCLK_Dependency_Table *)
+                       (((unsigned long)powerplay_table) +
+                               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+       /* The following fields are not initialized here: id, orderedList, allStatesList. */
+       power_state->classification.ui_label =
+                       (le16_to_cpu(state_entry->usClassification) &
+                       ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+                       ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+       power_state->classification.flags = classification_flag;
+       /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+       power_state->classification.temporary_state = false;
+       power_state->classification.to_be_deleted = false;
+
+       power_state->validation.disallowOnDC =
+                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+                                       ATOM_Tonga_DISALLOW_ON_DC));
+
+       power_state->pcie.lanes = 0;
+
+       power_state->display.disableFrameModulation = false;
+       power_state->display.limitRefreshrate = false;
+       power_state->display.enableVariBright =
+                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+                                       ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+       power_state->validation.supportedPowerLevels = 0;
+       power_state->uvd_clocks.VCLK = 0;
+       power_state->uvd_clocks.DCLK = 0;
+       power_state->temperatures.min = 0;
+       power_state->temperatures.max = 0;
+
+       performance_level = &(smu7_power_state->performance_levels
+                       [smu7_power_state->performance_level_count++]);
+
+       PP_ASSERT_WITH_CODE(
+                       (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+                       "Performance level count exceeds SMC limit!",
+                       return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(
+                       (smu7_power_state->performance_level_count <=
+                                       hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+                       "Performance level count exceeds driver limit!",
+                       return -EINVAL);
+
+       /* Performance levels are arranged from low to high. */
+       performance_level->memory_clock = mclk_dep_table->entries
+                       [state_entry->ucMemoryClockIndexLow].ulMclk;
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexLow].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexLow].ulSclk;
+       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+                       state_entry->ucPCIEGenLow);
+       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+                       state_entry->ucPCIELaneLow);
+
+       performance_level = &(smu7_power_state->performance_levels
+                       [smu7_power_state->performance_level_count++]);
+       performance_level->memory_clock = mclk_dep_table->entries
+                       [state_entry->ucMemoryClockIndexHigh].ulMclk;
+
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexHigh].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexHigh].ulSclk;
+
+       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+                       state_entry->ucPCIEGenHigh);
+       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+                       state_entry->ucPCIELaneHigh);
+
+       return 0;
+}
+
+static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
+               unsigned long entry_index, struct pp_power_state *state)
+{
+       int result;
+       struct smu7_power_state *ps;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+                       table_info->vdd_dep_on_mclk;
+
+       state->hardware.magic = PHM_VIslands_Magic;
+
+       ps = (struct smu7_power_state *)(&state->hardware);
+
+       result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
+                       smu7_get_pp_table_entry_callback_func_v1);
+
+       /* This is the earliest point at which we have both the dependency
+        * tables and the VBIOS boot state, since get_powerplay_table_entry_v1_0
+        * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
+        * level, check that it matches the VBIOS boot state.
+        */
+       if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+               if (dep_mclk_table->entries[0].clk !=
+                               data->vbios_boot_state.mclk_bootup_value)
+                       printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+                                       "does not match VBIOS boot MCLK level\n");
+               if (dep_mclk_table->entries[0].vddci !=
+                               data->vbios_boot_state.vddci_bootup_value)
+                       printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+                                       "does not match VBIOS boot VDDCI level\n");
+       }
+
+       /* set DC compatible flag if this state supports DC */
+       if (!state->validation.disallowOnDC)
+               ps->dc_compatible = true;
+
+       if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+               data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+       ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+       ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+       if (!result) {
+               uint32_t i;
+
+               switch (state->classification.ui_label) {
+               case PP_StateUILabel_Performance:
+                       data->use_pcie_performance_levels = true;
+                       for (i = 0; i < ps->performance_level_count; i++) {
+                               if (data->pcie_gen_performance.max <
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_performance.max =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_gen_performance.min >
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_performance.min =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_lane_performance.max <
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_performance.max =
+                                                       ps->performance_levels[i].pcie_lane;
+                               if (data->pcie_lane_performance.min >
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_performance.min =
+                                                       ps->performance_levels[i].pcie_lane;
+                       }
+                       break;
+               case PP_StateUILabel_Battery:
+                       data->use_pcie_power_saving_levels = true;
+
+                       for (i = 0; i < ps->performance_level_count; i++) {
+                               if (data->pcie_gen_power_saving.max <
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_power_saving.max =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_gen_power_saving.min >
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_power_saving.min =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_lane_power_saving.max <
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_power_saving.max =
+                                                       ps->performance_levels[i].pcie_lane;
+
+                               if (data->pcie_lane_power_saving.min >
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_power_saving.min =
+                                                       ps->performance_levels[i].pcie_lane;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
+                                       struct pp_hw_power_state *power_state,
+                                       unsigned int index, const void *clock_info)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
+       const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
+       struct smu7_performance_level *performance_level;
+       uint32_t engine_clock, memory_clock;
+       uint16_t pcie_gen_from_bios;
+
+       engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
+       memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
+
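+       /* Unless MC microcode loading is disabled, remember the highest
+        * memory clock seen across the parsed clock entries.
+        */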
+       if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
+               data->highest_mclk = memory_clock;
+
+       performance_level = &(ps->performance_levels
+                       [ps->performance_level_count++]);
+
+       PP_ASSERT_WITH_CODE(
+                       (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+                       "Performance level count exceeds SMC limit!",
+                       return -EINVAL);
+
+       PP_ASSERT_WITH_CODE(
+                       (ps->performance_level_count <=
+                                       hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+                       "Performance level count exceeds driver limit!",
+                       return -EINVAL);
+
+       /* Performance levels are arranged from low to high. */
+       performance_level->memory_clock = memory_clock;
+       performance_level->engine_clock = engine_clock;
+
+       pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
+
+       performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
+       performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
+
+       return 0;
+}
+
+static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
+               unsigned long entry_index, struct pp_power_state *state)
+{
+       int result;
+       struct smu7_power_state *ps;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_clock_voltage_dependency_table *dep_mclk_table =
+                       hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+       memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
+
+       state->hardware.magic = PHM_VIslands_Magic;
+
+       ps = (struct smu7_power_state *)(&state->hardware);
+
+       result = pp_tables_get_entry(hwmgr, entry_index, state,
+                       smu7_get_pp_table_entry_callback_func_v0);
+
+       /*
+        * This is the earliest point at which we have both the dependency
+        * tables and the VBIOS boot state, since pp_tables_get_entry
+        * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
+        * level, check that it matches the VBIOS boot state.
+        */
+       if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+               if (dep_mclk_table->entries[0].clk !=
+                               data->vbios_boot_state.mclk_bootup_value)
+                       printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+                                       "does not match VBIOS boot MCLK level\n");
+               if (dep_mclk_table->entries[0].v !=
+                               data->vbios_boot_state.vddci_bootup_value)
+                       printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+                                       "does not match VBIOS boot VDDCI level\n");
+       }
+
+       /* set DC compatible flag if this state supports DC */
+       if (!state->validation.disallowOnDC)
+               ps->dc_compatible = true;
+
+       if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+               data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+       ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+       ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+       if (!result) {
+               uint32_t i;
+
+               switch (state->classification.ui_label) {
+               case PP_StateUILabel_Performance:
+                       data->use_pcie_performance_levels = true;
+
+                       for (i = 0; i < ps->performance_level_count; i++) {
+                               if (data->pcie_gen_performance.max <
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_performance.max =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_gen_performance.min >
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_performance.min =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_lane_performance.max <
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_performance.max =
+                                                       ps->performance_levels[i].pcie_lane;
+
+                               if (data->pcie_lane_performance.min >
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_performance.min =
+                                                       ps->performance_levels[i].pcie_lane;
+                       }
+                       break;
+               case PP_StateUILabel_Battery:
+                       data->use_pcie_power_saving_levels = true;
+
+                       for (i = 0; i < ps->performance_level_count; i++) {
+                               if (data->pcie_gen_power_saving.max <
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_power_saving.max =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_gen_power_saving.min >
+                                               ps->performance_levels[i].pcie_gen)
+                                       data->pcie_gen_power_saving.min =
+                                                       ps->performance_levels[i].pcie_gen;
+
+                               if (data->pcie_lane_power_saving.max <
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_power_saving.max =
+                                                       ps->performance_levels[i].pcie_lane;
+
+                               if (data->pcie_lane_power_saving.min >
+                                               ps->performance_levels[i].pcie_lane)
+                                       data->pcie_lane_power_saving.min =
+                                                       ps->performance_levels[i].pcie_lane;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+               unsigned long entry_index, struct pp_power_state *state)
+{
+       if (hwmgr->pp_table_version == PP_TABLE_V0)
+               return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
+       else if (hwmgr->pp_table_version == PP_TABLE_V1)
+               return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
+
+       return 0;
+}
+
+static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+       uint32_t sclk, mclk, activity_percent;
+       uint32_t offset;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       switch (idx) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+               sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               *value = sclk;
+               return 0;
+       case AMDGPU_PP_SENSOR_GFX_MCLK:
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+               mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               *value = mclk;
+               return 0;
+       case AMDGPU_PP_SENSOR_GPU_LOAD:
+               offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+                                                               SMU_SoftRegisters,
+                                                               AverageGraphicsActivity);
+
+               activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
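+               /* The reported activity is effectively 8.8 fixed point;
+                * adding 0x80 before the shift rounds to the nearest
+                * integer percent.
+                */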
+               activity_percent += 0x80;
+               activity_percent >>= 8;
+               *value = activity_percent > 100 ? 100 : activity_percent;
+               return 0;
+       case AMDGPU_PP_SENSOR_GPU_TEMP:
+               *value = smu7_thermal_get_temperature(hwmgr);
+               return 0;
+       case AMDGPU_PP_SENSOR_UVD_POWER:
+               *value = data->uvd_power_gated ? 0 : 1;
+               return 0;
+       case AMDGPU_PP_SENSOR_VCE_POWER:
+               *value = data->vce_power_gated ? 0 : 1;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       const struct smu7_power_state *smu7_ps =
+                       cast_const_phw_smu7_power_state(states->pnew_state);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       uint32_t sclk = smu7_ps->performance_levels
+                       [smu7_ps->performance_level_count - 1].engine_clock;
+       struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       uint32_t mclk = smu7_ps->performance_levels
+                       [smu7_ps->performance_level_count - 1].memory_clock;
+       struct PP_Clocks min_clocks = {0};
+       uint32_t i;
+       struct cgs_display_info info = {0};
+
+       data->need_update_smu7_dpm_table = 0;
+
+       for (i = 0; i < sclk_table->count; i++) {
+               if (sclk == sclk_table->dpm_levels[i].value)
+                       break;
+       }
+
+       if (i >= sclk_table->count)
+               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+       else {
+               /* TODO: Check SCLK in DAL's minimum clocks
+                * in case a DeepSleep divider update is required.
+                */
+               if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+                       (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
+                               data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+                       data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+       }
+
+       for (i = 0; i < mclk_table->count; i++) {
+               if (mclk == mclk_table->dpm_levels[i].value)
+                       break;
+       }
+
+       if (i >= mclk_table->count)
+               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       if (data->display_timing.num_existing_displays != info.display_count)
+               data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+       return 0;
+}
+
+static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+               const struct smu7_power_state *smu7_ps)
+{
+       uint32_t i;
+       uint32_t sclk, max_sclk = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+       for (i = 0; i < smu7_ps->performance_level_count; i++) {
+               sclk = smu7_ps->performance_levels[i].engine_clock;
+               if (max_sclk < sclk)
+                       max_sclk = sclk;
+       }
+
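+       /* Map the highest engine clock back to its SCLK DPM index and return
+        * the PCIe speed configured at that index, clamping to the last entry
+        * of the PCIe speed table.
+        */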
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+                       return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+                                       dpm_table->pcie_speed_table.dpm_levels
+                                       [dpm_table->pcie_speed_table.count - 1].value :
+                                       dpm_table->pcie_speed_table.dpm_levels[i].value);
+       }
+
+       return 0;
+}
+
+static int smu7_request_link_speed_change_before_state_change(
+               struct pp_hwmgr *hwmgr, const void *input)
+{
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       const struct smu7_power_state *smu7_nps =
+                       cast_const_phw_smu7_power_state(states->pnew_state);
+       const struct smu7_power_state *smu7_cps =
+                       cast_const_phw_smu7_power_state(states->pcurrent_state);
+
+       uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
+       uint16_t current_link_speed;
+
+       if (data->force_pcie_gen == PP_PCIEGenInvalid)
+               current_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_cps);
+       else
+               current_link_speed = data->force_pcie_gen;
+
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+       data->pspp_notify_required = false;
+
+       if (target_link_speed > current_link_speed) {
+               switch (target_link_speed) {
+               case PP_PCIEGen3:
+                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+                               break;
+                       data->force_pcie_gen = PP_PCIEGen2;
+                       if (current_link_speed == PP_PCIEGen2)
+                               break;
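+                       /* fall through: retry with a Gen2 request */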
+               case PP_PCIEGen2:
+                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+                               break;
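+                       /* fall through */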
+               default:
+                       data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
+                       break;
+               }
+       } else {
+               if (target_link_speed < current_link_speed)
+                       data->pspp_notify_required = true;
+       }
+
+       return 0;
+}
+
+static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (0 == data->need_update_smu7_dpm_table)
+               return 0;
+
+       if ((0 == data->sclk_dpm_key_disabled) &&
+               (data->need_update_smu7_dpm_table &
+                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to freeze SCLK DPM when DPM is disabled",
+                               );
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_SCLKDPM_FreezeLevel),
+                               "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+                               return -EINVAL);
+       }
+
+       if ((0 == data->mclk_dpm_key_disabled) &&
+               (data->need_update_smu7_dpm_table &
+                DPMTABLE_OD_UPDATE_MCLK)) {
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to freeze MCLK DPM when DPM is disabled",
+                               );
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MCLKDPM_FreezeLevel),
+                               "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+                               return -EINVAL);
+       }
+
+       return 0;
+}
+
+static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
+               struct pp_hwmgr *hwmgr, const void *input)
+{
+       int result = 0;
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       const struct smu7_power_state *smu7_ps =
+                       cast_const_phw_smu7_power_state(states->pnew_state);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t sclk = smu7_ps->performance_levels
+                       [smu7_ps->performance_level_count - 1].engine_clock;
+       uint32_t mclk = smu7_ps->performance_levels
+                       [smu7_ps->performance_level_count - 1].memory_clock;
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+       struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+       uint32_t dpm_count, clock_percent;
+       uint32_t i;
+
+       if (0 == data->need_update_smu7_dpm_table)
+               return 0;
+
+       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+               dpm_table->sclk_table.dpm_levels
+               [dpm_table->sclk_table.count - 1].value = sclk;
+
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+                       /* Need to do the calculation based on the golden DPM
+                        * table, as the Heatmap GPU Clock axis is also based
+                        * on the default values.
+                        */
+                       PP_ASSERT_WITH_CODE(
+                               (golden_dpm_table->sclk_table.dpm_levels
+                                               [golden_dpm_table->sclk_table.count - 1].value != 0),
+                               "Divide by 0!",
+                               return -EINVAL);
+                       dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
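+                       /* Rescale the intermediate levels (indexes 2 up to
+                        * count - 2) in proportion to how far the new top
+                        * clock deviates from the golden (default) top level;
+                        * levels 0 and 1 keep their existing values.
+                        */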
+                       for (i = dpm_count; i > 1; i--) {
+                               if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+                                       clock_percent =
+                                             ((sclk
+                                               - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+                                               ) * 100)
+                                               / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+                                       dpm_table->sclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->sclk_table.dpm_levels[i].value +
+                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
+                                                               clock_percent)/100;
+
+                               } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+                                       clock_percent =
+                                               ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+                                               - sclk) * 100)
+                                               / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+                                       dpm_table->sclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->sclk_table.dpm_levels[i].value -
+                                                       (golden_dpm_table->sclk_table.dpm_levels[i].value *
+                                                                       clock_percent) / 100;
+                               } else
+                                       dpm_table->sclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->sclk_table.dpm_levels[i].value;
+                       }
+               }
+       }
+
+       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+               dpm_table->mclk_table.dpm_levels
+                       [dpm_table->mclk_table.count - 1].value = mclk;
+
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+                       PP_ASSERT_WITH_CODE(
+                                       (golden_dpm_table->mclk_table.dpm_levels
+                                               [golden_dpm_table->mclk_table.count-1].value != 0),
+                                       "Divide by 0!",
+                                       return -EINVAL);
+                       dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
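+                       /* Same proportional rescale as the SCLK path above,
+                        * applied to the intermediate MCLK levels.
+                        */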
+                       for (i = dpm_count; i > 1; i--) {
+                               if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+                                       clock_percent = ((mclk -
+                                       golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+                                       / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+                                       dpm_table->mclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->mclk_table.dpm_levels[i].value +
+                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
+                                                       clock_percent) / 100;
+
+                               } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+                                       clock_percent = (
+                                        (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+                                       * 100)
+                                       / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+                                       dpm_table->mclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->mclk_table.dpm_levels[i].value -
+                                                       (golden_dpm_table->mclk_table.dpm_levels[i].value *
+                                                                       clock_percent) / 100;
+                               } else
+                                       dpm_table->mclk_table.dpm_levels[i].value =
+                                                       golden_dpm_table->mclk_table.dpm_levels[i].value;
+                       }
+               }
+       }
+
+       if (data->need_update_smu7_dpm_table &
+                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+               result = smum_populate_all_graphic_levels(hwmgr);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+                               return result);
+       }
+
+       if (data->need_update_smu7_dpm_table &
+                       (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+               /* populate the MCLK DPM table to SMU7 */
+               result = smum_populate_all_memory_levels(hwmgr);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+                               return result);
+       }
+
+       return result;
+}
+
+static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+                         struct smu7_single_dpm_table *dpm_table,
+                       uint32_t low_limit, uint32_t high_limit)
+{
+       uint32_t i;
+
+       for (i = 0; i < dpm_table->count; i++) {
+               if ((dpm_table->dpm_levels[i].value < low_limit)
+               || (dpm_table->dpm_levels[i].value > high_limit))
+                       dpm_table->dpm_levels[i].enabled = false;
+               else
+                       dpm_table->dpm_levels[i].enabled = true;
+       }
+
+       return 0;
+}
+
+static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
+               const struct smu7_power_state *smu7_ps)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t high_limit_count;
+
+       PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
+                       "power state did not have any performance level",
+                       return -EINVAL);
+
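+       /* A single-level state uses level 0 as both the low and the high trim
+        * bound; otherwise level 1 bounds the top.
+        */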
+       high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
+
+       smu7_trim_single_dpm_states(hwmgr,
+                       &(data->dpm_table.sclk_table),
+                       smu7_ps->performance_levels[0].engine_clock,
+                       smu7_ps->performance_levels[high_limit_count].engine_clock);
+
+       smu7_trim_single_dpm_states(hwmgr,
+                       &(data->dpm_table.mclk_table),
+                       smu7_ps->performance_levels[0].memory_clock,
+                       smu7_ps->performance_levels[high_limit_count].memory_clock);
+
+       return 0;
+}
+
+static int smu7_generate_dpm_level_enable_mask(
+               struct pp_hwmgr *hwmgr, const void *input)
+{
+       int result;
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       const struct smu7_power_state *smu7_ps =
+                       cast_const_phw_smu7_power_state(states->pnew_state);
+
+       result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+       if (result)
+               return result;
+
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+       return 0;
+}
+
+static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (0 == data->need_update_smu7_dpm_table)
+               return 0;
+
+       if ((0 == data->sclk_dpm_key_disabled) &&
+               (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to Unfreeze SCLK DPM when DPM is disabled",
+                               );
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+                       "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+                       return -EINVAL);
+       }
+
+       if ((0 == data->mclk_dpm_key_disabled) &&
+               (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to Unfreeze MCLK DPM when DPM is disabled",
+                               );
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+                   "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+                   return -EINVAL);
+       }
+
+       data->need_update_smu7_dpm_table = 0;
+
+       return 0;
+}
+
+static int smu7_notify_link_speed_change_after_state_change(
+               struct pp_hwmgr *hwmgr, const void *input)
+{
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       const struct smu7_power_state *smu7_ps =
+                       cast_const_phw_smu7_power_state(states->pnew_state);
+       uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
+       uint8_t  request;
+
+       if (data->pspp_notify_required) {
+               if (target_link_speed == PP_PCIEGen3)
+                       request = PCIE_PERF_REQ_GEN3;
+               else if (target_link_speed == PP_PCIEGen2)
+                       request = PCIE_PERF_REQ_GEN2;
+               else
+                       request = PCIE_PERF_REQ_GEN1;
+
+               if (request == PCIE_PERF_REQ_GEN1 &&
+                               smu7_get_current_pcie_speed(hwmgr) > 0)
+                       return 0;
+
+               if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+                       if (PP_PCIEGen2 == target_link_speed)
+                               printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
+                       else
+                               printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
+               }
+       }
+
+       return 0;
+}
+
+static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+       return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
+}
+
+static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+       int tmp_result, result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to find DPM states clocks in DPM table!",
+                       result = tmp_result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
+               tmp_result =
+                       smu7_request_link_speed_change_before_state_change(hwmgr, input);
+               PP_ASSERT_WITH_CODE((0 == tmp_result),
+                               "Failed to request link speed change before state change!",
+                               result = tmp_result);
+       }
+
+       tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+       tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to populate and upload SCLK MCLK DPM levels!",
+                       result = tmp_result);
+
+       tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to generate DPM level enabled mask!",
+                       result = tmp_result);
+
+       tmp_result = smum_update_sclk_threshold(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to update SCLK threshold!",
+                       result = tmp_result);
+
+       tmp_result = smu7_notify_smc_display(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to notify smc display settings!",
+                       result = tmp_result);
+
+       tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to unfreeze SCLK MCLK DPM!",
+                       result = tmp_result);
+
+       tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to upload DPM level enabled mask!",
+                       result = tmp_result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PCIEPerformanceRequest)) {
+               tmp_result =
+                       smu7_notify_link_speed_change_after_state_change(hwmgr, input);
+               PP_ASSERT_WITH_CODE((0 == tmp_result),
+                               "Failed to notify link speed change after state change!",
+                               result = tmp_result);
+       }
+       data->apply_optimized_settings = false;
+       return result;
+}
+
+static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+       hwmgr->thermal_controller.
+       advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+       if (phm_is_hw_access_blocked(hwmgr))
+               return 0;
+
+       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+       PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+       return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
+}
+
+int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+       uint32_t num_active_displays = 0;
+       struct cgs_display_info info = {0};
+
+       info.mode_info = NULL;
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       num_active_displays = info.display_count;
+
+       if (num_active_displays > 1 && !hwmgr->display_config.multi_monitor_in_sync)
+               smu7_notify_smc_display_change(hwmgr, false);
+
+       return 0;
+}
+
+/**
+ * Programs the display gap.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always OK
+ */
+int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t num_active_displays = 0;
+       uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+       uint32_t display_gap2;
+       uint32_t pre_vbi_time_in_us;
+       uint32_t frame_time_in_us;
+       uint32_t ref_clock;
+       uint32_t refresh_rate = 0;
+       struct cgs_display_info info = {0};
+       struct cgs_mode_info mode_info;
+
+       info.mode_info = &mode_info;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       num_active_displays = info.display_count;
+
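+       /* Select DISPLAY_GAP_IGNORE when no display is active; otherwise open
+        * the gap during VBLANK or at the display watermark.
+        */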
+       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+       ref_clock = mode_info.ref_clock;
+       refresh_rate = mode_info.refresh_rate;
+
+       if (0 == refresh_rate)
+               refresh_rate = 60;
+
+       frame_time_in_us = 1000000 / refresh_rate;
+
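+       /* pre_vbi_time approximates the scanout time before vertical blanking
+        * starts, less a 200 us margin; frame_time_x2 is twice the frame time
+        * expressed in 100 us units and is later sent to the SMC via
+        * PPSMC_MSG_SetVBITimeout.
+        */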
+       pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+       data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
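+       /* Convert the pre-VBI time to reference-clock ticks; this assumes
+        * ref_clock is reported in 10 kHz units, so ticks = us * (ref / 100).
+        */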
+       display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+                                                       SMU_SoftRegisters,
+                                                       PreVBlankGap), 0x64);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+                                                       SMU_SoftRegisters,
+                                                       VBlankTimeout),
+                                       (frame_time_in_us - pre_vbi_time_in_us));
+
+       return 0;
+}
+
+int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+       return smu7_program_display_gap(hwmgr);
+}
+
+/**
+ * Set the maximum target operating fan output RPM.
+ *
+ * @param    hwmgr:  the address of the powerplay hardware manager.
+ * @param    us_max_fan_rpm:  max operating fan RPM value.
+ * @return   The response that came from the SMC.
+ */
+static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+       hwmgr->thermal_controller.
+       advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+       if (phm_is_hw_access_blocked(hwmgr))
+               return 0;
+
+       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+                                       const void *thermal_interrupt_info)
+{
+       return 0;
+}
+
+bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       bool is_update_required = false;
+       struct cgs_display_info info = {0};
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       if (data->display_timing.num_existing_displays != info.display_count)
+               is_update_required = true;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+               if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+                       (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
+                       hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+                       is_update_required = true;
+       }
+       return is_update_required;
+}
+
+static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
+                                                          const struct smu7_performance_level *pl2)
+{
+       return ((pl1->memory_clock == pl2->memory_clock) &&
+                 (pl1->engine_clock == pl2->engine_clock) &&
+                 (pl1->pcie_gen == pl2->pcie_gen) &&
+                 (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+       const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
+       const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+       int i;
+
+       if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+               return -EINVAL;
+
+       /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
+       if (psa->performance_level_count != psb->performance_level_count) {
+               *equal = false;
+               return 0;
+       }
+
+       for (i = 0; i < psa->performance_level_count; i++) {
+               if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+                       /* If we have found even one performance level pair that is different, the states are different. */
+                       *equal = false;
+                       return 0;
+               }
+       }
+
+       /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+       *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+       *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+       *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+       return 0;
+}
+
+int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t vbios_version;
+       uint32_t tmp;
+
+       /* Read MC indirect register offset 0x9F bits [3:0] to see
+        * if VBIOS has already loaded a full version of MC ucode
+        * or not.
+        */
+
+       smu7_get_mc_microcode_version(hwmgr);
+       vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+
+       data->need_long_memory_training = false;
+
+       cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
+                                                       ixMC_IO_DEBUG_UP_13);
+       tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
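+       /* Bit 23 of MC_IO_DEBUG_UP_13 appears to flag parts that can use the
+        * standard memory latency values; otherwise fall back to 330. */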
+       if (tmp & (1 << 23)) {
+               data->mem_latency_high = MEM_LATENCY_HIGH;
+               data->mem_latency_low = MEM_LATENCY_LOW;
+       } else {
+               data->mem_latency_high = 330;
+               data->mem_latency_low = 330;
+       }
+
+       return 0;
+}
+
+static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->clock_registers.vCG_SPLL_FUNC_CNTL         =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
+       data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
+       data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
+       data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
+       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
+       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
+               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
+       data->clock_registers.vDLL_CNTL                  =
+               cgs_read_register(hwmgr->device, mmDLL_CNTL);
+       data->clock_registers.vMCLK_PWRMGT_CNTL          =
+               cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
+       data->clock_registers.vMPLL_AD_FUNC_CNTL         =
+               cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
+       data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
+               cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
+       data->clock_registers.vMPLL_FUNC_CNTL            =
+               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
+       data->clock_registers.vMPLL_FUNC_CNTL_1          =
+               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
+       data->clock_registers.vMPLL_FUNC_CNTL_2          =
+               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
+       data->clock_registers.vMPLL_SS1                  =
+               cgs_read_register(hwmgr->device, mmMPLL_SS1);
+       data->clock_registers.vMPLL_SS2                  =
+               cgs_read_register(hwmgr->device, mmMPLL_SS2);
+       return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t temp;
+
+       temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
+       data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+                       ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+                        MC_SEQ_MISC0_GDDR5_SHIFT));
+
+       return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+       return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->uvd_power_gated = false;
+       data->vce_power_gated = false;
+       data->samu_power_gated = false;
+
+       return 0;
+}
+
+static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       data->low_sclk_interrupt_threshold = 0;
+       return 0;
+}
+
+int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+       int tmp_result, result = 0;
+
+       smu7_upload_mc_firmware(hwmgr);
+
+       tmp_result = smu7_read_clock_registers(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to read clock registers!", result = tmp_result);
+
+       tmp_result = smu7_get_memory_type(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to get memory type!", result = tmp_result);
+
+       tmp_result = smu7_enable_acpi_power_management(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable ACPI power management!", result = tmp_result);
+
+       tmp_result = smu7_init_power_gate_state(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to init power gate state!", result = tmp_result);
+
+       tmp_result = smu7_get_mc_microcode_version(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to get MC microcode version!", result = tmp_result);
+
+       tmp_result = smu7_init_sclk_threshold(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to init sclk threshold!", result = tmp_result);
+
+       return result;
+}
+
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+               enum pp_clock_type type, uint32_t mask)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+               return -EINVAL;
+
+       switch (type) {
+       case PP_SCLK:
+               if (!data->sclk_dpm_key_disabled)
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+               break;
+       case PP_MCLK:
+               if (!data->mclk_dpm_key_disabled)
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+               break;
+       case PP_PCIE:
+       {
+               uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+               uint32_t level = 0;
+
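+               /* Find the index of the highest set bit in the mask: the
+                * topmost enabled PCIe DPM level is the one forced below. */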
+               while (tmp >>= 1)
+                       level++;
+
+               if (!data->pcie_dpm_key_disabled)
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_PCIeDPM_ForceLevel,
+                                       level);
+               break;
+       }
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+               enum pp_clock_type type, char *buf)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+       int i, now, size = 0;
+       uint32_t clock, pcie_speed;
+
+       switch (type) {
+       case PP_SCLK:
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
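+               /* The returned clock and the DPM table values are in 10 kHz
+                * units (hence the / 100 to MHz below); the first level at or
+                * above the current clock is marked as active. */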
+               for (i = 0; i < sclk_table->count; i++) {
+                       if (clock > sclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < sclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMHz %s\n",
+                                       i, sclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_MCLK:
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+               for (i = 0; i < mclk_table->count; i++) {
+                       if (clock > mclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < mclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMHz %s\n",
+                                       i, mclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_PCIE:
+               pcie_speed = smu7_get_current_pcie_speed(hwmgr);
+               for (i = 0; i < pcie_table->count; i++) {
+                       if (pcie_speed != pcie_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < pcie_table->count; i++)
+                       size += sprintf(buf + size, "%d: %s %s\n", i,
+                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
+                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
+                                       (i == now) ? "*" : "");
+               break;
+       default:
+               break;
+       }
+       return size;
+}
+
+static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+       if (mode) {
+               /* stop auto-manage */
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl))
+                       smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+               smu7_fan_ctrl_set_static_mode(hwmgr, mode);
+       } else {
+               /* restart auto-manage */
+               smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+       }
+
+       return 0;
+}
+
+static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+       if (hwmgr->fan_ctrl_is_in_default_mode)
+               return hwmgr->fan_ctrl_default_mode;
+       else
+               return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct smu7_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
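+       /* Overdrive is reported as the percentage by which the highest DPM
+        * level exceeds the golden (default) top level. */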
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct smu7_power_state  *smu7_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
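+       /* Scale the golden top engine clock up by the requested percentage
+        * (clamped to 20% above) and make it the new highest level. */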
+       smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct smu7_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
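+       /* Same percentage-over-golden calculation as smu7_get_sclk_od(), for MCLK. */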
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct smu7_power_state  *smu7_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
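+       /* Mirror of smu7_set_sclk_od() for the top memory clock level. */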
+       smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+       int i;
+
+       if (table_info == NULL)
+               return -EINVAL;
+
+       dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+       for (i = 0; i < dep_sclk_table->count; i++) {
+               clocks->clock[i] = dep_sclk_table->entries[i].clk;
+               clocks->count++;
+       }
+       return 0;
+}
+
+static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
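+       /* Clocks between the low- and high-latency thresholds need the long
+        * latency value; at or above the high threshold the short one
+        * applies. Anything below the low threshold is out of range. */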
+       if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
+               return data->mem_latency_high;
+       else if (clk >= MEM_FREQ_HIGH_LATENCY)
+               return data->mem_latency_low;
+       else
+               return MEM_LATENCY_ERR;
+}
+
+static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+       int i;
+
+       if (table_info == NULL)
+               return -EINVAL;
+
+       dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+       for (i = 0; i < dep_mclk_table->count; i++) {
+               clocks->clock[i] = dep_mclk_table->entries[i].clk;
+               clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+                                               dep_mclk_table->entries[i].clk);
+               clocks->count++;
+       }
+       return 0;
+}
+
+static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+                                               struct amd_pp_clocks *clocks)
+{
+       switch (type) {
+       case amd_pp_sys_clock:
+               smu7_get_sclks(hwmgr, clocks);
+               break;
+       case amd_pp_mem_clock:
+               smu7_get_mclks(hwmgr, clocks);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct pp_hwmgr_func smu7_hwmgr_funcs = {
+       .backend_init = smu7_hwmgr_backend_init,
+       .backend_fini = phm_hwmgr_backend_fini,
+       .asic_setup = smu7_setup_asic_task,
+       .dynamic_state_management_enable = smu7_enable_dpm_tasks,
+       .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
+       .force_dpm_level = smu7_force_dpm_level,
+       .power_state_set = smu7_set_power_state_tasks,
+       .get_power_state_size = smu7_get_power_state_size,
+       .get_mclk = smu7_dpm_get_mclk,
+       .get_sclk = smu7_dpm_get_sclk,
+       .patch_boot_state = smu7_dpm_patch_boot_state,
+       .get_pp_table_entry = smu7_get_pp_table_entry,
+       .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
+       .powerdown_uvd = smu7_powerdown_uvd,
+       .powergate_uvd = smu7_powergate_uvd,
+       .powergate_vce = smu7_powergate_vce,
+       .disable_clock_power_gating = smu7_disable_clock_power_gating,
+       .update_clock_gatings = smu7_update_clock_gatings,
+       .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
+       .display_config_changed = smu7_display_configuration_changed_task,
+       .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
+       .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
+       .get_temperature = smu7_thermal_get_temperature,
+       .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
+       .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
+       .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
+       .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+       .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
+       .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
+       .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
+       .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
+       .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
+       .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
+       .check_states_equal = smu7_check_states_equal,
+       .set_fan_control_mode = smu7_set_fan_control_mode,
+       .get_fan_control_mode = smu7_get_fan_control_mode,
+       .force_clock_level = smu7_force_clock_level,
+       .print_clock_levels = smu7_print_clock_levels,
+       .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+       .get_sclk_od = smu7_get_sclk_od,
+       .set_sclk_od = smu7_set_sclk_od,
+       .get_mclk_od = smu7_get_mclk_od,
+       .set_mclk_od = smu7_set_mclk_od,
+       .get_clock_by_type = smu7_get_clock_by_type,
+       .read_sensor = smu7_read_sensor,
+};
+
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+               uint32_t clock_insr)
+{
+       uint8_t i;
+       uint32_t temp;
+       uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+       PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
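+       /* Walk down from the largest divider and pick the biggest one that
+        * still keeps (clock >> i) at or above the minimum engine clock. */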
+       for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+               temp = clock >> i;
+
+               if (temp >= min || i == 0)
+                       break;
+       }
+       return i;
+}
+
+int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+       int ret = 0;
+
+       hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
+       if (hwmgr->pp_table_version == PP_TABLE_V0)
+               hwmgr->pptable_func = &pptable_funcs;
+       else if (hwmgr->pp_table_version == PP_TABLE_V1)
+               hwmgr->pptable_func = &pptable_v1_0_funcs;
+
+       pp_smu7_thermal_initialize(hwmgr);
+       return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
new file mode 100644 (file)
index 0000000..27e7f76
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_HWMGR_H
+#define _SMU7_HWMGR_H
+
+#include "hwmgr.h"
+#include "ppatomctrl.h"
+
+#define SMU7_MAX_HARDWARE_POWERLEVELS   2
+
+#define SMU7_VOLTAGE_CONTROL_NONE                   0x0
+#define SMU7_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define SMU7_VOLTAGE_CONTROL_BY_SVID2               0x2
+#define SMU7_VOLTAGE_CONTROL_MERGED                 0x3
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
+
+enum gpu_pt_config_reg_type {
+       GPU_CONFIGREG_MMR = 0,
+       GPU_CONFIGREG_SMC_IND,
+       GPU_CONFIGREG_DIDT_IND,
+       GPU_CONFIGREG_GC_CAC_IND,
+       GPU_CONFIGREG_CACHE,
+       GPU_CONFIGREG_MAX
+};
+
+struct gpu_pt_config_reg {
+       uint32_t                           offset;
+       uint32_t                           mask;
+       uint32_t                           shift;
+       uint32_t                           value;
+       enum gpu_pt_config_reg_type       type;
+};
+
+struct smu7_performance_level {
+       uint32_t  memory_clock;
+       uint32_t  engine_clock;
+       uint16_t  pcie_gen;
+       uint16_t  pcie_lane;
+};
+
+struct smu7_thermal_temperature_setting {
+       long temperature_low;
+       long temperature_high;
+       long temperature_shutdown;
+};
+
+struct smu7_uvd_clocks {
+       uint32_t  vclk;
+       uint32_t  dclk;
+};
+
+struct smu7_vce_clocks {
+       uint32_t  evclk;
+       uint32_t  ecclk;
+};
+
+struct smu7_power_state {
+       uint32_t                  magic;
+       struct smu7_uvd_clocks    uvd_clks;
+       struct smu7_vce_clocks    vce_clks;
+       uint32_t                  sam_clk;
+       uint16_t                  performance_level_count;
+       bool                      dc_compatible;
+       uint32_t                  sclk_threshold;
+       struct smu7_performance_level  performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct smu7_dpm_level {
+       bool    enabled;
+       uint32_t        value;
+       uint32_t        param1;
+};
+
+#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define SMU7_MINIMUM_ENGINE_CLOCK 2500
+
+struct smu7_single_dpm_table {
+       uint32_t                count;
+       struct smu7_dpm_level   dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct smu7_dpm_table {
+       struct smu7_single_dpm_table  sclk_table;
+       struct smu7_single_dpm_table  mclk_table;
+       struct smu7_single_dpm_table  pcie_speed_table;
+       struct smu7_single_dpm_table  vddc_table;
+       struct smu7_single_dpm_table  vddci_table;
+       struct smu7_single_dpm_table  mvdd_table;
+};
+
+struct smu7_clock_registers {
+       uint32_t  vCG_SPLL_FUNC_CNTL;
+       uint32_t  vCG_SPLL_FUNC_CNTL_2;
+       uint32_t  vCG_SPLL_FUNC_CNTL_3;
+       uint32_t  vCG_SPLL_FUNC_CNTL_4;
+       uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t  vDLL_CNTL;
+       uint32_t  vMCLK_PWRMGT_CNTL;
+       uint32_t  vMPLL_AD_FUNC_CNTL;
+       uint32_t  vMPLL_DQ_FUNC_CNTL;
+       uint32_t  vMPLL_FUNC_CNTL;
+       uint32_t  vMPLL_FUNC_CNTL_1;
+       uint32_t  vMPLL_FUNC_CNTL_2;
+       uint32_t  vMPLL_SS1;
+       uint32_t  vMPLL_SS2;
+};
+
+#define DISABLE_MC_LOADMICROCODE   1
+#define DISABLE_MC_CFGPROGRAMMING  2
+
+struct smu7_voltage_smio_registers {
+       uint32_t vS0_VID_LOWER_SMIO_CNTL;
+};
+
+#define SMU7_MAX_LEAKAGE_COUNT  8
+
+struct smu7_leakage_voltage {
+       uint16_t  count;
+       uint16_t  leakage_id[SMU7_MAX_LEAKAGE_COUNT];
+       uint16_t  actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
+};
+
+struct smu7_vbios_boot_state {
+       uint16_t    mvdd_bootup_value;
+       uint16_t    vddc_bootup_value;
+       uint16_t    vddci_bootup_value;
+       uint16_t    vddgfx_bootup_value;
+       uint32_t    sclk_bootup_value;
+       uint32_t    mclk_bootup_value;
+       uint16_t    pcie_gen_bootup_value;
+       uint16_t    pcie_lane_bootup_value;
+};
+
+struct smu7_display_timing {
+       uint32_t  min_clock_in_sr;
+       uint32_t  num_existing_displays;
+};
+
+struct smu7_dpmlevel_enable_mask {
+       uint32_t  uvd_dpm_enable_mask;
+       uint32_t  vce_dpm_enable_mask;
+       uint32_t  acp_dpm_enable_mask;
+       uint32_t  samu_dpm_enable_mask;
+       uint32_t  sclk_dpm_enable_mask;
+       uint32_t  mclk_dpm_enable_mask;
+       uint32_t  pcie_dpm_enable_mask;
+};
+
+struct smu7_pcie_perf_range {
+       uint16_t  max;
+       uint16_t  min;
+};
+
+struct smu7_hwmgr {
+       struct smu7_dpm_table                   dpm_table;
+       struct smu7_dpm_table                   golden_dpm_table;
+
+       uint32_t                                                voting_rights_clients0;
+       uint32_t                                                voting_rights_clients1;
+       uint32_t                                                voting_rights_clients2;
+       uint32_t                                                voting_rights_clients3;
+       uint32_t                                                voting_rights_clients4;
+       uint32_t                                                voting_rights_clients5;
+       uint32_t                                                voting_rights_clients6;
+       uint32_t                                                voting_rights_clients7;
+       uint32_t                                                static_screen_threshold_unit;
+       uint32_t                                                static_screen_threshold;
+       uint32_t                                                voltage_control;
+       uint32_t                                                vdd_gfx_control;
+       uint32_t                                                vddc_vddgfx_delta;
+       uint32_t                                                active_auto_throttle_sources;
+
+       struct smu7_clock_registers            clock_registers;
+
+       bool                           is_memory_gddr5;
+       uint16_t                       acpi_vddc;
+       bool                           pspp_notify_required;
+       uint16_t                       force_pcie_gen;
+       uint16_t                       acpi_pcie_gen;
+       uint32_t                       pcie_gen_cap;
+       uint32_t                       pcie_lane_cap;
+       uint32_t                       pcie_spc_cap;
+       struct smu7_leakage_voltage          vddc_leakage;
+       struct smu7_leakage_voltage          vddci_leakage;
+       struct smu7_leakage_voltage          vddcgfx_leakage;
+
+       uint32_t                             mvdd_control;
+       uint32_t                             vddc_mask_low;
+       uint32_t                             mvdd_mask_low;
+       uint16_t                            max_vddc_in_pptable;
+       uint16_t                            min_vddc_in_pptable;
+       uint16_t                            max_vddci_in_pptable;
+       uint16_t                            min_vddci_in_pptable;
+       bool                                is_uvd_enabled;
+       struct smu7_vbios_boot_state        vbios_boot_state;
+
+       bool                           pcie_performance_request;
+       bool                           battery_state;
+       bool                           is_tlu_enabled;
+       bool                           disable_handshake;
+       bool                           smc_voltage_control_enabled;
+       bool                           vbi_time_out_support;
+
+       uint32_t                       soft_regs_start;
+       /* ---- Stuff originally coming from Evergreen ---- */
+       uint32_t                             vddci_control;
+       struct pp_atomctrl_voltage_table     vddc_voltage_table;
+       struct pp_atomctrl_voltage_table     vddci_voltage_table;
+       struct pp_atomctrl_voltage_table     mvdd_voltage_table;
+       struct pp_atomctrl_voltage_table     vddgfx_voltage_table;
+
+       uint32_t                             mgcg_cgtt_local2;
+       uint32_t                             mgcg_cgtt_local3;
+       uint32_t                             gpio_debug;
+       uint32_t                             mc_micro_code_feature;
+       uint32_t                             highest_mclk;
+       uint16_t                             acpi_vddci;
+       uint8_t                              mvdd_high_index;
+       uint8_t                              mvdd_low_index;
+       bool                                 dll_default_on;
+       bool                                 performance_request_registered;
+
+       /* ---- Low Power Features ---- */
+       bool                           ulv_supported;
+
+       /* ---- CAC Stuff ---- */
+       uint32_t                       cac_table_start;
+       bool                           cac_configuration_required;
+       bool                           driver_calculate_cac_leakage;
+       bool                           cac_enabled;
+
+       /* ---- DPM2 Parameters ---- */
+       uint32_t                       power_containment_features;
+       bool                           enable_dte_feature;
+       bool                           enable_tdc_limit_feature;
+       bool                           enable_pkg_pwr_tracking_feature;
+       bool                           disable_uvd_power_tune_feature;
+
+       uint32_t                       dte_tj_offset;
+       uint32_t                       fast_watermark_threshold;
+
+       /* ---- Phase Shedding ---- */
+       bool                           vddc_phase_shed_control;
+
+       /* ---- DI/DT ---- */
+       struct smu7_display_timing        display_timing;
+
+       /* ---- Thermal Temperature Setting ---- */
+       struct smu7_thermal_temperature_setting  thermal_temp_setting;
+       struct smu7_dpmlevel_enable_mask     dpm_level_enable_mask;
+       uint32_t                                  need_update_smu7_dpm_table;
+       uint32_t                                  sclk_dpm_key_disabled;
+       uint32_t                                  mclk_dpm_key_disabled;
+       uint32_t                                  pcie_dpm_key_disabled;
+       uint32_t                                  min_engine_clocks;
+       struct smu7_pcie_perf_range          pcie_gen_performance;
+       struct smu7_pcie_perf_range          pcie_lane_performance;
+       struct smu7_pcie_perf_range          pcie_gen_power_saving;
+       struct smu7_pcie_perf_range          pcie_lane_power_saving;
+       bool                                      use_pcie_performance_levels;
+       bool                                      use_pcie_power_saving_levels;
+       uint32_t                                  mclk_activity_target;
+       uint32_t                                  mclk_dpm0_activity_target;
+       uint32_t                                  low_sclk_interrupt_threshold;
+       uint32_t                                  last_mclk_dpm_enable_mask;
+       bool                                      uvd_enabled;
+
+       /* ---- Power Gating States ---- */
+       bool                           uvd_power_gated;
+       bool                           vce_power_gated;
+       bool                           samu_power_gated;
+       bool                           need_long_memory_training;
+
+       /* Application power optimization parameters */
+       bool                               update_up_hyst;
+       bool                               update_down_hyst;
+       uint32_t                           down_hyst;
+       uint32_t                           up_hyst;
+       uint32_t                           disable_dpm_mask;
+       bool                               apply_optimized_settings;
+
+       uint32_t                              avfs_vdroop_override_setting;
+       bool                                  apply_avfs_cks_off_voltage;
+       uint32_t                              frame_time_x2;
+       uint16_t                              mem_latency_high;
+       uint16_t                              mem_latency_low;
+};
+
+/* To convert to Q8.8 format for firmware */
+#define SMU7_Q88_FORMAT_CONVERSION_UNIT             256
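+/* e.g. 1.5 in Q8.8 is 1.5 * 256 = 384 (0x0180) */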
+
+enum SMU7_I2CLineID {
+       SMU7_I2CLineID_DDC1 = 0x90,
+       SMU7_I2CLineID_DDC2 = 0x91,
+       SMU7_I2CLineID_DDC3 = 0x92,
+       SMU7_I2CLineID_DDC4 = 0x93,
+       SMU7_I2CLineID_DDC5 = 0x94,
+       SMU7_I2CLineID_DDC6 = 0x95,
+       SMU7_I2CLineID_SCLSDA = 0x96,
+       SMU7_I2CLineID_DDCVGA = 0x97
+};
+
+#define SMU7_I2C_DDC1DATA          0
+#define SMU7_I2C_DDC1CLK           1
+#define SMU7_I2C_DDC2DATA          2
+#define SMU7_I2C_DDC2CLK           3
+#define SMU7_I2C_DDC3DATA          4
+#define SMU7_I2C_DDC3CLK           5
+#define SMU7_I2C_SDA               40
+#define SMU7_I2C_SCL               41
+#define SMU7_I2C_DDC4DATA          65
+#define SMU7_I2C_DDC4CLK           66
+#define SMU7_I2C_DDC5DATA          0x48
+#define SMU7_I2C_DDC5CLK           0x49
+#define SMU7_I2C_DDC6DATA          0x4a
+#define SMU7_I2C_DDC6CLK           0x4b
+#define SMU7_I2C_DDCVGADATA        0x4c
+#define SMU7_I2C_DDCVGACLK         0x4d
+
+#define SMU7_UNUSED_GPIO_PIN       0x7F
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+               uint32_t clock_insr);
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
new file mode 100644 (file)
index 0000000..260fce0
--- /dev/null
@@ -0,0 +1,729 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
+#include "pp_debug.h"
+#include "smu7_common.h"
+
+#define VOLTAGE_SCALE  4
+
+static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+static struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   0xFFFFFFFF  }
+};
+
+static struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060011, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0011, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100011, GPU_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900011, GPU_CONFIGREG_GC_CAC_IND },
+
+       {   0xFFFFFFFF  }
+};
+
+static struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   0xFFFFFFFF  }
+};
+
+static struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0008,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0008,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   0xFFFFFFFF  }
+};
+
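+/*
+ * Toggle DIDT_CTRL_EN in each block's CTRL0 register for every ramping
+ * feature the platform advertises (SQ/DB/TD/TCP), mirror the per-block
+ * state into DIDTBlock_Info and, when enabling, report the resulting
+ * mask to the SMC via PPSMC_MSG_Didt_Block_Function.
+ */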
+static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+{
+       uint32_t en = enable ? 1 : 0;
+       int result = 0;
+       uint32_t data;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+               data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+               DIDTBlock_Info &= ~SQ_Enable_MASK;
+               DIDTBlock_Info |= en << SQ_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+               data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+               DIDTBlock_Info &= ~DB_Enable_MASK;
+               DIDTBlock_Info |= en << DB_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+               data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+               DIDTBlock_Info &= ~TD_Enable_MASK;
+               DIDTBlock_Info |= en << TD_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+               data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+               DIDTBlock_Info &= ~TCP_Enable_MASK;
+               DIDTBlock_Info |= en << TCP_Enable_SHIFT;
+       }
+
+       if (enable)
+               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+
+       return result;
+}
+
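+/*
+ * Walk a gpu_pt_config_reg table until the 0xFFFFFFFF terminator.
+ * GPU_CONFIGREG_CACHE entries only accumulate their field value into a
+ * local cache word; the next non-cache entry performs a read-modify-write
+ * on the addressed register (direct, SMC-, DIDT- or GC_CAC-indirect) and
+ * ORs the cached bits in before writing.
+ */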
+static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+                               struct gpu_pt_config_reg *cac_config_regs)
+{
+       struct gpu_pt_config_reg *config_regs = cac_config_regs;
+       uint32_t cache = 0;
+       uint32_t data = 0;
+
+       PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
+
+       while (config_regs->offset != 0xFFFFFFFF) {
+               if (config_regs->type == GPU_CONFIGREG_CACHE) {
+                       cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+               } else {
+                       switch (config_regs->type) {
+                       case GPU_CONFIGREG_SMC_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
+                               break;
+
+                       case GPU_CONFIGREG_DIDT_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+                               break;
+
+                       case GPU_CONFIGREG_GC_CAC_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+                               break;
+
+                       default:
+                               data = cgs_read_register(hwmgr->device, config_regs->offset);
+                               break;
+                       }
+
+                       data &= ~config_regs->mask;
+                       data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+                       data |= cache;
+
+                       switch (config_regs->type) {
+                       case GPU_CONFIGREG_SMC_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
+                               break;
+
+                       case GPU_CONFIGREG_DIDT_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+                               break;
+
+                       case GPU_CONFIGREG_GC_CAC_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+                               break;
+
+                       default:
+                               cgs_write_register(hwmgr->device, config_regs->offset, data);
+                               break;
+                       }
+                       cache = 0;
+               }
+
+               config_regs++;
+       }
+
+       return 0;
+}
+
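+/*
+ * Program the per-shader-engine CAC and DIDT tables for the detected
+ * ASIC. GRBM_GFX_INDEX selects each SE in turn (with SH/instance
+ * broadcast), the Polaris10/Polaris11 tables are written through the
+ * walker above, the original index is restored and DIDT is enabled.
+ */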
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       uint32_t num_se = 0;
+       uint32_t count, value, value2;
+       struct cgs_system_info sys_info = {0};
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+       result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+       if (result == 0)
+               num_se = sys_info.value;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+
+               /* TODO: disable clock gating before programming DIDT */
+               value = 0;
+               value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
+               for (count = 0; count < num_se; count++) {
+                       value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
+                               | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
+                               | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
+                       cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
+
+                       if (hwmgr->chip_id == CHIP_POLARIS10) {
+                               result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                               result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+                               result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                               result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                       }
+               }
+               cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+
+               result = smu7_enable_didt(hwmgr, true);
+               PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+
+               /* TODO: re-enable clock gating now that DIDT is enabled */
+       }
+
+       return 0;
+}
+
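+/*
+ * Counterpart of smu7_enable_didt_config(): if any ramping feature is
+ * enabled, call smu7_enable_didt(hwmgr, false) to clear the per-block
+ * DIDT_CTRL_EN bits again.
+ */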
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+       int result;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+               /* TODO: disable clock gating before disabling DIDT */
+
+               result = smu7_enable_didt(hwmgr, false);
+               PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
+               /* TO DO Post DIDT enable clock gating */
+       }
+
+       return 0;
+}
+
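+/*
+ * Ask the SMC to start CAC power estimation and remember whether the
+ * request took effect so that the disable path stays balanced.
+ */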
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC)) {
+               int smc_result;
+               smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_EnableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to enable CAC in SMC.", result = -1);
+
+               data->cac_enabled = (smc_result == 0);
+       }
+       return result;
+}
+
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
+               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_DisableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to disable CAC in SMC.", result = -1);
+
+               data->cac_enabled = false;
+       }
+       return result;
+}
+
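+/*
+ * Push a new package power limit to the SMC, but only while the
+ * PkgPwrLimit containment feature is active. The value n is encoded the
+ * same way as the default limit set up below
+ * (usMaximumPowerDeliveryLimit * 256).
+ */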
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->power_containment_features &
+                       POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+               return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_PkgPwrSetLimit, n);
+       return 0;
+}
+
+static int smu7_set_overdrive_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp)
+{
+       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+}
+
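+/*
+ * Enable the TDC-limit and package-power-tracking features in the SMC
+ * (when the platform requests them), record which ones stuck in
+ * power_containment_features and seed the default package power limit
+ * from the CAC/DTP table.
+ */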
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int smc_result;
+       int result = 0;
+       struct phm_cac_tdp_table *cac_table;
+
+       data->power_containment_features = 0;
+       if (hwmgr->pp_table_version == PP_TABLE_V1)
+               cac_table = table_info->cac_dtp_table;
+       else
+               cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+
+               if (data->enable_tdc_limit_feature) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to enable TDCLimit in SMC.", result = -1);
+                       if (smc_result == 0)
+                               data->power_containment_features |=
+                                               POWERCONTAINMENT_FEATURE_TDCLimit;
+               }
+
+               if (data->enable_pkg_pwr_tracking_feature) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to enable PkgPwrTracking in SMC.", result = -1);
+                       if (smc_result == 0) {
+                               uint32_t default_limit =
+                                       (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+                               data->power_containment_features |=
+                                               POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+                               if (smu7_set_power_limit(hwmgr, default_limit))
+                                       printk(KERN_ERR "Failed to set Default Power Limit in SMC!\n");
+                       }
+               }
+       }
+       return result;
+}
+
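+/*
+ * Tear down whichever power-containment features were recorded as
+ * active: TDC limit, DTE and the package power limit are disabled
+ * individually and the feature mask is cleared.
+ */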
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment) &&
+                       data->power_containment_features) {
+               int smc_result;
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_TDCLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable TDCLimit in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_DTE) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable DTE in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable PkgPwrTracking in SMC.",
+                                       result = smc_result);
+               }
+               data->power_containment_features = 0;
+       }
+
+       return result;
+}
+
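+/*
+ * Apply the validated TDP adjustment (a signed percentage) to the TDP
+ * from the CAC/DTP table and hand the result to the SMC as the new
+ * OverDrive target.
+ */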
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_cac_tdp_table *cac_table;
+
+       int adjust_percent, target_tdp;
+       int result = 0;
+
+       if (hwmgr->pp_table_version == PP_TABLE_V1)
+               cac_table = table_info->cac_dtp_table;
+       else
+               cac_table = hwmgr->dyn_state.cac_dtp_table;
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               /* adjustment percentage has already been validated */
+               adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+                               hwmgr->platform_descriptor.TDPAdjustment :
+                               (-1 * hwmgr->platform_descriptor.TDPAdjustment);
+               /* The SMC stores target_tdp as a 7-bit fraction in the DPM
+                * table, but the message parameter must be an 8-bit fraction.
+                */
+               target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+               result = smu7_set_overdrive_target_tdp(hwmgr, (uint32_t)target_tdp);
+       }
+
+       return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
new file mode 100644 (file)
index 0000000..22f86b6
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU7_POWERTUNE_H
+#define _SMU7_POWERTUNE_H
+
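+/*
+ * Field masks/shifts and indirect register offsets used by the DIDT
+ * config tables; defined locally here (presumably because the generated
+ * ASIC register headers do not expose these fields).
+ */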
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
+
+#define ixGC_CAC_CNTL 0x0000
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
+#endif  /* _SMU7_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
new file mode 100644 (file)
index 0000000..fb6c6f6
--- /dev/null
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <asm/div64.h>
+#include "smu7_thermal.h"
+#include "smu7_hwmgr.h"
+#include "smu7_common.h"
+
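+/*
+ * Report the fan's control capabilities: percent read/write is always
+ * available when a fan exists; RPM read/write additionally requires the
+ * FanSpeedInTableIsRPM cap and a non-zero tachometer pulse count.
+ */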
+int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+               struct phm_fan_speed_info *fan_speed_info)
+{
+       if (hwmgr->thermal_controller.fanInfo.bNoFan)
+               return 0;
+
+       fan_speed_info->supports_percent_read = true;
+       fan_speed_info->supports_percent_write = true;
+       fan_speed_info->min_percent = 0;
+       fan_speed_info->max_percent = 100;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+               hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+               fan_speed_info->supports_rpm_read = true;
+               fan_speed_info->supports_rpm_write = true;
+               fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+               fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+       } else {
+               fan_speed_info->min_rpm = 0;
+               fan_speed_info->max_rpm = 0;
+       }
+
+       return 0;
+}
+
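+/*
+ * Derive the current fan speed in percent from the PWM duty cycle:
+ * speed = FDO_PWM_DUTY * 100 / FMAX_DUTY100, clamped to 100.
+ */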
+int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+               uint32_t *speed)
+{
+       uint32_t duty100;
+       uint32_t duty;
+       uint64_t tmp64;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan)
+               return 0;
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL1, FMAX_DUTY100);
+       duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+
+       tmp64 = (uint64_t)duty * 100;
+       do_div(tmp64, duty100);
+       *speed = (uint32_t)tmp64;
+
+       if (*speed > 100)
+               *speed = 100;
+
+       return 0;
+}
+
+int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+       uint32_t tach_period;
+       uint32_t crystal_clock_freq;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+                       (hwmgr->thermal_controller.fanInfo.
+                               ucTachometerPulsesPerRevolution == 0))
+               return 0;
+
+       tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_TACH_STATUS, TACH_PERIOD);
+
+       if (tach_period == 0)
+               return -EINVAL;
+
+       crystal_clock_freq = smu7_get_xclk(hwmgr);
+
+       *speed = 60 * crystal_clock_freq * 10000 / tach_period;
+
+       return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    mode   the fan control mode: 0 default, 1 by percent, 5 by RPM
+* @exception Should always succeed.
+*/
+int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+
+       if (hwmgr->fan_ctrl_is_in_default_mode) {
+               hwmgr->fan_ctrl_default_mode =
+                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,     CGS_IND_REG__SMC,
+                                               CG_FDO_CTRL2, FDO_PWM_MODE);
+               hwmgr->tmin =
+                               PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                                               CG_FDO_CTRL2, TMIN);
+               hwmgr->fan_ctrl_is_in_default_mode = false;
+       }
+
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL2, TMIN, 0);
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+       return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Should always succeed.
+*/
+int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+{
+       if (!hwmgr->fan_ctrl_is_in_default_mode) {
+               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
+               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               CG_FDO_CTRL2, TMIN, hwmgr->tmin);
+               hwmgr->fan_ctrl_is_in_default_mode = true;
+       }
+
+       return 0;
+}
+
+static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+       int result;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
+               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_FanSpeedInTableIsRPM))
+                       hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
+                                       hwmgr->thermal_controller.
+                                       advanceFanControlParameters.usMaxFanRPM);
+               else
+                       hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
+                                       hwmgr->thermal_controller.
+                                       advanceFanControlParameters.usMaxFanPWM);
+
+       } else {
+               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
+               result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+       }
+
+       if (!result && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ucTargetTemperature)
+               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SetFanTemperatureTarget,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ucTargetTemperature);
+
+       return result;
+}
+
+
+int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+}
+
+/**
+* Set Fan Speed in percent.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the percentage value (0% - 100%) to be set.
+* @exception Fails if the 100% setting appears to be 0.
+*/
+int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+               uint32_t speed)
+{
+       uint32_t duty100;
+       uint32_t duty;
+       uint64_t tmp64;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan)
+               return 0;
+
+       if (speed > 100)
+               speed = 100;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl))
+               smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (uint64_t)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (uint32_t)tmp64;
+
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
+
+       return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reset Fan Speed to default.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Always succeeds.
+*/
+int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+       int result;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan)
+               return 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl)) {
+               result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+               if (!result)
+                       result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+       } else
+               result = smu7_fan_ctrl_set_default_mode(hwmgr);
+
+       return result;
+}
+
+/**
+* Set Fan Speed in RPM.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the RPM value (min - max) to be set.
+* @exception Fails if the speed does not lie between min and max.
+*/
+int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+       uint32_t tach_period;
+       uint32_t crystal_clock_freq;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+                       (hwmgr->thermal_controller.fanInfo.
+                       ucTachometerPulsesPerRevolution == 0) ||
+                       (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+                       (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+               return 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl))
+               smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+       crystal_clock_freq = smu7_get_xclk(hwmgr);
+
+       tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               CG_TACH_STATUS, TACH_PERIOD, tach_period);
+
+       return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reads the temperature from the SMU7 thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+       int temp;
+
+       temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_MULT_THERMAL_STATUS, CTF_TEMP);
+
+       /* Bit 9 means the reading is lower than the lowest usable value. */
+       if (temp & 0x200)
+               temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
+       else
+               temp = temp & 0x1ff;
+
+       temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       return temp;
+}
+
+/**
+* Set the requested temperature range for high and low alert signals
+* @param    hwmgr The address of the hardware manager.
+* @param    low_temp Low alert temperature to be programmed
+* @param    high_temp High alert temperature to be programmed
+* @exception -EINVAL if the clamped range is inverted (low > high).
+*/
+static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+               uint32_t low_temp, uint32_t high_temp)
+{
+       uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
+                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
+                       PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       if (low < low_temp)
+               low = low_temp;
+       if (high > high_temp)
+               high = high_temp;
+
+       if (low > high)
+               return -EINVAL;
+
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, DIG_THERM_INTH,
+                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, DIG_THERM_INTL,
+                       (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_CTRL, DIG_THERM_DPM,
+                       (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+
+       return 0;
+}
+
+/**
+* Programs thermal controller one-time setting registers
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+       if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
+               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               CG_TACH_CTRL, EDGE_PER_REV,
+                               hwmgr->thermal_controller.fanInfo.
+                               ucTachometerPulsesPerRevolution - 1);
+
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
+
+       return 0;
+}
+
+/**
+* Enable thermal alerts on the SMU7 thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+{
+       uint32_t alert;
+
+       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, THERM_INT_MASK);
+       alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+       /* send message to SMU to enable internal thermal interrupts */
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+}
+
+/**
+* Disable thermal alerts on the SMU7 thermal controller.
+* @param    hwmgr The address of the hardware manager.
+*/
+int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+{
+       uint32_t alert;
+
+       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, THERM_INT_MASK);
+       alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
+       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+       /* send message to SMU to disable internal thermal interrupts */
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+}
+
+/**
+* Uninitialize the thermal controller.
+* Currently just disables alerts.
+* @param    hwmgr The address of the hardware manager.
+*/
+int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+       int result = smu7_thermal_disable_alert(hwmgr);
+
+       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+               smu7_fan_ctrl_set_default_mode(hwmgr);
+
+       return result;
+}
+
+/**
+* Start the fan control on the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   always 0
+*/
+static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       /* If the fan table setup has failed we could have disabled
+        * PHM_PlatformCaps_MicrocodeFanControl even after
+        * this function was included in the table.
+        * Make sure that we still think controlling the fan is OK.
+        */
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl)) {
+               smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+               smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+       }
+
+       return 0;
+}
+
+/**
+* Set temperature range for high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from set temperature range routine
+*/
+static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+
+       if (range == NULL)
+               return -EINVAL;
+
+       return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
+}
+
+/**
+* Programs one-time setting registers
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from initialize thermal controller routine
+*/
+static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       return smu7_thermal_initialize(hwmgr);
+}
+
+/**
+* Enable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from enable alert routine
+*/
+static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       return smu7_thermal_enable_alert(hwmgr);
+}
+
+/**
+* Disable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from disable alert routine
+*/
+static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       return smu7_thermal_disable_alert(hwmgr);
+}
+
+static const struct phm_master_table_item
+phm_thermal_start_thermal_controller_master_list[] = {
+       {NULL, tf_smu7_thermal_initialize},
+       {NULL, tf_smu7_thermal_set_temperature_range},
+       {NULL, tf_smu7_thermal_enable_alert},
+       {NULL, smum_thermal_avfs_enable},
+/* We should restrict performance levels to low before we halt the SMC.
+ * On the other hand we are still in boot state when we do this
+ * so it would be pointless.
+ * If this assumption changes we have to revisit this table.
+ */
+       {NULL, smum_thermal_setup_fan_table},
+       {NULL, tf_smu7_thermal_start_smc_fan_control},
+       {NULL, NULL}
+};
+
+static const struct phm_master_table_header
+phm_thermal_start_thermal_controller_master = {
+       0,
+       PHM_MasterTableFlag_None,
+       phm_thermal_start_thermal_controller_master_list
+};
+
+static const struct phm_master_table_item
+phm_thermal_set_temperature_range_master_list[] = {
+       {NULL, tf_smu7_thermal_disable_alert},
+       {NULL, tf_smu7_thermal_set_temperature_range},
+       {NULL, tf_smu7_thermal_enable_alert},
+       {NULL, NULL}
+};
+
+static const struct phm_master_table_header
+phm_thermal_set_temperature_range_master = {
+       0,
+       PHM_MasterTableFlag_None,
+       phm_thermal_set_temperature_range_master_list
+};
+
+int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+               smu7_fan_ctrl_set_default_mode(hwmgr);
+       return 0;
+}
+
+/**
+* Initializes the thermal controller related functions in the Hardware Manager structure.
+* @param    hwmgr The address of the hardware manager.
+* @exception Any error code from constructing the dispatch tables.
+*/
+int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+       int result;
+
+       result = phm_construct_table(hwmgr,
+                       &phm_thermal_set_temperature_range_master,
+                       &(hwmgr->set_temperature_range));
+
+       if (!result) {
+               result = phm_construct_table(hwmgr,
+                               &phm_thermal_start_thermal_controller_master,
+                               &(hwmgr->start_thermal_controller));
+               if (result)
+                       phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+       }
+
+       if (!result)
+               hwmgr->fan_ctrl_is_in_default_mode = true;
+       return result;
+}
+
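To make the fixed-point math in smu7_thermal.c concrete, here is a small standalone sketch of the three conversions it performs (PWM duty to percent, tachometer period to RPM, CTF temperature decode). All register values are hypothetical, and two assumptions are flagged in the comments: that smu7_get_xclk() reports the reference clock in 10 kHz units (suggested by the x10000 scaling in the code) and that PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Percent readback: duty * 100 / duty100, as in
	 * smu7_fan_ctrl_get_fan_speed_percent(). Values are hypothetical. */
	uint32_t duty100 = 255, duty = 128;
	uint64_t pct = (uint64_t)duty * 100 / duty100;        /* 50 */

	/* RPM readback: 60 * xclk * 10000 / tach_period, assuming xclk
	 * (smu7_get_xclk()) is in 10 kHz units, e.g. 2700 for 27 MHz. */
	uint32_t xclk = 2700, tach_period = 540000;
	uint32_t rpm = 60 * xclk * 10000 / tach_period;       /* 3000 */

	/* CTF temperature decode from CG_MULT_THERMAL_STATUS: bit 9 flags
	 * an under-range reading (clamped to the 255 maximum, per the code);
	 * otherwise the low 9 bits are degrees C, scaled by the assumed
	 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES of 1000. */
	uint32_t raw = 0x05a;                                 /* 90 C */
	int temp = (raw & 0x200) ? 255 : (int)(raw & 0x1ff);
	temp *= 1000;

	printf("%llu%% duty, %u RPM, %d millidegrees\n",
	       (unsigned long long)pct, rpm, temp);
	return 0;
}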
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
new file mode 100644 (file)
index 0000000..6face97
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_THERMAL_H_
+#define _SMU7_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define SMU7_THERMAL_HIGH_ALERT_MASK         0x1
+#define SMU7_THERMAL_LOW_ALERT_MASK          0x2
+
+#define SMU7_THERMAL_MINIMUM_TEMP_READING    -256
+#define SMU7_THERMAL_MAXIMUM_TEMP_READING    255
+
+#define SMU7_THERMAL_MINIMUM_ALERT_TEMP      0
+#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP      255
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+
+#endif
+
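As a usage illustration of the interface declared above, a hedged sketch of a caller that pins the fan at 40% duty and then hands control back to the SMC; it assumes an initialized pp_hwmgr and uses only functions from this header:

static int example_fan_override(struct pp_hwmgr *hwmgr)
{
	struct phm_fan_speed_info info;
	uint32_t rpm = 0;
	int ret;

	ret = smu7_fan_ctrl_get_fan_speed_info(hwmgr, &info);
	if (ret || !info.supports_percent_write)
		return ret;

	/* Pin the fan at 40% duty; this implicitly switches the
	 * controller to static (FDO_PWM_MODE_STATIC) mode. */
	ret = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 40);
	if (ret)
		return ret;

	if (info.supports_rpm_read)
		smu7_fan_ctrl_get_fan_speed_rpm(hwmgr, &rpm);

	/* Hand control back to the SMC / fan-table defaults. */
	return smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
}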
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644 (file)
index e58d038..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_ppsmc.h"
-#include "tonga_hwmgr.h"
-
-int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_uvd_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                                                    PPSMC_MSG_UVDPowerOFF);
-       return 0;
-}
-
-int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_uvd_power_gating(hwmgr)) {
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                 PHM_PlatformCaps_UVDDynamicPowerGating)) {
-                       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                               PPSMC_MSG_UVDPowerON, 1);
-               } else {
-                       return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                                               PPSMC_MSG_UVDPowerON, 0);
-               }
-       }
-
-       return 0;
-}
-
-int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_vce_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                                                 PPSMC_MSG_VCEPowerOFF);
-       return 0;
-}
-
-int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cf_want_vce_power_gating(hwmgr))
-               return smum_send_msg_to_smc(hwmgr->smumgr,
-                                                 PPSMC_MSG_VCEPowerON);
-       return 0;
-}
-
-int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
-       int ret = 0;
-
-       switch (block) {
-       case PHM_AsicBlock_UVD_MVC:
-       case PHM_AsicBlock_UVD:
-       case PHM_AsicBlock_UVD_HD:
-       case PHM_AsicBlock_UVD_SD:
-               if (gating == PHM_ClockGateSetting_StaticOff)
-                       ret = tonga_phm_powerdown_uvd(hwmgr);
-               else
-                       ret = tonga_phm_powerup_uvd(hwmgr);
-               break;
-       case PHM_AsicBlock_GFX:
-       default:
-               break;
-       }
-
-       return ret;
-}
-
-int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-
-       tonga_phm_powerup_uvd(hwmgr);
-       tonga_phm_powerup_vce(hwmgr);
-
-       return 0;
-}
-
-int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (data->uvd_power_gated == bgate)
-               return 0;
-
-       data->uvd_power_gated = bgate;
-
-       if (bgate) {
-               cgs_set_clockgating_state(hwmgr->device,
-                                               AMD_IP_BLOCK_TYPE_UVD,
-                                               AMD_CG_STATE_UNGATE);
-               cgs_set_powergating_state(hwmgr->device,
-                                               AMD_IP_BLOCK_TYPE_UVD,
-                                               AMD_PG_STATE_GATE);
-               tonga_update_uvd_dpm(hwmgr, true);
-               tonga_phm_powerdown_uvd(hwmgr);
-       } else {
-               tonga_phm_powerup_uvd(hwmgr);
-               cgs_set_powergating_state(hwmgr->device,
-                                               AMD_IP_BLOCK_TYPE_UVD,
-                                               AMD_PG_STATE_UNGATE);
-               cgs_set_clockgating_state(hwmgr->device,
-                                               AMD_IP_BLOCK_TYPE_UVD,
-                                               AMD_PG_STATE_GATE);
-
-               tonga_update_uvd_dpm(hwmgr, false);
-       }
-
-       return 0;
-}
-
-int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct phm_set_power_state_input states;
-       const struct pp_power_state  *pcurrent;
-       struct pp_power_state  *requested;
-
-       pcurrent = hwmgr->current_ps;
-       requested = hwmgr->request_ps;
-
-       states.pcurrent_state = &(pcurrent->hardware);
-       states.pnew_state = &(requested->hardware);
-
-       if (phm_cf_want_vce_power_gating(hwmgr)) {
-               if (data->vce_power_gated != bgate) {
-                       if (bgate) {
-                               cgs_set_clockgating_state(
-                                                       hwmgr->device,
-                                                       AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_CG_STATE_UNGATE);
-                               cgs_set_powergating_state(
-                                                       hwmgr->device,
-                                                       AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
-                               tonga_enable_disable_vce_dpm(hwmgr, false);
-                               data->vce_power_gated = true;
-                       } else {
-                               tonga_phm_powerup_vce(hwmgr);
-                               data->vce_power_gated = false;
-                               cgs_set_powergating_state(
-                                                       hwmgr->device,
-                                                       AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_UNGATE);
-                               cgs_set_clockgating_state(
-                                                       hwmgr->device,
-                                                       AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
-
-                               tonga_update_vce_dpm(hwmgr, &states);
-                               tonga_enable_disable_vce_dpm(hwmgr, true);
-                               return 0;
-                       }
-               }
-       } else {
-               tonga_update_vce_dpm(hwmgr, &states);
-               tonga_enable_disable_vce_dpm(hwmgr, true);
-               return 0;
-       }
-
-       if (!data->vce_power_gated)
-               tonga_update_vce_dpm(hwmgr, &states);
-
-       return 0;
-}
-
-int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
-                                       const uint32_t *msg_id)
-{
-       PPSMC_Msg msg;
-       uint32_t value;
-
-       switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
-       case PP_GROUP_GFX:
-               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-               case PP_BLOCK_GFX_CG:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                                 ? PPSMC_MSG_EnableClockGatingFeature
-                                 : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_CGCG_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                                       ? PPSMC_MSG_EnableClockGatingFeature
-                                       : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_GFX_CGLS_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_GFX_MG:
-                       /* For GFX MGCG, there are three different ones;
-                        * CPF, RLC, and all others.  CPF MGCG will not be used for Tonga.
-                        * For GFX MGLS, Tonga will not support it.
-                        * */
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               default:
-                       return -1;
-               }
-               break;
-
-       case PP_GROUP_SYS:
-               switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-               case PP_BLOCK_SYS_BIF:
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_BIF_MGLS_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_MC:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_MC_MGCG_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_MC_MGLS_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_HDP:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                                       ? PPSMC_MSG_EnableClockGatingFeature
-                                       : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_HDP_MGCG_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                                       ? PPSMC_MSG_EnableClockGatingFeature
-                                       : PPSMC_MSG_DisableClockGatingFeature;
-
-                               value = CG_SYS_HDP_MGLS_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_SDMA:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_SDMA_MGCG_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-
-                       if (PP_STATE_SUPPORT_LS & *msg_id) {
-                               msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-
-                               value = CG_SYS_SDMA_MGLS_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               case PP_BLOCK_SYS_ROM:
-                       if (PP_STATE_SUPPORT_CG & *msg_id) {
-                               msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-                               ? PPSMC_MSG_EnableClockGatingFeature
-                               : PPSMC_MSG_DisableClockGatingFeature;
-                               value = CG_SYS_ROM_MASK;
-
-                               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-                                       return -1;
-                       }
-                       break;
-
-               default:
-                       return -1;
-
-               }
-               break;
-
-       default:
-               return -1;
-
-       }
-
-       return 0;
-}
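For reference, the msg_id that the removed tonga_phm_update_clock_gatings() decodes can be composed from the same masks it tests. A hypothetical encoding for GFX coarse clock gating; the macro values live in pp_asicblocks.h and are not visible in this diff:

/* Hypothetical caller: request CG support + CG enabled for the GFX CG block. */
static int example_gate_gfx_cg(struct pp_hwmgr *hwmgr)
{
	uint32_t msg_id = (PP_GROUP_GFX << PP_GROUP_SHIFT)
			| (PP_BLOCK_GFX_CG << PP_BLOCK_SHIFT)
			| PP_STATE_SUPPORT_CG
			| PP_STATE_CG;

	return tonga_phm_update_clock_gatings(hwmgr, &msg_id);
}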
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
deleted file mode 100644 (file)
index 8bc38cb..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _TONGA_CLOCK_POWER_GATING_H_
-#define _TONGA_CLOCK_POWER_GATING_H_
-
-#include "tonga_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644 (file)
index 080d69d..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_DYN_DEFAULTS_H
-#define TONGA_DYN_DEFAULTS_H
-
-
-/** \file
- * Volcanic Islands Dynamic default parameters.
- */
-
-enum TONGAdpm_TrendDetection {
-       TONGAdpm_TrendDetection_AUTO,
-       TONGAdpm_TrendDetection_UP,
-       TONGAdpm_TrendDetection_DOWN
-};
-typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
-
-/* Bit vector representing same fields as hardware register. */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102  /* CP_Gfx_busy */
-/* HDP_busy */
-/* IH_busy */
-/* DRM_busy */
-/* DRMDMA_busy */
-/* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-/* AVP_busy  */
-/* SDMA enabled */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1              0x000400  /* FE_Gfx_busy  - Intended for primary usage.   Rest are for flexibility. */
-/* SH_Gfx_busy */
-/* RB_Gfx_busy */
-/* VCE_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080  /* SH_Gfx_busy - Intended for primary usage.   Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* RB_Gfx_busy */
-/* ACP_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200  /* RB_Gfx_busy - Intended for primary usage.   Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* SH_Gfx_busy */
-/* UVD_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680  /* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033  /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033  /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000  /* GFX, HDP, DRMDMA */
-
-
-/* thermal protection counter (units).*/
-#define PPTONGA_THERMALPROTECTCOUNTER_DFLT            0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT        0
-
-/* static screen threshold */
-#define PPTONGA_STATICSCREENTHRESHOLD_DFLT            0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPTONGA_REFERENCEDIVIDER_DFLT                  4
-
-/*
- * ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-
-#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPTONGA_CGULVPARAMETER_DFLT                    0x00040035
-#define PPTONGA_CGULVCONTROL_DFLT                      0x00007450
-#define PPTONGA_TARGETACTIVITY_DFLT                     30  /*30%  */
-#define PPTONGA_MCLK_TARGETACTIVITY_DFLT                10  /*10%  */
-
-#endif
-
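The ULV delay default removed above can be checked against the formula in its own comment: (VoltageResponseTime * ReferenceClock) / 1600 = (1000 * 2700) / 1600 = 1687.5, which truncates to the stated 1687.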
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644 (file)
index c7dc111..0000000
+++ /dev/null
@@ -1,6276 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include <atombios.h>
-#include "tonga_hwmgr.h"
-#include "pptable.h"
-#include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "tonga_ppsmc.h"
-#include "cgs_common.h"
-#include "pppcielanes.h"
-#include "tonga_dyn_defaults.h"
-#include "smumgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_thermal.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "cgs_linux.h"
-#include "eventmgr.h"
-#include "amd_pcie_helpers.h"
-
-#define MC_CG_ARB_FREQ_F0           0x0a
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0       0x05
-#define MC_CG_SEQ_DRAMCONF_S1       0x06
-#define MC_CG_SEQ_YCLK_SUSPEND      0x04
-#define MC_CG_SEQ_YCLK_RESUME       0x0a
-
-#define PCIE_BUS_CLK                10000
-#define TCLK                        (PCIE_BUS_CLK / 10)
-
-#define SMC_RAM_END 0x40000
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000  /* First byte after SMC_CG_IND*/
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define VDDC_VDDCI_DELTA            200
-#define VDDC_VDDGFX_DELTA           300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-typedef uint32_t PECI_RegistryValue;
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
-       {600, 1050, 3, 0},
-       {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
-       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
-       {0, 1, 3, 2, 4, 5},
-       {0, 2, 4, 5, 6, 5} };
-
-/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-       DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
-       DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
-       DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
-       DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
-       DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
-};
-typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
-
-static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct tonga_power_state *cast_phw_tonga_power_state(
-                                 struct pp_hw_power_state *hw_ps)
-{
-       if (hw_ps == NULL)
-               return NULL;
-
-       PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL);
-
-       return (struct tonga_power_state *)hw_ps;
-}
-
-const struct tonga_power_state *cast_const_phw_tonga_power_state(
-                                const struct pp_hw_power_state *hw_ps)
-{
-       if (hw_ps == NULL)
-               return NULL;
-
-       PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
-                               "Invalid Powerstate Type!",
-                                return NULL);
-
-       return (const struct tonga_power_state *)hw_ps;
-}
-
-int tonga_add_voltage(struct pp_hwmgr *hwmgr,
-       phm_ppt_v1_voltage_lookup_table *look_up_table,
-       phm_ppt_v1_voltage_lookup_record *record)
-{
-       uint32_t i;
-       PP_ASSERT_WITH_CODE((NULL != look_up_table),
-               "Lookup Table empty.", return -1;);
-       PP_ASSERT_WITH_CODE((0 != look_up_table->count),
-               "Lookup Table empty.", return -1;);
-       PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
-               "Lookup Table is full.", return -1;);
-
-       /* This is to avoid entering duplicate calculated records. */
-       for (i = 0; i < look_up_table->count; i++) {
-               if (look_up_table->entries[i].us_vdd == record->us_vdd) {
-                       if (look_up_table->entries[i].us_calculated == 1)
-                               return 0;
-                       else
-                               break;
-               }
-       }
-
-       look_up_table->entries[i].us_calculated = 1;
-       look_up_table->entries[i].us_vdd = record->us_vdd;
-       look_up_table->entries[i].us_cac_low = record->us_cac_low;
-       look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
-       look_up_table->entries[i].us_cac_high = record->us_cac_high;
-       /* Only increment the count when we're appending, not replacing duplicate entry. */
-       if (i == look_up_table->count)
-               look_up_table->count++;
-
-       return 0;
-}
-
-int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-       PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
-       return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
-}
-
-uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
-               uint32_t voltage)
-{
-       uint8_t count = (uint8_t) (voltage_table->count);
-       uint8_t i = 0;
-
-       PP_ASSERT_WITH_CODE((NULL != voltage_table),
-               "Voltage Table empty.", return 0;);
-       PP_ASSERT_WITH_CODE((0 != count),
-               "Voltage Table empty.", return 0;);
-
-       for (i = 0; i < count; i++) {
-               /* find first voltage bigger than requested */
-               if (voltage_table->entries[i].value >= voltage)
-                       return i;
-       }
-
-       /* voltage is bigger than max voltage in the table */
-       return i - 1;
-}
-
-/**
- * @brief PhwTonga_GetVoltageOrder
- *  Returns index of requested voltage record in lookup(table)
- * @param hwmgr - pointer to hardware manager
- * @param lookupTable - lookup list to search in
- * @param voltage - voltage to look for
- * @return 0 on success
- */
-uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
-               uint16_t voltage)
-{
-       uint8_t count = (uint8_t) (look_up_table->count);
-       uint8_t i;
-
-       PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
-       PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
-
-       for (i = 0; i < count; i++) {
-               /* find first voltage equal or bigger than requested */
-               if (look_up_table->entries[i].us_vdd >= voltage)
-                       return i;
-       }
-
-       /* voltage is bigger than max voltage in the table */
-       return i-1;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       /*
-        * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-        * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
-        * whereas voltage control is a fundemental change that will not be disabled
-        */
-
-       return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                       FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
-}
-
-/**
- * Re-generate the DPM level mask value
- * @param    hwmgr      the address of the hardware manager
- */
-static uint32_t tonga_get_dpm_level_enable_mask_value(
-                       struct tonga_single_dpm_table * dpm_table)
-{
-       uint32_t i;
-       uint32_t mask_value = 0;
-
-       for (i = dpm_table->count; i > 0; i--) {
-               mask_value = mask_value << 1;
-
-               if (dpm_table->dpm_levels[i-1].enabled)
-                       mask_value |= 0x1;
-               else
-                       mask_value &= 0xFFFFFFFE;
-       }
-       return mask_value;
-}
-
-/**
- * Retrieve DPM default values from registry (if available)
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       phw_tonga_ulv_parm *ulv = &(data->ulv);
-       uint32_t tmp;
-
-       ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
-       data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
-       data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
-       data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
-       data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
-       data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
-       data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
-       data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
-       data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
-
-       data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
-       data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_ABM);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_NonABMSupportInPPLib);
-
-       tmp = 0;
-       if (tmp == 0)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DynamicACTiming);
-
-       tmp = 0;
-       if (0 != tmp)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DisableMemoryTransition);
-
-       data->mclk_strobe_mode_threshold = 40000;
-       data->mclk_stutter_mode_threshold = 30000;
-       data->mclk_edc_enable_threshold = 40000;
-       data->mclk_edc_wr_enable_threshold = 40000;
-
-       tmp = 0;
-       if (tmp != 0)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_DisableMCLS);
-
-       data->pcie_gen_performance.max = PP_PCIEGen1;
-       data->pcie_gen_performance.min = PP_PCIEGen3;
-       data->pcie_gen_power_saving.max = PP_PCIEGen1;
-       data->pcie_gen_power_saving.min = PP_PCIEGen3;
-
-       data->pcie_lane_performance.max = 0;
-       data->pcie_lane_performance.min = 16;
-       data->pcie_lane_power_saving.max = 0;
-       data->pcie_lane_power_saving.min = 16;
-
-       tmp = 0;
-
-       if (tmp)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_DynamicUVDState);
-
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = tonga_copy_bytes_to_smc(
-                               hwmgr->smumgr,
-                               data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
-                               LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               data->sram_end
-                               );
-       }
-
-       return result;
-}
-
-/**
- * Find the SCLK value associated with the specified virtual_voltage_id.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    lookup_table  the voltage lookup table to search.
- * @param    virtual_voltage_id  the leakage voltage ID to look for.
- * @param    sclk  output SCLK value.
- * @return   0 on success, -1 if no association was found.
- */
-static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
-       phm_ppt_v1_voltage_lookup_table *lookup_table,
-       uint16_t virtual_voltage_id, uint32_t *sclk)
-{
-       uint8_t entryId;
-       uint8_t voltageId;
-       struct phm_ppt_v1_information *pptable_info =
-                                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
-
-       /* search for leakage voltage IDs 0xff01 ~ 0xff08 and their sclk */
-       for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
-               voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
-               if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
-                       break;
-       }
-
-       PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
-                       "Can't find requested voltage id in vdd_dep_on_sclk table!",
-                       return -1;
-                       );
-
-       *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
-
-       return 0;
-}
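-
-/*
- * A minimal sketch of the PP_ASSERT_WITH_CODE() pattern used throughout this
- * file, assuming the usual powerplay definition: when the condition is false,
- * log the message and run the recovery code (often a return statement).
- * Guarded out because it is illustrative only.
- */
-#if 0
-#define PP_ASSERT_WITH_CODE(cond, msg, code)                           \
-       do {                                                            \
-               if (!(cond)) {                                          \
-                       printk(KERN_ERR "%s\n", msg);                   \
-                       code;                                           \
-               }                                                       \
-       } while (0)
-#endif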
-
-/**
- * Get leakage VDDC/VDDGFX based on leakage ID.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success; -1 if a retrieved voltage is invalid (>= 2V),
- *           since such a value could damage the ASIC.
- */
-int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-       uint16_t    virtual_voltage_id;
-       uint16_t    vddc = 0;
-       uint16_t    vddgfx = 0;
-       uint16_t    i, j;
-       uint32_t  sclk = 0;
-
-       /* retrieve voltage for leakage ID (0xff01 + i) */
-       for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
-               virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-
-               /* in split mode we should have only vddgfx EVV leakages */
-               if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-                       if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
-                                               pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
-                               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                                       PHM_PlatformCaps_ClockStretcher)) {
-                                       for (j = 1; j < sclk_table->count; j++) {
-                                               if (sclk_table->entries[j].clk == sclk &&
-                                                               sclk_table->entries[j].cks_enable == 0) {
-                                                       sclk += 5000;
-                                                       break;
-                                               }
-                                       }
-                               }
-                               if (0 == atomctrl_get_voltage_evv_on_sclk
-                                   (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
-                                    virtual_voltage_id, &vddgfx)) {
-                                       /* make sure vddgfx is less than 2V, or it could damage the ASIC */
-                                       PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
-                                       /* the voltage should not be zero nor equal to leakage ID */
-                                       if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
-                                               data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
-                                               data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
-                                               data->vddcgfx_leakage.count++;
-                                       }
-                               } else {
-                                       printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
-                               }
-                       }
-               } else {
-                       /*  in merged mode we have only vddc EVV leakages */
-                       if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
-                                               pptable_info->vddc_lookup_table,
-                                               virtual_voltage_id, &sclk)) {
-                               if (0 == atomctrl_get_voltage_evv_on_sclk
-                                   (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
-                                    virtual_voltage_id, &vddc)) {
-                                       /* make sure vddc is less than 2V, or it could damage the ASIC */
-                                       PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
-
-                                       /* the voltage should not be zero nor equal to leakage ID */
-                                       if (vddc != 0 && vddc != virtual_voltage_id) {
-                                               data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
-                                               data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
-                                               data->vddc_leakage.count++;
-                                       }
-                               } else {
-                                       printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* enable SCLK dpm */
-       if (0 == data->sclk_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                                  PPSMC_MSG_DPM_Enable)),
-                               "Failed to enable SCLK DPM during DPM Start Function!",
-                               return -1);
-       }
-
-       /* enable MCLK dpm */
-       if (0 == data->mclk_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                            PPSMC_MSG_MCLKDPM_Enable)),
-                               "Failed to enable MCLK DPM during DPM Start Function!",
-                               return -1);
-
-               PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_CPL_CNTL, 0x100005);/*Read */
-
-               udelay(10);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixLCAC_CPL_CNTL, 0x500005);/* write */
-
-       }
-
-       return 0;
-}
-
-int tonga_start_dpm(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* enable general power management */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
-       /* enable sclk deep sleep */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
-
-       /* prepare for PCIE DPM */
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
-                       offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
-
-       PP_ASSERT_WITH_CODE(
-                       (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                       PPSMC_MSG_Voltage_Cntl_Enable)),
-                       "Failed to enable voltage DPM during DPM Start Function!",
-                       return -1);
-
-       PP_ASSERT_WITH_CODE(0 == tonga_enable_sclk_mclk_dpm(hwmgr),
-                       "Failed to enable Sclk DPM and Mclk DPM!", return -1);
-
-       /* enable PCIE dpm */
-       if (0 == data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Enable)),
-                               "Failed to enable pcie DPM during DPM Start Function!",
-                               return -1
-                               );
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_Falcon_QuickTransition))
-               smum_send_msg_to_smc(hwmgr->smumgr,
-                               PPSMC_MSG_EnableACDCGPIOInterrupt);
-
-       return 0;
-}
-
-int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* disable SCLK dpm */
-       if (0 == data->sclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-               PP_ASSERT_WITH_CODE(
-                               !tonga_is_dpm_running(hwmgr),
-                               "Trying to Disable SCLK DPM when DPM is disabled",
-                               return -1
-                               );
-
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                                 PPSMC_MSG_DPM_Disable)),
-                               "Failed to disable SCLK DPM during DPM stop Function!",
-                               return -1);
-       }
-
-       /* disable MCLK dpm */
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-               PP_ASSERT_WITH_CODE(
-                               !tonga_is_dpm_running(hwmgr),
-                               "Trying to Disable MCLK DPM when DPM is disabled",
-                               return -1
-                               );
-
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                           PPSMC_MSG_MCLKDPM_Disable)),
-                               "Failed to Disable MCLK DPM during DPM stop Function!",
-                               return -1);
-       }
-
-       return 0;
-}
-
-int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
-       /* disable sclk deep sleep*/
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);
-
-       /* disable PCIE dpm */
-       if (0 == data->pcie_dpm_key_disabled) {
-               /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-               PP_ASSERT_WITH_CODE(
-                               !tonga_is_dpm_running(hwmgr),
-                               "Trying to Disable PCIE DPM when DPM is disabled",
-                               return -1
-                               );
-               PP_ASSERT_WITH_CODE(
-                               (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_PCIeDPM_Disable)),
-                               "Failed to disable pcie DPM during DPM stop Function!",
-                               return -1);
-       }
-
-       PP_ASSERT_WITH_CODE(0 == tonga_disable_sclk_mclk_dpm(hwmgr),
-                       "Failed to disable Sclk DPM and Mclk DPM!", return -1);
-
-       /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-       PP_ASSERT_WITH_CODE(
-                       !tonga_is_dpm_running(hwmgr),
-                       "Trying to Disable Voltage CNTL when DPM is disabled",
-                       return -1
-                       );
-
-       PP_ASSERT_WITH_CODE(
-                       (0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                       PPSMC_MSG_Voltage_Cntl_Disable)),
-                       "Failed to disable voltage DPM during DPM stop Function!",
-                       return -1);
-
-       return 0;
-}
-
-int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
-
-       return 0;
-}
-
-/**
- * Send a message to the SMC and read back a parameter.
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: where to store the returned parameter; may be NULL.
- * @return   0 on success, otherwise the error from smum_send_msg_to_smc().
- */
-PPSMC_Result tonga_send_msg_to_smc_return_parameter(
-               struct pp_hwmgr *hwmgr,
-               PPSMC_Msg msg,
-               uint32_t *parameter)
-{
-       int result;
-
-       result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
-
-       if ((0 == result) && parameter) {
-               *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-       }
-
-       return result;
-}
-
-/**
- * Force the SCLK DPM level.
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  the DPM level to force.
- * @return   0 on success, 1 on failure.
- */
-int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t level_mask = 1 << n;
-
-       /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                           "Trying to force SCLK when DPM is disabled",
-                           return -1;);
-       if (0 == data->sclk_dpm_key_disabled)
-               return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                               level_mask)) ? 0 : 1;
-
-       return 0;
-}
-
-/**
- * Force the MCLK DPM level.
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  the DPM level to force.
- * @return   0 on success, 1 on failure.
- */
-int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t level_mask = 1 << n;
-
-       /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                           "Trying to Force MCLK when DPM is disabled",
-                           return -1;);
-       if (0 == data->mclk_dpm_key_disabled)
-               return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                               level_mask)) ? 0 : 1;
-
-       return 0;
-}
-
-/**
- * Force the PCIE DPM level.
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  the PCIE level to force.
- * @return   0 on success, 1 on failure.
- */
-int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* Check whether DPM is running; if sending this message is found to cause hangs, skip it. */
-       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                           "Trying to Force PCIE level when DPM is disabled",
-                           return -1;);
-       if (0 == data->pcie_dpm_key_disabled)
-               return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                               (PPSMC_Msg)PPSMC_MSG_PCIeDPM_ForceLevel,
-                               n)) ? 0 : 1;
-
-       return 0;
-}
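-
-/*
- * Illustrative sketch (plain C, hypothetical helper): the SCLK/MCLK force
- * paths above hand the SMC a mask with exactly one bit set to pin a DPM
- * level, while the PCIE path passes the level index directly.
- */
-#if 0
-static unsigned int dpm_single_level_mask(unsigned int n)
-{
-       return 1u << n; /* e.g. n == 2 -> 0x00000004 */
-}
-#endif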
-
-/**
- * Set the initial state by calling the SMC to switch to this state directly.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success, 1 on failure.
- */
-int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
-{
-       /*
-        * The SMC only stores one state that software will ask to switch to,
-        * so we switch to the just-uploaded one.
-        */
-       return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success, 1 if any header read failed.
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, DpmTable),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               data->dpm_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, SoftRegisters),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               data->soft_regs_start = tmp;
-               tonga_smu->ulSoftRegsStart = tmp;
-       }
-
-       error |= (0 != result);
-
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, mcRegisterTable),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               data->mc_reg_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, FanTable),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               data->fan_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               data->arb_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, Version),
-                               &tmp, data->sram_end);
-
-       if (0 == result) {
-               hwmgr->microcode_version_info.SMC = tmp;
-       }
-
-       error |= (0 != result);
-
-       return error ? 1 : 0;
-}
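-
-/*
- * Sketch of the lookup pattern above (plain C, hypothetical struct): each
- * table is located by adding offsetof(header, field) to the fixed firmware
- * header location in SMC SRAM.
- */
-#if 0
-#include <stddef.h>
-struct fw_header_sketch {
-       unsigned int Version;
-       unsigned int DpmTable;
-       unsigned int SoftRegisters;
-};
-/* byte offset of the DpmTable pointer within the header */
-static const unsigned long dpm_table_off =
-       offsetof(struct fw_header_sketch, DpmTable);
-#endif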
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       data->clock_registers.vCG_SPLL_FUNC_CNTL         =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
-       data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
-       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
-       data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
-               cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
-       data->clock_registers.vDLL_CNTL                  =
-               cgs_read_register(hwmgr->device, mmDLL_CNTL);
-       data->clock_registers.vMCLK_PWRMGT_CNTL          =
-               cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
-       data->clock_registers.vMPLL_AD_FUNC_CNTL         =
-               cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
-       data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
-               cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
-       data->clock_registers.vMPLL_FUNC_CNTL            =
-               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
-       data->clock_registers.vMPLL_FUNC_CNTL_1          =
-               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
-       data->clock_registers.vMPLL_FUNC_CNTL_2          =
-               cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
-       data->clock_registers.vMPLL_SS1                  =
-               cgs_read_register(hwmgr->device, mmMPLL_SS1);
-       data->clock_registers.vMPLL_SS2                  =
-               cgs_read_register(hwmgr->device, mmMPLL_SS2);
-
-       return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t temp;
-
-       temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-       data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-                       ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-                        MC_SEQ_MISC0_GDDR5_SHIFT));
-
-       return 0;
-}
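-
-/*
- * The GDDR5 probe above is the usual read-mask-shift idiom; a minimal
- * generic form with illustrative names:
- */
-#if 0
-static unsigned int reg_field_get(unsigned int reg, unsigned int mask,
-                               unsigned int shift)
-{
-       return (reg & mask) >> shift;
-}
-#endif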
-
-/**
- * Enable static (ACPI) power management in the SMC.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-       return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       data->uvd_power_gated = false;
-       data->vce_power_gated = false;
-       data->samu_power_gated = false;
-       data->acp_power_gated = false;
-       data->pg_acp_init = true;
-
-       return 0;
-}
-
-/**
- * Check whether DPM is enabled.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 if DPM is running, 1 otherwise.
- */
-int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       /*
-        * We return the status of Voltage Control instead of checking SCLK/MCLK
-        * DPM, because test scenarios may need us to intentionally disable
-        * SCLK/MCLK DPM, whereas voltage control is a fundamental change that
-        * will not be disabled.
-        */
-       return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
-}
-
-/**
- * Check whether DPM is stopped.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 if DPM is stopped, 1 otherwise.
- */
-int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       if (tonga_is_dpm_running(hwmgr)) {
-               /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
-               if (!data->dpm_table_start) {
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-/**
- * Remove repeated voltage values and create a table with unique values.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    voltage_table  the voltage table to trim in place.
- * @return   0 on success; -1 if the table is NULL, -ENOMEM on allocation
- *           failure.
- */
-static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
-                       pp_atomctrl_voltage_table *voltage_table)
-{
-       uint32_t table_size, i, j;
-       uint16_t vvalue;
-       bool bVoltageFound = false;
-       pp_atomctrl_voltage_table *table;
-
-       PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
-       table_size = sizeof(pp_atomctrl_voltage_table);
-       table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       /* no memset needed: kzalloc() returns zeroed memory */
-       table->mask_low = voltage_table->mask_low;
-       table->phase_delay = voltage_table->phase_delay;
-
-       for (i = 0; i < voltage_table->count; i++) {
-               vvalue = voltage_table->entries[i].value;
-               bVoltageFound = false;
-
-               for (j = 0; j < table->count; j++) {
-                       if (vvalue == table->entries[j].value) {
-                               bVoltageFound = true;
-                               break;
-                       }
-               }
-
-               if (!bVoltageFound) {
-                       table->entries[table->count].value = vvalue;
-                       table->entries[table->count].smio_low =
-                               voltage_table->entries[i].smio_low;
-                       table->count++;
-               }
-       }
-
-       /* copy the trimmed result back into the caller's table */
-       memcpy(voltage_table, table, sizeof(pp_atomctrl_voltage_table));
-
-       kfree(table);
-
-       return 0;
-}
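-
-/*
- * Standalone sketch of the trim above (plain C, illustrative names): keep
- * the first occurrence of each value, preserving order, and return the new
- * count.
- */
-#if 0
-#include <stdbool.h>
-static unsigned int dedup_u16(unsigned short *v, unsigned int count)
-{
-       unsigned int i, j, out = 0;
-
-       for (i = 0; i < count; i++) {
-               bool seen = false;
-
-               for (j = 0; j < out; j++) {
-                       if (v[j] == v[i]) {
-                               seen = true;
-                               break;
-                       }
-               }
-               if (!seen)
-                       v[out++] = v[i];
-       }
-       return out;
-}
-#endif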
-
-static int tonga_get_svi2_vdd_ci_voltage_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
-{
-       uint32_t i;
-       int result;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
-
-       PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
-                       "Voltage Dependency Table empty.", return -1;);
-
-       vddci_voltage_table->mask_low = 0;
-       vddci_voltage_table->phase_delay = 0;
-       vddci_voltage_table->count = voltage_dependency_table->count;
-
-       for (i = 0; i < voltage_dependency_table->count; i++) {
-               vddci_voltage_table->entries[i].value =
-                       voltage_dependency_table->entries[i].vddci;
-               vddci_voltage_table->entries[i].smio_low = 0;
-       }
-
-       result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to trim VDDCI table.", return result;);
-
-       return 0;
-}
-
-static int tonga_get_svi2_vdd_voltage_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *look_up_table,
-               pp_atomctrl_voltage_table *voltage_table)
-{
-       uint8_t i = 0;
-
-       PP_ASSERT_WITH_CODE((0 != look_up_table->count),
-                       "Voltage Lookup Table empty.", return -1;);
-
-       voltage_table->mask_low = 0;
-       voltage_table->phase_delay = 0;
-
-       voltage_table->count = look_up_table->count;
-
-       for (i = 0; i < voltage_table->count; i++) {
-               voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
-               voltage_table->entries[i].smio_low = 0;
-       }
-
-       return 0;
-}
-
-/*
- * Voltage Tables
- *
- * If the voltage table is bigger than what fits into the state table on the
- * SMC, keep only the higher entries.
- */
-
-static void tonga_trim_voltage_table_to_fit_state_table(
-               struct pp_hwmgr *hwmgr,
-               uint32_t max_voltage_steps,
-               pp_atomctrl_voltage_table *voltage_table)
-{
-       unsigned int i, diff;
-
-       if (voltage_table->count <= max_voltage_steps)
-               return;
-
-       diff = voltage_table->count - max_voltage_steps;
-
-       for (i = 0; i < max_voltage_steps; i++)
-               voltage_table->entries[i] = voltage_table->entries[i + diff];
-
-       voltage_table->count = max_voltage_steps;
-}
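-
-/*
- * Illustrative sketch of the trim above (plain C, hypothetical names): drop
- * the lowest (count - max_steps) entries by shifting the tail down, keeping
- * only the highest ones.
- */
-#if 0
-static unsigned int trim_keep_highest(unsigned short *v, unsigned int count,
-                               unsigned int max_steps)
-{
-       unsigned int i, diff;
-
-       if (count <= max_steps)
-               return count;
-       diff = count - max_steps;
-       for (i = 0; i < max_steps; i++)
-               v[i] = v[i + diff];
-       return max_steps;
-}
-#endif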
-
-/**
- * Create the voltage tables.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success, otherwise an error code.
- */
-int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result;
-
-       /* MVDD has only GPIO voltage control */
-       if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                                       VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to retrieve MVDD table.", return result;);
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-               /* GPIO voltage */
-               result = atomctrl_get_voltage_table_v3(hwmgr,
-                                       VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to retrieve VDDCI table.", return result;);
-       } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-               /* SVI2 voltage */
-               result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
-                                       pptable_info->vdd_dep_on_mclk);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-               /* VDDGFX has only SVI2 voltage control */
-               result = tonga_get_svi2_vdd_voltage_table(hwmgr,
-                                       pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               /* VDDC has only SVI2 voltage control */
-               result = tonga_get_svi2_vdd_voltage_table(hwmgr,
-                                       pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
-       }
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
-                       "Too many voltage values for VDDC. Trimming to fit state table.",
-                       tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-                       SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
-                       );
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
-                       "Too many voltage values for VDDGFX. Trimming to fit state table.",
-                       tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-                       SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
-                       );
-
-       PP_ASSERT_WITH_CODE(
-                       (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
-                       "Too many voltage values for VDDCI. Trimming to fit state table.",
-                       tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-                       SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
-                       );
-
-       PP_ASSERT_WITH_CODE(
-                       (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
-                       "Too many voltage values for MVDD. Trimming to fit state table.",
-                       tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-                       SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
-                       );
-
-       return 0;
-}
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               table->VddcLevelCount = data->vddc_voltage_table.count;
-               for (count = 0; count < table->VddcLevelCount; count++) {
-                       table->VddcTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-       }
-       return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-               table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
-               for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
-                       table->VddGfxTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
-       }
-       return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-
-       table->VddciLevelCount = data->vddci_voltage_table.count;
-       for (count = 0; count < table->VddciLevelCount; count++) {
-               if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-                       table->VddciTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-                       table->SmioTable1.Pattern[count].Voltage =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
-                       table->SmioTable1.Pattern[count].Smio =
-                               (uint8_t) count;
-                       table->Smio[count] |=
-                               data->vddci_voltage_table.entries[count].smio_low;
-                       table->VddciTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-       }
-
-       table->SmioMask1 = data->vddci_voltage_table.mask_low;
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
-       return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-
-       if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               table->MvddLevelCount = data->mvdd_voltage_table.count;
-               for (count = 0; count < table->MvddLevelCount; count++) {
-                       table->SmioTable2.Pattern[count].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-                       table->SmioTable2.Pattern[count].Smio =
-                               (uint8_t) count;
-                       table->Smio[count] |=
-                               data->mvdd_voltage_table.entries[count].smio_low;
-               }
-               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-       }
-
-       return 0;
-}
-
-/**
- * Convert a voltage value in mV to the VID number required by SMU firmware.
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
-       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
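-
-/*
- * Worked example, assuming VOLTAGE_SCALE == 4 as elsewhere in powerplay:
- * the SVI2 encoding is V = 1.55 V - VID * 6.25 mV, i.e.
- * VID = (6200 - 4 * mV) / 25. For vddc = 1000 (mV):
- * VID = (6200 - 4000) / 25 = 88, and 1550 - 88 * 6.25 = 1000 mV.
- */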
-
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
-       struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
-
-       /* the table is already byte-swapped for the SMC, so swap values back before using them */
-       uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
-       uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
-       for (count = 0; count < vddcLevelCount; count++) {
-               /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
-               index = tonga_get_voltage_index(vddc_lookup_table,
-                       data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-               table->BapmVddcVidHiSidd[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-               table->BapmVddcVidHiSidd2[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-       }
-
-       if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-               /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
-               for (count = 0; count < vddgfxLevelCount; count++) {
-                       index = tonga_get_voltage_index(vddgfx_lookup_table,
-                               data->vddgfx_voltage_table.entries[count].value);
-                       table->BapmVddGfxVidLoSidd[count] =
-                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
-                       table->BapmVddGfxVidHiSidd[count] =
-                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
-                       table->BapmVddGfxVidHiSidd2[count] =
-                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
-               }
-       } else {
-               for (count = 0; count < vddcLevelCount; count++) {
-                       index = tonga_get_voltage_index(vddc_lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-                       table->BapmVddGfxVidLoSidd[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-                       table->BapmVddGfxVidHiSidd[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-                       table->BapmVddGfxVidHiSidd2[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-               }
-       }
-
-       return 0;
-}
-
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   0 on success, -1 if any table could not be populated
- */
-int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-       SMU72_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = tonga_populate_smc_vddc_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "cannot populate VDDC voltage table to SMC", return -1);
-
-       result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "cannot populate VDDCI voltage table to SMC", return -1);
-
-       result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "cannot populate VDDGFX voltage table to SMC", return -1);
-
-       result = tonga_populate_smc_mvdd_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "cannot populate MVDD voltage table to SMC", return -1);
-
-       result = tonga_populate_cac_tables(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "cannot populate CAC voltage tables to SMC", return -1);
-
-       return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint16_t config;
-
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-               /* Split mode */
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-               if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-                       config = VR_SVI2_PLANE_2;
-                       table->VRConfig |= config;
-               } else {
-                       printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should both be on SVI2 control in split mode!\n");
-               }
-       } else {
-               /* Merged mode  */
-               config = VR_MERGED_WITH_VDDC;
-               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-               /* Set Vddc Voltage Controller  */
-               if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-                       config = VR_SVI2_PLANE_1;
-                       table->VRConfig |= config;
-               } else {
-                       printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode!\n");
-               }
-       }
-
-       /* Set Vddci Voltage Controller  */
-       if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-       } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-       }
-
-       /* Set Mvdd Voltage Controller */
-       if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
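-
-/*
- * Minimal sketch of the VRConfig packing above: each regulator plane gets
- * its own bitfield within one word. The shift values here are illustrative;
- * the real VRCONF_*_SHIFT macros come from the SMU headers.
- */
-#if 0
-enum {
-       SKETCH_VDDC_SHIFT   = 0,
-       SKETCH_VDDGFX_SHIFT = 8,
-       SKETCH_VDDCI_SHIFT  = 16,
-       SKETCH_MVDD_SHIFT   = 24,
-};
-static unsigned int pack_vrconfig(unsigned int vddc, unsigned int vddgfx,
-                               unsigned int vddci, unsigned int mvdd)
-{
-       return (vddc << SKETCH_VDDC_SHIFT) | (vddgfx << SKETCH_VDDGFX_SHIFT) |
-              (vddci << SKETCH_VDDCI_SHIFT) | (mvdd << SKETCH_MVDD_SHIFT);
-}
-#endif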
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
-       uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-       uint32_t i = 0;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       /* the clock-voltage dependency table is empty */
-       if (allowed_clock_voltage_table->count == 0)
-               return -1;
-
-       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-               /* find the first entry whose clock is >= the requested clock */
-               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
-                       voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                                                               allowed_clock_voltage_table->entries[i].vddgfx);
-
-                       voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-                                                               allowed_clock_voltage_table->entries[i].vddc);
-
-                       if (allowed_clock_voltage_table->entries[i].vddci) {
-                               voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-                                                                       allowed_clock_voltage_table->entries[i].vddci);
-                       } else {
-                               voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-                                                                       allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
-                       }
-
-                       if (allowed_clock_voltage_table->entries[i].mvdd) {
-                               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-                       }
-
-                       voltage->Phases = 1;
-                       return 0;
-               }
-       }
-
-       /* the requested sclk is bigger than the max sclk in the dependency table */
-       voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-               allowed_clock_voltage_table->entries[i-1].vddgfx);
-       voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-               allowed_clock_voltage_table->entries[i-1].vddc);
-
-       if (allowed_clock_voltage_table->entries[i-1].vddci) {
-               voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-                       allowed_clock_voltage_table->entries[i-1].vddci);
-       } else {
-               /* mirror the in-loop fallback when no vddci is given */
-               voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-                       allowed_clock_voltage_table->entries[i-1].vddc - data->vddc_vddci_delta);
-       }
-       if (allowed_clock_voltage_table->entries[i-1].mvdd) {
-               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-       }
-
-       return 0;
-}
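-
-/*
- * Sketch of the lookup-with-clamp pattern above (plain C, hypothetical
- * types, n > 0 assumed): return the first entry whose clock is >= the
- * request, or the last entry when the request exceeds the table maximum.
- */
-#if 0
-struct clk_volt_sketch { unsigned int clk, volt; };
-static unsigned int volt_for_clk(const struct clk_volt_sketch *t,
-                               unsigned int n, unsigned int clk)
-{
-       unsigned int i;
-
-       for (i = 0; i < n; i++)
-               if (t[i].clk >= clk)
-                       return t[i].volt;
-       return t[n - 1].volt; /* clamp to the highest level */
-}
-#endif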
-
-/**
- * Call the SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success, 1 on failure.
- */
-int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-       return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
-}
-
-int tonga_populate_memory_timing_parameters(
-               struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock,
-               uint32_t memory_clock,
-               struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
-               )
-{
-       uint32_t dramTiming;
-       uint32_t dramTiming2;
-       uint32_t burstTime;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                               engine_clock, memory_clock);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
-       arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
-       return 0;
-}
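-
-/*
- * The PP_HOST_TO_SMC_UL()/CONVERT_FROM_HOST_TO_SMC_UL() helpers used here
- * byte-swap values for the SMC, which (by assumption in this sketch)
- * consumes big-endian data. On a little-endian host a plain 32-bit byte
- * swap produces that representation:
- */
-#if 0
-static unsigned int bswap32_sketch(unsigned int v)
-{
-       return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
-              ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
-}
-#endif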
-
-/**
- * Set up parameters for the MC ARB.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success, otherwise an error code.
- * This function is to be called from the SetPowerState table.
- */
-int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       int result = 0;
-       SMU72_Discrete_MCArbDramTimingTable  arb_regs;
-       uint32_t i, j;
-
-       memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = tonga_populate_memory_timing_parameters
-                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
-                                data->dpm_table.mclk_table.dpm_levels[j].value,
-                                &arb_regs.entries[i][j]);
-
-                       if (0 != result)
-                               break;
-               }
-               /* also stop the outer loop so a later success cannot mask the error */
-               if (0 != result)
-                       break;
-       }
-
-       if (0 == result) {
-               result = tonga_copy_bytes_to_smc(
-                               hwmgr->smumgr,
-                               data->arb_table_start,
-                               (uint8_t *)&arb_regs,
-                               sizeof(SMU72_Discrete_MCArbDramTimingTable),
-                               data->sram_end
-                               );
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_dpm_table *dpm_table = &data->dpm_table;
-       uint32_t i;
-
-       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed =
-                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount =
-                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-       }
-
-       data->smc_state_table.LinkLevelCount =
-               (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-               tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-                                       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       table->UvdLevelCount = (uint8_t) (mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage.Vddc =
-                       tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-                                               mm_table->entries[count].vddc);
-               table->UvdLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-                       tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                                               mm_table->entries[count].vddgfx) : 0;
-               table->UvdLevel[count].MinVoltage.Vddci =
-                       tonga_get_voltage_id(&data->vddci_voltage_table,
-                                            mm_table->entries[count].vddc - data->vddc_vddci_delta);
-               table->UvdLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                                         table->UvdLevel[count].VclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                                   "cannot find divider id for Vclk clock", return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                                         table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                                   "cannot find divider id for Dclk clock", return result);
-
-               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       table->VceLevelCount = (uint8_t) (mm_table->count);
-       table->VceBootLevel = 0;
-
-       for (count = 0; count < table->VceLevelCount; count++) {
-               table->VceLevel[count].Frequency =
-                       mm_table->entries[count].eclk;
-               table->VceLevel[count].MinVoltage.Vddc =
-                       tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->VceLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-                       tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->VceLevel[count].MinVoltage.Vddci =
-                       tonga_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - data->vddc_vddci_delta);
-               table->VceLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->VceLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "cannot find divider ID for VCE engine clock", return result);
-
-               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       table->AcpLevelCount = (uint8_t) (mm_table->count);
-       table->AcpBootLevel = 0;
-
-       for (count = 0; count < table->AcpLevelCount; count++) {
-               table->AcpLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].aclk;
-               table->AcpLevel[count].MinVoltage.Vddc =
-                       tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-                       mm_table->entries[count].vddc);
-               table->AcpLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-                       tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->AcpLevel[count].MinVoltage.Vddci =
-                       tonga_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - data->vddc_vddci_delta);
-               table->AcpLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                       table->AcpLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "cannot find divider ID for engine clock", return result);
-
-               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t) (mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage.Vddc =
-                       tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->SamuLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-                       tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->SamuLevel[count].MinVoltage.Vddci =
-                       tonga_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - data->vddc_vddci_delta);
-               table->SamuLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "cannot find divider ID for SAMU clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param    hwmgr        the address of the hardware manager
- * @param    memory_clock the memory clock to use to populate the structure
- * @param    mclk         the SMC memory level structure to be populated
- * @param    strobe_mode  whether strobe mode is used at this memory clock
- * @param    dllStateOn   whether the memory DLL is left powered on
- */
-static int tonga_calculate_mclk_params(
-               struct pp_hwmgr *hwmgr,
-               uint32_t memory_clock,
-               SMU72_Discrete_MemoryLevel *mclk,
-               bool strobe_mode,
-               bool dllStateOn
-               )
-{
-       const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
-       uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-       uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
-       uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
-       uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
-       uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
-       uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
-       uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
-       uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
-       pp_atomctrl_memory_clock_param mpll_param;
-       int result;
-
-       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
-                               memory_clock, &mpll_param, strobe_mode);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
-       /* MPLL_FUNC_CNTL setup*/
-       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
-       /* MPLL_FUNC_CNTL_1 setup*/
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
-       /* MPLL_AD_FUNC_CNTL setup*/
-       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
-                                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
-       if (data->is_memory_GDDR5) {
-               /* MPLL_DQ_FUNC_CNTL setup*/
-               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-                                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
-               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-                                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
-               /*
-                ************************************
-                Fref = Reference Frequency
-                NF = Feedback divider ratio
-                NR = Reference divider ratio
-                Fnom = Nominal VCO output frequency = Fref * NF / NR
-                Fs = Spreading Rate
-                D = Percentage down-spread / 2
-                Fint = Reference input frequency to PFD = Fref / NR
-                NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
-                CLKS = NS - 1 = ISS_STEP_NUM[11:0]
-                NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
-                CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
-                *************************************
-                */
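-               /*
-                * Worked example with illustrative numbers (not real hardware values):
-                * reference_clock = 2500, NR = 1, speed_spectrum_rate = 50 and a QDR
-                * memory_clock of 10000 with post divider 0 give
-                * freq_nom = 10000 * 4 * 1 = 40000 and tmp = (40000 / 2500)^2 = 256.
-                * With speed_spectrum_percentage = 400 (i.e. 4.00%):
-                * CLKS = 2500 * 5 / 50 = 250
-                * CLKV = ((131 * 400 * 50) / 100) * 256 / 40000 = 167
-                */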
-               pp_atomctrl_internal_ss_info ss_info;
-               uint32_t freq_nom;
-               uint32_t tmp;
-               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
-               /* for GDDR5 for all modes and DDR3 */
-               if (1 == mpll_param.qdr)
-                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
-               else
-                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
-               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
-               tmp = (freq_nom / reference_clock);
-               tmp = tmp * tmp;
-
-               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
-                       /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
-                       /* ss.Info.speed_spectrum_rate -- in unit of khz */
-                       /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
-                       /*     = reference_clock * 5 / speed_spectrum_rate */
-                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
-                       /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
-                       /*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
-                       uint32_t clkv =
-                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
-                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
-                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
-                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
-               }
-       }
-
-       /* MCLK_PWRMGT_CNTL setup */
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
-       /* Save the result data to the output memory level structure */
-       mclk->MclkFrequency   = memory_clock;
-       mclk->MpllFuncCntl    = mpll_func_cntl;
-       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
-       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
-       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
-       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
-       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
-       mclk->DllCntl         = dll_cntl;
-       mclk->MpllSs1         = mpll_ss1;
-       mclk->MpllSs2         = mpll_ss2;
-
-       return 0;
-}
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
-               bool strobe_mode)
-{
-       uint8_t mc_para_index;
-
-       if (strobe_mode) {
-               if (memory_clock < 12500) {
-                       mc_para_index = 0x00;
-               } else if (memory_clock > 47500) {
-                       mc_para_index = 0x0f;
-               } else {
-                       mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
-               }
-       } else {
-               if (memory_clock < 65000) {
-                       mc_para_index = 0x00;
-               } else if (memory_clock > 135000) {
-                       mc_para_index = 0x0f;
-               } else {
-                       mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
-               }
-       }
-
-       return mc_para_index;
-}
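-
-/*
- * Example (illustrative, clocks in the 10 kHz units used throughout this
- * file): in strobe mode a memory_clock of 30000 maps to
- * (30000 - 10000) / 2500 = 8; out of strobe mode a memory_clock of 100000
- * maps to (100000 - 60000) / 5000 = 8. Clocks below/above the window clamp
- * to 0x00/0x0f.
- */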
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
-       uint8_t mc_para_index;
-
-       if (memory_clock < 10000) {
-               mc_para_index = 0;
-       } else if (memory_clock >= 80000) {
-               mc_para_index = 0x0f;
-       } else {
-               mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-       }
-
-       return mc_para_index;
-}
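-
-/*
- * Example (illustrative): a DDR3 memory_clock of 30000 maps to
- * (30000 - 10000) / 5000 + 1 = 5; clocks below 10000 clamp to 0 and clocks
- * at or above 80000 clamp to 0x0f.
- */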
-
-static int tonga_populate_single_memory_level(
-               struct pp_hwmgr *hwmgr,
-               uint32_t memory_clock,
-               SMU72_Discrete_MemoryLevel *memory_level
-               )
-{
-       uint32_t minMvdd = 0;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result = 0;
-       bool dllStateOn;
-       struct cgs_display_info info = {0};
-
-
-       if (NULL != pptable_info->vdd_dep_on_mclk) {
-               result = tonga_get_dependecy_volt_by_clk(hwmgr,
-                       pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "cannot find MinVddc voltage value in memory VDDC voltage dependency table", return result);
-       }
-
-       if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
-               memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
-       } else {
-               memory_level->MinMvdd = minMvdd;
-       }
-       memory_level->EnabledForThrottle = 1;
-       memory_level->EnabledForActivity = 0;
-       memory_level->UpHyst = 0;
-       memory_level->DownHyst = 100;
-       memory_level->VoltageDownHyst = 0;
-
-       /* Indicates maximum activity level for this performance level.*/
-       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       memory_level->StutterEnable = 0;
-       memory_level->StrobeEnable = 0;
-       memory_level->EdcReadEnable = 0;
-       memory_level->EdcWriteEnable = 0;
-       memory_level->RttEnable = 0;
-
-       /* default set to low watermark. Highest level will be set to high later.*/
-       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       data->display_timing.num_existing_displays = info.display_count;
-
-       if ((data->mclk_stutter_mode_threshold != 0) &&
-           (memory_clock <= data->mclk_stutter_mode_threshold) &&
-           (!data->is_uvd_enabled)
-           && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
-           && (data->display_timing.num_existing_displays <= 2)
-           && (data->display_timing.num_existing_displays != 0))
-               memory_level->StutterEnable = 1;
-
-       /* decide strobe mode*/
-       memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
-               (memory_clock <= data->mclk_strobe_mode_threshold);
-
-       /* decide EDC mode and memory clock ratio*/
-       if (data->is_memory_GDDR5) {
-               memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
-                                       memory_level->StrobeEnable);
-
-               if ((data->mclk_edc_enable_threshold != 0) &&
-                               (memory_clock > data->mclk_edc_enable_threshold)) {
-                       memory_level->EdcReadEnable = 1;
-               }
-
-               if ((data->mclk_edc_wr_enable_threshold != 0) &&
-                               (memory_clock > data->mclk_edc_wr_enable_threshold)) {
-                       memory_level->EdcWriteEnable = 1;
-               }
-
-               if (memory_level->StrobeEnable) {
-                       if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
-                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
-                               dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-                       } else {
-                               dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
-                       }
-
-               } else {
-                       dllStateOn = data->dll_defaule_on;
-               }
-       } else {
-               memory_level->StrobeRatio =
-                       tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
-               dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-       }
-
-       result = tonga_calculate_mclk_params(hwmgr,
-               memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
-
-       if (0 == result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
-               /* MCLK frequency in units of 10KHz*/
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
-               /* Indicates maximum activity level for this performance level.*/
-               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
-       }
-
-       return result;
-}
-
-/**
- * Populates the SMC MVDD structure using the provided memory clock.
- *
- * @param    hwmgr         the address of the hardware manager
- * @param    mclk          the MCLK value used to decide whether MVDD should be high or low
- * @param    smio_pattern  the SMIO voltage pattern structure to be populated
- */
-int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
-       const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               /* find the first MVDD entry whose clock is >= the requested clock */
-                       if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
-                               /* Always round to higher voltage. */
-                               smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-
-               PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
-                       "MVDD Voltage is outside the supported range.", return -1);
-
-       } else {
-               return -1;
-       }
-
-       return 0;
-}
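-
-/*
- * Example (hypothetical dependency table): with vdd_dep_on_mclk clocks of
- * {20000, 40000, 60000}, a request of mclk = 35000 stops at index 1 and
- * copies mvdd_voltage_table.entries[1].value -- the loop always rounds up to
- * the first entry whose clock is >= the requested MCLK.
- */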
-
-
-static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       pp_atomctrl_clock_dividers_vi dividers;
-       SMIO_Pattern voltage_level;
-       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
-       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
-       /* The ACPI state should not do DPM on DC (or ever).*/
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-       table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
-       /* use the reference clock from the VBIOS as the ACPI-state SCLK */
-       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
-       /* get the engine clock dividers for this clock value*/
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-               table->ACPILevel.SclkFrequency,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       /* divider ID for required SCLK*/
-       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-       table->ACPILevel.DeepSleepDivId = 0;
-
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
-       spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
-                                                       CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
-
-       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-
-       /* For various features to be enabled/disabled while this level is active.*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       /* SCLK frequency in units of 10KHz*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-       /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
-       table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
-
-       /*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
-       if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
-               table->MemoryACPILevel.MinMvdd =
-                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
-       else
-               table->MemoryACPILevel.MinMvdd = 0;
-
-       /* Force reset on DLL*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
-       /* Disable DLL in ACPIState*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
-       /* Enable DLL bypass signal*/
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK0_BYPASS, 0);
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK1_BYPASS, 0);
-
-       table->MemoryACPILevel.DllCntl            =
-               PP_HOST_TO_SMC_UL(dll_cntl);
-       table->MemoryACPILevel.MclkPwrmgtCntl     =
-               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
-       table->MemoryACPILevel.MpllAdFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
-       table->MemoryACPILevel.MpllDqFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl       =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl_1     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
-       table->MemoryACPILevel.MpllFuncCntl_2     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
-       table->MemoryACPILevel.MpllSs1            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
-       table->MemoryACPILevel.MpllSs2            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpHyst = 0;
-       table->MemoryACPILevel.DownHyst = 100;
-       table->MemoryACPILevel.VoltageDownHyst = 0;
-       /* Indicates maximum activity level for this performance level.*/
-       table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       table->MemoryACPILevel.StutterEnable = 0;
-       table->MemoryACPILevel.StrobeEnable = 0;
-       table->MemoryACPILevel.EdcReadEnable = 0;
-       table->MemoryACPILevel.EdcWriteEnable = 0;
-       table->MemoryACPILevel.RttEnable = 0;
-
-       return result;
-}
-
-static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
-{
-       int result = -1;
-       uint32_t i;
-
-       for (i = 0; i < table->count; i++) {
-               if (value == table->dpm_levels[i].value) {
-                       *boot_level = i;
-                       result = 0;
-               }
-       }
-       return result;
-}
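-
-/*
- * Example: with dpm_levels[] values {30000, 60000, 90000}, a VBIOS bootup
- * value of 60000 sets *boot_level = 1 and returns 0; a value not present in
- * the table returns non-zero so the caller can fall back to DPM level 0.
- */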
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       table->GraphicsBootLevel  = 0;        /* 0 == DPM[0] (low), etc. */
-       table->MemoryBootLevel    = 0;        /* 0 == DPM[0] (low), etc. */
-
-       /* find boot level from dpm table*/
-       result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
-       data->vbios_boot_state.sclk_bootup_value,
-       (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
-
-       if (0 != result) {
-               data->smc_state_table.GraphicsBootLevel = 0;
-               printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value "
-                      "in dependency table. Using Graphics DPM level 0!\n");
-               result = 0;
-       }
-
-       result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
-               data->vbios_boot_state.mclk_bootup_value,
-               (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
-
-       if (0 != result) {
-               data->smc_state_table.MemoryBootLevel = 0;
-               printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value "
-                      "in dependency table. Using Memory DPM level 0!\n");
-               result = 0;
-       }
-
-       table->BootVoltage.Vddc =
-               tonga_get_voltage_id(&(data->vddc_voltage_table),
-                       data->vbios_boot_state.vddc_bootup_value);
-       table->BootVoltage.VddGfx =
-               tonga_get_voltage_id(&(data->vddgfx_voltage_table),
-                       data->vbios_boot_state.vddgfx_bootup_value);
-       table->BootVoltage.Vddci =
-               tonga_get_voltage_id(&(data->vddci_voltage_table),
-                       data->vbios_boot_state.vddci_bootup_value);
-       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return result;
-}
-
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
-       const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t    reference_clock;
-       uint32_t reference_divider;
-       uint32_t fbdiv;
-       int result;
-
-       /* get the engine clock dividers for this clock value*/
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
-       reference_clock = atomctrl_get_reference_clock(hwmgr);
-
-       reference_divider = 1 + dividers.uc_pll_ref_div;
-
-       /* the low 14 bits are the fraction and the high 12 bits are the divider */
-       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-       /* SPLL_FUNC_CNTL setup*/
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-       /* SPLL_FUNC_CNTL_3 setup*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
-       /* set to use fractional accumulation*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-               pp_atomctrl_internal_ss_info ss_info;
-
-               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
-               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
-                       /*
-                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
-                       * ss_info.speed_spectrum_rate -- in unit of khz
-                       */
-                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
-                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
-                       /* clkv = 2 * D * fbdiv / NS */
-                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
-                       cg_spll_spread_spectrum =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
-                       cg_spll_spread_spectrum =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-                       cg_spll_spread_spectrum_2 =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
-               }
-       }
-
-       sclk->SclkFrequency        = engine_clock;
-       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-       return 0;
-}
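-
-/*
- * Worked example with illustrative numbers: reference_clock = 2500,
- * uc_pll_ref_div = 0 (reference_divider = 1) and speed_spectrum_rate = 50
- * give clkS = 2500 * 5 / (1 * 50) = 250; with
- * speed_spectrum_percentage = 400 and fbdiv = 81920,
- * clkV = 4 * 400 * 81920 / (250 * 10000) = 52.
- */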
-
-static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
-               uint32_t min_engine_clock_in_sr)
-{
-       uint32_t i, temp;
-       uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
-
-       PP_ASSERT_WITH_CODE((engine_clock >= min),
-                       "Engine clock can't satisfy stutter requirement!", return 0);
-
-       for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
-               temp = engine_clock >> i;
-
-               if (temp >= min || i == 0)
-                       break;
-       }
-       return (uint8_t)i;
-}
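-
-/*
- * Example: assuming TONGA_MAX_DEEPSLEEP_DIVIDER_ID is 5, an engine_clock of
- * 60000 with min = 5000 walks i = 5 (1875), 4 (3750), 3 (7500) and returns 3,
- * i.e. the deepest sleep divider whose resulting clock still meets the
- * minimum.
- */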
-
-/**
- * Populates a single SMC SCLK structure using the provided engine clock
- *
- * @param    hwmgr         the address of the hardware manager
- * @param    engine_clock  the engine clock to use to populate the structure
- * @param    sclk_activity_level_threshold  the activity threshold for this level
- * @param    graphic_level the SMC graphics level structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock,
-               uint16_t sclk_activity_level_threshold,
-               SMU72_Discrete_GraphicsLevel *graphic_level)
-{
-       int result;
-       uint32_t threshold;
-       uint32_t mvdd;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-
-       /* populate graphics levels*/
-       result = tonga_get_dependecy_volt_by_clk(hwmgr,
-               pptable_info->vdd_dep_on_sclk, engine_clock,
-               &graphic_level->MinVoltage, &mvdd);
-       PP_ASSERT_WITH_CODE((0 == result),
-               "cannot find VDDC voltage value for VDDC engine clock dependency table",
-               return result);
-
-       /* SCLK frequency in units of 10KHz*/
-       graphic_level->SclkFrequency = engine_clock;
-
-       /* Indicates maximum activity level for this performance level. 50% for now*/
-       graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
-       graphic_level->CcPwrDynRm = 0;
-       graphic_level->CcPwrDynRm1 = 0;
-       /* this level can be used if activity is high enough.*/
-       graphic_level->EnabledForActivity = 0;
-       /* this level can be used for throttling.*/
-       graphic_level->EnabledForThrottle = 1;
-       graphic_level->UpHyst = 0;
-       graphic_level->DownHyst = 0;
-       graphic_level->VoltageDownHyst = 0;
-       graphic_level->PowerThrottle = 0;
-
-       threshold = engine_clock * data->fast_watemark_threshold / 100;
-       /*
-        * Get the DAL clock. To be done in the future:
-        * PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
-        * data->display_timing.min_clock_insr = minClocks.engineClockInSR;
-        */
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep))
-               graphic_level->DeepSleepDivId =
-                               tonga_get_sleep_divider_id_from_clock(engine_clock,
-                                               data->display_timing.min_clock_insr);
-
-       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
-       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       if (0 == result) {
-               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
-               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
-       }
-
-       return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param    hwmgr      the address of the hardware manager
- */
-static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct tonga_dpm_table *dpm_table = &data->dpm_table;
-       phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
-       uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
-       int result = 0;
-       uint32_t level_array_address = data->dpm_table_start +
-               offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-       uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
-               SMU72_MAX_LEVELS_GRAPHICS;   /* 64 -> long; 32 -> int*/
-       SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
-       uint32_t i, maxEntry;
-       uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
-       PECI_RegistryValue reg_value;
-       memset(levels, 0x00, level_array_size);
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               result = tonga_populate_single_graphic_level(hwmgr,
-                                       dpm_table->sclk_table.dpm_levels[i].value,
-                                       (uint16_t)data->activity_target[i],
-                                       &(data->smc_state_table.GraphicsLevel[i]));
-
-               if (0 != result)
-                       return result;
-
-               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-               if (i > 1)
-                       data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
-
-               if (0 == i) {
-                       reg_value = 0;
-                       if (reg_value != 0)
-                               data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
-               }
-
-               if (1 == i) {
-                       reg_value = 0;
-                       if (reg_value != 0)
-                               data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
-               }
-       }
-
-       /* Only enable level 0 for now. */
-       data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
-       /* set highest level watermark to high */
-       if (dpm_table->sclk_table.count > 1)
-               data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
-                       PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       data->smc_state_table.GraphicsDpmLevelCount =
-               (uint8_t)dpm_table->sclk_table.count;
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-               tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-       if (pcie_table != NULL) {
-               PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
-                       "There must be 1 or more PCIE levels defined in PPTable.", return -1);
-               maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
-               for (i = 0; i < dpm_table->sclk_table.count; i++) {
-                       data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
-                               (uint8_t) ((i < maxEntry) ? i : maxEntry);
-               }
-       } else {
-               if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
-                       printk(KERN_ERR "[ powerplay ] PCIe DPM enable mask is 0!\n");
-
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                       (1<<(highest_pcie_level_enabled+1))) != 0)) {
-                       highest_pcie_level_enabled++;
-               }
-
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                       (1<<lowest_pcie_level_enabled)) == 0)) {
-                       lowest_pcie_level_enabled++;
-               }
-
-               while ((count < highest_pcie_level_enabled) &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                       (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
-                       count++;
-               }
-               mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
-                       (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
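-               /*
-                * Example: with pcie_dpm_enable_mask = 0x7 (levels 0..2 enabled) the
-                * scans above yield highest = 2, lowest = 0 and mid = 1, so graphics
-                * levels 2..n-1 get PCIe level 2, level 0 gets 0 and level 1 gets 1.
-                */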
-
-
-               /* set pcieDpmLevel to highest_pcie_level_enabled*/
-               for (i = 2; i < dpm_table->sclk_table.count; i++) {
-                       data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
-               }
-
-               /* set pcieDpmLevel to lowest_pcie_level_enabled*/
-               data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to mid_pcie_level_enabled*/
-               data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-       }
-       /* The level count is sent to the SMC once at SMC-table init and never changes. */
-       result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
-                       (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
-       if (0 != result)
-               return result;
-
-       return 0;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param    hwmgr      the address of the hardware manager
- */
-
-static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t level_array_address = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
-       uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
-       SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       memset(levels, 0x00, level_array_size);
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                       "cannot populate memory level as memory clock is zero", return -1);
-               result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
-                       &(data->smc_state_table.MemoryLevel[i]));
-               if (0 != result) {
-                       return result;
-               }
-       }
-
-       /* Only enable level 0 for now.*/
-       data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-       /*
-        * In order to prevent MC activity in stutter mode from pushing DPM up,
-        * the UVD change complements this by putting the MCLK in a higher state
-        * by default, such that we are not affected by the up threshold or by
-        * MCLK DPM latency.
-        */
-       data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
-       data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       /* set highest level watermark to high*/
-       data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       /* The level count is sent to the SMC once at SMC-table init and never changes. */
-       result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
-               level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
-       if (0 != result) {
-               return result;
-       }
-
-       return 0;
-}
-
-struct TONGA_DLL_SPEED_SETTING {
-       uint16_t        Min;            /* Minimum Data Rate */
-       uint16_t        Max;            /* Maximum Data Rate */
-       uint32_t        dll_speed;      /* The desired DLL_SPEED setting */
-};
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       return 0;
-}
-
-/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
-
-
-static int tonga_reset_single_dpm_table(
-       struct pp_hwmgr *hwmgr,
-       struct tonga_single_dpm_table *dpm_table,
-       uint32_t count)
-{
-       uint32_t i;
-       if (count > MAX_REGULAR_DPM_NUMBER)
-               printk(KERN_ERR "[ powerplay ] Fatal error, cannot set up single DPM "
-                      "table entries to exceed max number!\n");
-
-       dpm_table->count = count;
-       for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
-               dpm_table->dpm_levels[i].enabled = false;
-       }
-
-       return 0;
-}
-
-static void tonga_setup_pcie_table_entry(
-       struct tonga_single_dpm_table *dpm_table,
-       uint32_t index, uint32_t pcie_gen,
-       uint32_t pcie_lanes)
-{
-       dpm_table->dpm_levels[index].value = pcie_gen;
-       dpm_table->dpm_levels[index].param1 = pcie_lanes;
-       dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
-       uint32_t i, maxEntry;
-
-       if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
-               data->pcie_gen_power_saving = data->pcie_gen_performance;
-               data->pcie_lane_power_saving = data->pcie_lane_performance;
-       } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
-               data->pcie_gen_performance = data->pcie_gen_power_saving;
-               data->pcie_lane_performance = data->pcie_lane_power_saving;
-       }
-
-       tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
-
-       if (pcie_table != NULL) {
-               /*
-                * maxEntry is used to make sure we reserve one PCIE level for the boot
-                * level (fix for the A+A PSPP issue). If the PCIE table from the PPTable
-                * has a ULV entry + 8 entries, the last entry is ignored.
-                */
-               maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
-                                               SMU72_MAX_LEVELS_LINK : pcie_table->count;
-               for (i = 1; i < maxEntry; i++) {
-                       tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
-                               get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
-                               get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               }
-               data->dpm_table.pcie_speed_table.count = maxEntry - 1;
-       } else {
-               /* Hardcode Pcie Table */
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-                       get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-                       get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-               data->dpm_table.pcie_speed_table.count = 6;
-       }
-       /* Populate last level for boot PCIE level, but do not increment count. */
-       tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-               data->dpm_table.pcie_speed_table.count,
-               get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-               get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-
-       return 0;
-
-}
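-
-/*
- * Example: assuming SMU72_MAX_LEVELS_LINK is 8, an 8-entry PPTable PCIE table
- * (ULV entry + 7 more) fills indices 0..6 from entries 1..7, leaves count at
- * 7 and then writes the boot entry into the reserved index 7 without
- * incrementing count.
- */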
-
-/*
- * This function initializes all DPM state tables for SMU7 based on the dependency
- * table. The dynamic state patching function will then trim these state tables to
- * the allowed range based on the power policy or external client requests, such
- * as UVD requests, etc.
- */
-static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i;
-
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
-               pptable_info->vdd_dep_on_sclk;
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
-               pptable_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
-               "SCLK dependency table is missing. This table is mandatory", return -1);
-       PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
-               "SCLK dependency table must have at least one entry. This table is mandatory", return -1);
-
-       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
-               "MCLK dependency table is missing. This table is mandatory", return -1);
-       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
-               "MCLK dependency table must have at least one entry. This table is mandatory", return -1);
-
-       /* clear the state table to reset everything to default */
-       memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
-       tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
-       tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
-       /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
-       /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
-       /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
-       /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
-
-       PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
-               "SCLK dependency table is missing. This table is mandatory", return -1);
-       /* Initialize Sclk DPM table based on allow Sclk values*/
-       data->dpm_table.sclk_table.count = 0;
-
-       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
-                               allowed_vdd_sclk_table->entries[i].clk) {
-                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
-                               allowed_vdd_sclk_table->entries[i].clk;
-                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
-                       data->dpm_table.sclk_table.count++;
-               }
-       }
-
-       PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
-               "MCLK dependency table is missing. This table is mandatory", return -1);
-       /* Initialize Mclk DPM table based on allow Mclk values */
-       data->dpm_table.mclk_table.count = 0;
-       for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-               if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
-                       allowed_vdd_mclk_table->entries[i].clk) {
-                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
-                               allowed_vdd_mclk_table->entries[i].clk;
-                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
-                       data->dpm_table.mclk_table.count++;
-               }
-       }
-
-       /* setup PCIE gen speed levels*/
-       tonga_setup_default_pcie_tables(hwmgr);
-
-       /* save a copy of the default DPM table*/
-       memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
-
-       return 0;
-}
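-
-/*
- * Example: a dependency table with clocks {30000, 30000, 60000} produces only
- * two SCLK DPM levels (30000 and 60000) -- consecutive duplicate clocks are
- * collapsed by the i == 0 / previous-value check above.
- */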
-
-int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
-               const struct tonga_power_state *bootState)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint8_t count, level;
-
-       count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
-       for (level = 0; level < count; level++) {
-               if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
-                       bootState->performance_levels[0].engine_clock) {
-                       data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
-       for (level = 0; level < count; level++) {
-               if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
-                       bootState->performance_levels[0].memory_clock) {
-                       data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
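-
-/*
- * Example: with vdd_dep_on_sclk clocks {30000, 60000, 90000} and a boot
- * engine clock of 60000, GraphicsBootLevel becomes 1 -- the first level whose
- * clock is >= the boot clock; MemoryBootLevel is chosen the same way.
- */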
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   0 on success
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       SMU72_Discrete_DpmTable  *table = &(data->smc_state_table);
-       const phw_tonga_ulv_parm *ulv = &(data->ulv);
-       uint8_t i;
-       PECI_RegistryValue reg_value;
-       pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-       result = tonga_setup_default_dpm_tables(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to setup default DPM tables!", return result;);
-       memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
-       if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
-               tonga_populate_smc_voltage_tables(hwmgr, table);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition)) {
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc)) {
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-       }
-
-       if (data->is_memory_GDDR5) {
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-       }
-
-       i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
-       if (i == 1 || i == 0) {
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
-       }
-
-       if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
-               result = tonga_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ULV state!", return result;);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
-       }
-
-       result = tonga_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Link Level!", return result;);
-
-       result = tonga_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Graphics Level!", return result;);
-
-       result = tonga_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Memory Level!", return result;);
-
-       result = tonga_populate_smv_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize ACPI Level!", return result;);
-
-       result = tonga_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize VCE Level!", return result;);
-
-       result = tonga_populate_smc_acp_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize ACP Level!", return result;);
-
-       result = tonga_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result;);
-
-       /*
-        * Since only the initial state is completely set up at this point
-        * (the other states are just copies of the boot state) we only need
-        * to populate the ARB settings for the initial state.
-        */
-       result = tonga_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to Write ARB settings for the initial state.", return result;);
-
-       result = tonga_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize UVD Level!", return result;);
-
-       result = tonga_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Boot Level!", return result;);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = tonga_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate Clock Stretcher Data Table!", return result;);
-       }
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-               pptable_info->cac_dtp_table->usTargetOperatingTemp *
-               TONGA_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow =
-               (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-               TONGA_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable  = 1;
-       table->MemoryInterval  = 1;
-       table->VoltageResponseTime  = 0;
-       table->PhaseResponseTime  = 0;
-       table->MemoryThermThrottleEnable  = 1;
-
-       /*
-        * Cail reads the current link status and reports it as the cap (we
-        * cannot change this due to some previous issues we had). The SMC
-        * drops the link status to the lowest level after PowerPlay enables
-        * DPM. After PnP or toggling CF the driver gets reloaded, and this
-        * time Cail reads the link status that the SMC set to low and
-        * reports that as the cap to powerplay.
-        * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
-        */
-       PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
-                       "There must be 1 or more PCIE levels defined in PPTable.",
-                       return -1);
-
-       table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
-       table->PCIeGenInterval  = 1;
-
-       result = tonga_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to populate VRConfig setting!", return result);
-
-       table->ThermGpio  = 17;
-       table->SclkStepSize = 0x4000;
-
-       reg_value = 0;
-       if ((0 == reg_value) &&
-               (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
-                                               &gpio_pin_assignment))) {
-               table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot);
-       } else {
-               table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot);
-       }
-
-       /* ACDC Switch GPIO */
-       reg_value = 0;
-       if ((0 == reg_value) &&
-               (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                                               &gpio_pin_assignment))) {
-               table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_Falcon_QuickTransition);
-
-       reg_value = 0;
-       if (1 == reg_value) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_Falcon_QuickTransition);
-       }
-
-       reg_value = 0;
-       if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
-                       THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
-               table->ThermOutPolarity =
-                       (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-                       (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
-
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO*/
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot) &&
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CombinePCCWithThermalSignal)){
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-               }
-       } else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-       }
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.). */
-       result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
-                       data->dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU72_Discrete_DpmTable) -
-                               3 * sizeof(SMU72_PIDController),
-                       data->sram_end);
-
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to upload dpm data to SMC memory!", return result;);
-
-       return result;
-}
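-
-/*
- * The CONVERT_FROM_HOST_TO_SMC_* / PP_HOST_TO_SMC_* macros above put each
- * multi-byte field into the byte order the SMC firmware expects before the
- * table is copied into SMC memory. A minimal sketch of such a swap,
- * assuming the SMC side is big-endian and the host may be little-endian:
- */
-#include <stdint.h>
-
-static uint32_t swap32(uint32_t v)
-{
-       /* unconditionally reverse the four bytes of a 32-bit value */
-       return ((v & 0x000000FFu) << 24) |
-              ((v & 0x0000FF00u) <<  8) |
-              ((v & 0x00FF0000u) >>  8) |
-              ((v & 0xFF000000u) >> 24);
-}
-
-static uint16_t swap16(uint16_t v)
-{
-       /* same idea for 16-bit fields such as TemperatureLimitHigh */
-       return (uint16_t)((v << 8) | (v >> 8));
-}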
-
-/*
- * Look up the voltage for DAL's requested level, then send the requested
- * VDDC voltage to the SMC. Currently a no-op stub.
- */
-static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
-}
-
-int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-       PPSMC_Result result;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* Apply minimum voltage based on DAL's request level */
-       tonga_apply_dal_minimum_voltage_request(hwmgr);
-
-       if (0 == data->sclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if we discover a hang because of this check, skip this message. */
-               if (tonga_is_dpm_running(hwmgr))
-                       printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled\n");
-
-               if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       result = smum_send_msg_to_smc_with_parameter(
-                                                               hwmgr->smumgr,
-                               (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == result),
-                               "Set Sclk Dpm enable Mask failed", return -1);
-               }
-       }
-
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* Check whether DPM is running; if we discover a hang because of this check, skip this message. */
-               if (tonga_is_dpm_running(hwmgr))
-                       printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled\n");
-
-               if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-                       result = smum_send_msg_to_smc_with_parameter(
-                                                               hwmgr->smumgr,
-                               (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == result),
-                               "Set Mclk Dpm enable Mask failed", return -1);
-               }
-       }
-
-       return 0;
-}
-
-
-int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-       uint32_t level, tmp;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->pcie_dpm_key_disabled) {
-               /* PCIE */
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (0 != level) {
-                               PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
-                                       "force highest pcie dpm state failed!", return -1);
-                       }
-               }
-       }
-
-       if (0 == data->sclk_dpm_key_disabled) {
-               /* SCLK */
-               if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (0 != level) {
-                               PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
-                                       "force highest sclk dpm state failed!", return -1);
-                               if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-                                       CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-                                       printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. "
-                                               "Curr_Sclk_Index does not match the level\n");
-
-                       }
-               }
-       }
-
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* MCLK */
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
-                       level = 0;
-                       tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-                       while (tmp >>= 1)
-                               level++;
-
-                       if (0 != level) {
-                               PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
-                                       "force highest mclk dpm state failed!", return -1);
-                               if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                       TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-                                       printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. "
-                                               "Curr_Mclk_Index does not match the level\n");
-                       }
-               }
-       }
-
-       return 0;
-}
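-
-/*
- * "while (tmp >>= 1) level++;" above computes the index of the highest set
- * bit in the enable mask, i.e. the highest enabled DPM level. The same
- * computation as a standalone helper (a sketch, not driver code):
- */
-#include <stdint.h>
-
-static uint32_t highest_set_bit(uint32_t mask)
-{
-       uint32_t level = 0;
-
-       /* shift right until the top set bit has been consumed */
-       while (mask >>= 1)
-               level++;
-       return level;
-}
-/* highest_set_bit(0x1) == 0, highest_set_bit(0x7) == 2, highest_set_bit(0x20) == 5 */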
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
-{
-       cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
-       hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
-       return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
-       uint32_t table_size;
-       struct phm_clock_voltage_dependency_table *table_clk_vlt;
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       hwmgr->dyn_state.mclk_sclk_ratio = 4;
-       hwmgr->dyn_state.sclk_mclk_delta = 15000;      /* 150 MHz */
-       hwmgr->dyn_state.vddc_vddci_delta = 200;       /* 200mV */
-
-       /* initialize vddc_dep_on_dal_pwrl table */
-       table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
-       table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table_clk_vlt) {
-               printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
-               return -ENOMEM;
-       } else {
-               table_clk_vlt->count = 4;
-               table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
-               table_clk_vlt->entries[0].v = 0;
-               table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
-               table_clk_vlt->entries[1].v = 720;
-               table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
-               table_clk_vlt->entries[2].v = 810;
-               table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
-               table_clk_vlt->entries[3].v = 900;
-               pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
-               hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
-       }
-
-       return 0;
-}
-
-static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-               pptable_info->vdd_dep_on_sclk;
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-               pptable_info->vdd_dep_on_mclk;
-
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-               "VDD dependency on SCLK table is missing. "
-               "This table is mandatory", return -1);
-       PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-               "VDD dependency on SCLK table is empty. "
-               "This table is mandatory", return -1);
-
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-               "VDD dependency on MCLK table is missing. "
-               "This table is mandatory", return -1);
-       PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-               "VDD dependency on MCLK table is empty. "
-               "This table is mandatory", return -1);
-
-       data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
-       data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-
-       pptable_info->max_clock_voltage_on_ac.sclk =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-       pptable_info->max_clock_voltage_on_ac.mclk =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-       pptable_info->max_clock_voltage_on_ac.vddc =
-               allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-       pptable_info->max_clock_voltage_on_ac.vddci =
-               allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-       hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
-               pptable_info->max_clock_voltage_on_ac.sclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
-               pptable_info->max_clock_voltage_on_ac.mclk;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
-               pptable_info->max_clock_voltage_on_ac.vddc;
-       hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
-               pptable_info->max_clock_voltage_on_ac.vddci;
-
-       return 0;
-}
-
-int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       int result = 1;
-
-       PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
-                            "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
-                            return result);
-
-       if (0 == data->pcie_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
-                                                            hwmgr->smumgr,
-                                       PPSMC_MSG_PCIeDPM_UnForceLevel)),
-                                          "unforce pcie level failed!",
-                                                               return -1);
-       }
-
-       result = tonga_upload_dpm_level_enable_mask(hwmgr);
-
-       return result;
-}
-
-static uint32_t tonga_get_lowest_enable_level(
-                               struct pp_hwmgr *hwmgr, uint32_t level_mask)
-{
-       uint32_t level = 0;
-
-       while (0 == (level_mask & (1 << level)))
-               level++;
-
-       return level;
-}
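-
-/*
- * tonga_get_lowest_enable_level() above scans upward for the first set
- * bit; its callers check for a non-zero mask first, otherwise the loop
- * would never terminate. An equivalent one-liner, as a sketch only:
- * __builtin_ctz is a GCC/Clang extension and is not what the driver uses.
- */
-#include <stdint.h>
-
-static uint32_t lowest_set_bit(uint32_t mask)
-{
-       /* count trailing zeros; undefined for mask == 0, like the loop above */
-       return (uint32_t)__builtin_ctz(mask);
-}
-/* lowest_set_bit(0x8) == 3, lowest_set_bit(0x6) == 1 */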
-
-static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-       uint32_t level;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->pcie_dpm_key_disabled) {
-               /* PCIE */
-               if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
-                       level = tonga_get_lowest_enable_level(hwmgr,
-                                                             data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
-                                           "force lowest pcie dpm state failed!", return -1);
-               }
-       }
-
-       if (0 == data->sclk_dpm_key_disabled) {
-               /* SCLK */
-               if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-                       level = tonga_get_lowest_enable_level(hwmgr,
-                                                             data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-
-                       PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
-                                           "force lowest sclk dpm state failed!", return -1);
-
-                       if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-                                                        CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-                               printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. "
-                                       "Curr_Sclk_Index does not match the level\n");
-               }
-       }
-
-       if (0 == data->mclk_dpm_key_disabled) {
-               /* MCLK */
-               if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
-                       level = tonga_get_lowest_enable_level(hwmgr,
-                                                             data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-                       PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
-                                           "force lowest mclk dpm state failed!", return -1);
-                       if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                                        TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-                               printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. "
-                                       "Curr_Mclk_Index does not match the level\n");
-               }
-       }
-
-       return 0;
-}
-
-static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
-{
-       uint8_t entryId;
-       uint8_t voltageId;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-       phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-               for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-                       voltageId = sclk_table->entries[entryId].vddInd;
-                       sclk_table->entries[entryId].vddgfx =
-                               pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
-               }
-       } else {
-               for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-                       voltageId = sclk_table->entries[entryId].vddInd;
-                       sclk_table->entries[entryId].vddc =
-                               pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-               }
-       }
-
-       for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-               voltageId = mclk_table->entries[entryId].vddInd;
-               mclk_table->entries[entryId].vddc =
-                       pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       for (entryId = 0; entryId < mm_table->count; ++entryId) {
-               voltageId = mm_table->entries[entryId].vddcInd;
-               mm_table->entries[entryId].vddc =
-                       pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-       }
-
-       return 0;
-}
-
-static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       uint8_t entryId;
-       phm_ppt_v1_voltage_lookup_record v_record;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-       phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-
-       if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-               for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-                       if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
-                               v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
-                                       sclk_table->entries[entryId].vdd_offset - 0xFFFF;
-                       else
-                               v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
-                                       sclk_table->entries[entryId].vdd_offset;
-
-                       sclk_table->entries[entryId].vddc =
-                               v_record.us_cac_low = v_record.us_cac_mid =
-                               v_record.us_cac_high = v_record.us_vdd;
-
-                       tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
-               }
-
-               for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-                       if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
-                               v_record.us_vdd = mclk_table->entries[entryId].vddc +
-                                       mclk_table->entries[entryId].vdd_offset - 0xFFFF;
-                       else
-                               v_record.us_vdd = mclk_table->entries[entryId].vddc +
-                                       mclk_table->entries[entryId].vdd_offset;
-
-                       mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
-                               v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
-                       tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
-               }
-       }
-
-       return 0;
-}
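-
-/*
- * The (1 << 15) test above treats vdd_offset as a signed 16-bit field:
- * offsets with bit 15 set are taken as negative. For comparison, plain
- * two's-complement decoding is just a cast (a sketch; note the code above
- * subtracts 0xFFFF rather than 0x10000, so the two encodings differ by one):
- */
-#include <stdint.h>
-
-static int32_t decode_offset(uint16_t raw)
-{
-       /* sign-extend the 16-bit field to a signed 32-bit offset */
-       return (int32_t)(int16_t)raw;
-}
-/* decode_offset(0x0005) == 5, decode_offset(0xFFFB) == -5 */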
-
-static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t entryId;
-       phm_ppt_v1_voltage_lookup_record v_record;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-       if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-               for (entryId = 0; entryId < mm_table->count; entryId++) {
-                       if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
-                               v_record.us_vdd = mm_table->entries[entryId].vddc +
-                                       mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
-                       else
-                               v_record.us_vdd = mm_table->entries[entryId].vddc +
-                                       mm_table->entries[entryId].vddgfx_offset;
-
-                       /* Add the calculated VDDGFX to the VDDGFX lookup table */
-                       mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
-                               v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
-                       tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
-               }
-       }
-       return 0;
-}
-
-
-/**
- * Change a virtual leakage voltage to the actual value.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     voltage  pointer to the voltage to patch
- * @param     pLeakageTable  pointer to the leakage table
- */
-static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-               uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
-{
-       uint32_t leakage_index;
-
-       /* search for leakage voltage ID 0xff01 ~ 0xff08 */
-       for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
-               /* if this voltage matches a leakage voltage ID */
-               /* patch with actual leakage voltage */
-               if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
-                       *voltage = pLeakageTable->actual_voltage[leakage_index];
-                       break;
-               }
-       }
-
-       if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-               printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
- * Patch the voltage lookup table with EVV leakage values.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     lookup_table  pointer to the voltage lookup table
- * @param     pLeakageTable  pointer to the leakage table
- * @return    always 0
- */
-static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *lookup_table,
-               phw_tonga_leakage_voltage *pLeakageTable)
-{
-       uint32_t i;
-
-       for (i = 0; i < lookup_table->count; i++) {
-               tonga_patch_with_vdd_leakage(hwmgr,
-                       &lookup_table->entries[i].us_vdd, pLeakageTable);
-       }
-
-       return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
-               phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
-{
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-               pptable_info->max_clock_voltage_on_dc.vddc;
-
-       return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
-               struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
-               uint16_t *Vddgfx)
-{
-       tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
-       return 0;
-}
-
-int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-       uint32_t table_size, i, j;
-       phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-
-       table_size = lookup_table->count;
-
-       PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-               "Lookup table is empty", return -1);
-
-       /* Sorting voltages */
-       for (i = 0; i < table_size - 1; i++) {
-               for (j = i + 1; j > 0; j--) {
-                       if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
-                               tmp_voltage_lookup_record = lookup_table->entries[j-1];
-                               lookup_table->entries[j-1] = lookup_table->entries[j];
-                               lookup_table->entries[j] = tmp_voltage_lookup_record;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       int tmp_result;
-       tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-               tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
-                       pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
-               if (tmp_result != 0)
-                       result = tmp_result;
-
-               tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
-                       &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
-               if (tmp_result != 0)
-                       result = tmp_result;
-       } else {
-               tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
-                       pptable_info->vddc_lookup_table, &(data->vddc_leakage));
-               if (tmp_result != 0)
-                       result = tmp_result;
-
-               tmp_result = tonga_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
-                       &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
-               if (tmp_result != 0)
-                       result = tmp_result;
-       }
-
-       tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-       if (tmp_result != 0)
-               result = tmp_result;
-
-       tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
-       if (tmp_result != 0)
-               result = tmp_result;
-
-       tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
-       if (tmp_result != 0)
-               result = tmp_result;
-
-       tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
-       if (tmp_result != 0)
-               result = tmp_result;
-
-       tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
-       if (tmp_result != 0)
-               result = tmp_result;
-
-       return result;
-}
-
-int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       data->low_sclk_interrupt_threshold = 0;
-
-       return 0;
-}
-
-int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = tonga_read_clock_registers(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to read clock registers!", result = tmp_result);
-
-       tmp_result = tonga_get_memory_type(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to get memory type!", result = tmp_result);
-
-       tmp_result = tonga_enable_acpi_power_management(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to enable ACPI power management!", result = tmp_result);
-
-       tmp_result = tonga_init_power_gate_state(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to init power gate state!", result = tmp_result);
-
-       tmp_result = tonga_get_mc_microcode_version(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to get MC microcode version!", result = tmp_result);
-
-       tmp_result = tonga_init_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to init sclk threshold!", result = tmp_result);
-
-       return result;
-}
-
-/**
- * Enable voltage control
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-       /* enable voltage control */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-       return 0;
-}
-
-/**
- * Checks if we want to support voltage control
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-       const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/*---------------------------MC----------------------------*/
-
-uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
-       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
-{
-       bool result = true;
-
-       switch (inReg) {
-       case  mmMC_SEQ_RAS_TIMING:
-               *outReg = mmMC_SEQ_RAS_TIMING_LP;
-               break;
-
-       case  mmMC_SEQ_DLL_STBY:
-               *outReg = mmMC_SEQ_DLL_STBY_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD0:
-               *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD1:
-               *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CTRL:
-               *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
-               break;
-
-       case mmMC_SEQ_CAS_TIMING:
-               *outReg = mmMC_SEQ_CAS_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING:
-               *outReg = mmMC_SEQ_MISC_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING2:
-               *outReg = mmMC_SEQ_MISC_TIMING2_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CMD:
-               *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CTL:
-               *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D0:
-               *outReg = mmMC_SEQ_RD_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D1:
-               *outReg = mmMC_SEQ_RD_CTL_D1_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D0:
-               *outReg = mmMC_SEQ_WR_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D1:
-               *outReg = mmMC_SEQ_WR_CTL_D1_LP;
-               break;
-
-       case mmMC_PMG_CMD_EMRS:
-               *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS:
-               *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS1:
-               *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
-               break;
-
-       case mmMC_SEQ_PMG_TIMING:
-               *outReg = mmMC_SEQ_PMG_TIMING_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS2:
-               *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_2:
-               *outReg = mmMC_SEQ_WR_CTL_2_LP;
-               break;
-
-       default:
-               result = false;
-               break;
-       }
-
-       return result;
-}
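-
-/*
- * The switch above is a pure 1:1 mapping from shadow registers to their
- * _LP counterparts. A table-driven sketch of the same idea (only two
- * illustrative fields shown; the authoritative list is the switch above):
- */
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdint.h>
-
-struct reg_pair {
-       uint16_t in;    /* S1 (shadow) register */
-       uint16_t out;   /* matching _LP register */
-};
-
-static bool map_s0_reg(const struct reg_pair *map, size_t n,
-                      uint16_t in, uint16_t *out)
-{
-       size_t i;
-
-       for (i = 0; i < n; i++) {
-               if (map[i].in == in) {
-                       *out = map[i].out;
-                       return true;
-               }
-       }
-       return false;   /* unknown register: caller keeps the S1 address */
-}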
-
-int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
-{
-       uint32_t i;
-       uint16_t address;
-
-       for (i = 0; i < table->last; i++) {
-               table->mc_reg_address[i].s0 =
-                       tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
-                       ? address : table->mc_reg_address[i].s1;
-       }
-       return 0;
-}
-
-int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
-{
-       uint8_t i, j;
-
-       PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-               "Invalid VramInfo table.", return -1);
-       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
-               "Invalid VramInfo table.", return -1);
-
-       for (i = 0; i < table->last; i++) {
-               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-       }
-       ni_table->last = table->last;
-
-       for (i = 0; i < table->num_entries; i++) {
-               ni_table->mc_reg_table_entry[i].mclk_max =
-                       table->mc_reg_table_entry[i].mclk_max;
-               for (j = 0; j < table->last; j++) {
-                       ni_table->mc_reg_table_entry[i].mc_data[j] =
-                               table->mc_reg_table_entry[i].mc_data[j];
-               }
-       }
-
-       ni_table->num_entries = table->num_entries;
-
-       return 0;
-}
-
-/**
- * The VBIOS omits some information to reduce size; recover it here.
- * 1.   When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written
- *      to mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be
- *      written to mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2.   When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be
- *      written to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3.   These data need to be set for each clock range.
- *
- * @param    hwmgr the address of the powerplay hardware manager.
- * @param    table the address of MCRegTable
- * @return   always 0
- */
-int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
-{
-       uint8_t i, j, k;
-       uint32_t temp_reg;
-       const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       for (i = 0, j = table->last; i < table->last; i++) {
-               PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                       "Invalid VramInfo table.", return -1);
-               switch (table->mc_reg_address[i].s1) {
-               /*
-                * mmMC_SEQ_MISC1: bits [31:16] (EMRS1) need to be written to
-                * mmMC_PMG_CMD_EMRS/_LP[15:0]; bits [15:0] (MRS) need to be
-                * written to mmMC_PMG_CMD_MRS/_LP[15:0].
-                */
-               case mmMC_SEQ_MISC1:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       ((temp_reg & 0xffff0000)) |
-                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -1);
-
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
-                               if (!data->is_memory_GDDR5) {
-                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-                               }
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -1);
-
-                       if (!data->is_memory_GDDR5) {
-                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
-                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
-                               for (k = 0; k < table->num_entries; k++) {
-                                       table->mc_reg_table_entry[k].mc_data[j] =
-                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-                               }
-                               j++;
-                               PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                                       "Invalid VramInfo table.", return -1);
-                       }
-
-                       break;
-
-               case mmMC_SEQ_RESERVE_M:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -1);
-                       break;
-
-               default:
-                       break;
-               }
-
-       }
-
-       table->last = j;
-
-       return 0;
-}
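-
-/*
- * For mmMC_SEQ_MISC1 the loop above synthesizes two registers: the EMRS
- * entry keeps the old EMRS high half and takes MISC1 bits [31:16] as its
- * low half, while the MRS entry keeps the old MRS high half and takes
- * MISC1 bits [15:0]. The bit surgery in isolation (a sketch, not driver
- * code):
- */
-#include <stdint.h>
-
-static uint32_t make_emrs(uint32_t cmd_emrs, uint32_t misc1)
-{
-       /* high half of the current EMRS command, MISC1[31:16] as low half */
-       return (cmd_emrs & 0xffff0000u) | (misc1 >> 16);
-}
-
-static uint32_t make_mrs(uint32_t cmd_mrs, uint32_t misc1)
-{
-       /* high half of the current MRS command, MISC1[15:0] as low half */
-       return (cmd_mrs & 0xffff0000u) | (misc1 & 0x0000ffffu);
-}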
-
-int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
-{
-       uint8_t i, j;
-       for (i = 0; i < table->last; i++) {
-               for (j = 1; j < table->num_entries; j++) {
-                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
-                               table->mc_reg_table_entry[j].mc_data[i]) {
-                               table->validflag |= (1<<i);
-                               break;
-                       }
-               }
-       }
-
-       return 0;
-}
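-
-/*
- * tonga_set_valid_flag() above sets bit i of validflag when register
- * column i differs between any two adjacent clock entries, so only
- * registers that actually change per memory clock get uploaded. A toy
- * version over a fixed 3-column matrix (a sketch, not driver code):
- */
-#include <stddef.h>
-#include <stdint.h>
-
-static uint32_t changed_columns(const uint32_t rows[][3], size_t nrows)
-{
-       uint32_t flags = 0;
-       size_t i, j;
-
-       for (i = 0; i < 3; i++) {
-               for (j = 1; j < nrows; j++) {
-                       if (rows[j - 1][i] != rows[j][i]) {
-                               flags |= 1u << i;       /* column i varies */
-                               break;
-                       }
-               }
-       }
-       return flags;
-}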
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-       pp_atomctrl_mc_reg_table *table;
-       phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
-       uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
-       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       /* Program additional LP registers that are no longer programmed by VBIOS */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
-       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
-       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
-       if (0 == result)
-               result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
-       if (0 == result) {
-               tonga_set_s0_mc_reg_index(ni_table);
-               result = tonga_set_mc_special_registers(hwmgr, ni_table);
-       }
-
-       if (0 == result)
-               tonga_set_valid_flag(ni_table);
-
-       kfree(table);
-       return result;
-}
-
-/*
- * Copy one arb setting to another and then switch the active set.
- * arbFreqSrc and arbFreqDest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-               uint32_t arbFreqSrc, uint32_t arbFreqDest)
-{
-       uint32_t mc_arb_dram_timing;
-       uint32_t mc_arb_dram_timing2;
-       uint32_t burst_time;
-       uint32_t mc_cg_config;
-
-       switch (arbFreqSrc) {
-       case MC_CG_ARB_FREQ_F0:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-               break;
-
-       case MC_CG_ARB_FREQ_F1:
-               mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-               mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-               burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-               break;
-
-       default:
-               return -1;
-       }
-
-       switch (arbFreqDest) {
-       case MC_CG_ARB_FREQ_F0:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-               break;
-
-       case MC_CG_ARB_FREQ_F1:
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-               cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-               PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-               break;
-
-       default:
-               return -1;
-       }
-
-       mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-       mc_cg_config |= 0x0000000F;
-       cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-       PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
-
-       return 0;
-}
-
-/**
- * Initial switch from ARB F0->F1
- *
- * This function is to be called from the SetPowerState table.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
-{
-       return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t tmp;
-       int result;
-
-       /*
-        * This is a read-modify-write on the first byte of the ARB table.
-        * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
-        * is the field 'current'. This solution is ugly, but we never write
-        * the whole table, only individual fields in it. In reality this
-        * field should not be in that structure but in a soft register.
-        */
-       result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-                               data->arb_table_start, &tmp, data->sram_end);
-
-       if (0 != result)
-               return result;
-
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return tonga_write_smc_sram_dword(hwmgr->smumgr,
-                       data->arb_table_start,  tmp, data->sram_end);
-}
-
-int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
-{
-       const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
-               if (data->tonga_mc_reg_table.validflag & 1<<j) {
-                       PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
-                               "Index of mc_reg_table->address[] array out of bounds", return -1);
-                       mc_reg_table->address[i].s0 =
-                               PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
-                       mc_reg_table->address[i].s1 =
-                               PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
-                       i++;
-               }
-       }
-
-       mc_reg_table->last = (uint8_t)i;
-
-       return 0;
-}
-
-/* Convert register values from driver format to SMC format. */
-void tonga_convert_mc_registers(
-       const phw_tonga_mc_reg_entry *pEntry,
-       SMU72_Discrete_MCRegisterSet *pData,
-       uint32_t numEntries, uint32_t validflag)
-{
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < numEntries; j++) {
-               if (validflag & 1<<j) {
-                       pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
-                       i++;
-               }
-       }
-}
-
-/* Find the entry in the memory range table, then populate the value into the SMC's tonga_mc_reg_table. */
-int tonga_convert_mc_reg_table_entry_to_smc(
-               struct pp_hwmgr *hwmgr,
-               const uint32_t memory_clock,
-               SMU72_Discrete_MCRegisterSet *mc_reg_table_data
-               )
-{
-       const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t i = 0;
-
-       for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
-               if (memory_clock <=
-                       data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
-                       break;
-               }
-       }
-
-       if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
-               --i;
-
-       tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
-               mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
-
-       return 0;
-}
-
-int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_MCRegisters *mc_reg_table)
-{
-       int result = 0;
-       tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       int res;
-       uint32_t i;
-
-       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
-               res = tonga_convert_mc_reg_table_entry_to_smc(
-                               hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[i].value,
-                               &mc_reg_table->data[i]
-                               );
-
-               if (0 != res)
-                       result = res;
-       }
-
-       return result;
-}
-
-int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
-       result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
-       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize MCRegTable for driver state!", return result;);
-
-       return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
-                       (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
-}
-
-/**
- * Programs static screen detection parameters.
- *
- * @param   hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* Set the static screen threshold unit. */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
-               CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-               data->static_screen_threshold_unit);
-       /* Set the static screen threshold. */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
-               CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-               data->static_screen_threshold);
-
-       return 0;
-}
-
-/**
- * Set up the display gap for glitch-free memory clock switching.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-       uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
-                                                       CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-
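-       /* Ignore the display gap in general, but wait for VBLANK around memory clock changes. */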
-       display_gap = PHM_SET_FIELD(display_gap,
-                                       CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
-
-       display_gap = PHM_SET_FIELD(display_gap,
-                                       CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-       return 0;
-}
-
-/**
- * Programs activity state transition voting clients
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-       tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-       /* Clear reset for voting clients before enabling DPM */
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-               SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-               SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-               ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-       return 0;
-}
-
-
-int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = tonga_check_for_dpm_stopped(hwmgr);
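-       /* Note: the result of the stopped-DPM check is overwritten below without being examined. */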
-
-       if (cf_tonga_voltage_control(hwmgr)) {
-               tmp_result = tonga_enable_voltage_control(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to enable voltage control!", result = tmp_result);
-
-               tmp_result = tonga_construct_voltage_tables(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to contruct voltage tables!", result = tmp_result);
-       }
-
-       tmp_result = tonga_initialize_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to initialize MC reg table!", result = tmp_result);
-
-       tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to program static screen threshold parameters!", result = tmp_result);
-
-       tmp_result = tonga_enable_display_gap(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to enable display gap!", result = tmp_result);
-
-       tmp_result = tonga_program_voting_clients(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to program voting clients!", result = tmp_result);
-
-       tmp_result = tonga_process_firmware_header(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to process firmware header!", result = tmp_result);
-
-       tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
-
-       tmp_result = tonga_init_smc_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to initialize SMC table!", result = tmp_result);
-
-       tmp_result = tonga_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to initialize ARB table index!", result = tmp_result);
-
-       tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to populate initialize MC Reg table!", result = tmp_result);
-
-       tmp_result = tonga_notify_smc_display_change(hwmgr, false);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to notify no display!", result = tmp_result);
-
-       /* enable SCLK control */
-       tmp_result = tonga_enable_sclk_control(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to enable SCLK control!", result = tmp_result);
-
-       /* enable DPM */
-       tmp_result = tonga_start_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to start DPM!", result = tmp_result);
-
-       return result;
-}
-
-int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = tonga_check_for_dpm_running(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "SMC is still running!", return 0);
-
-       tmp_result = tonga_stop_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to stop DPM!", result = tmp_result);
-
-       tmp_result = tonga_reset_to_default(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-               "Failed to reset to default!", result = tmp_result);
-
-       return result;
-}
-
-int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       result = tonga_set_boot_state(hwmgr);
-       if (0 != result)
-               printk(KERN_ERR "[ powerplay ] Failed to reset ASIC via set boot state!\n");
-
-       return result;
-}
-
-int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-       return phm_hwmgr_backend_fini(hwmgr);
-}
-
-/**
- * Initializes the Volcanic Islands Hardware Manager
- *
- * @param   hwmgr the address of the powerplay hardware manager.
- * @return   0 on success; otherwise an appropriate error code.
- */
-int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       SMU72_Discrete_DpmTable  *table = NULL;
-       tonga_hwmgr *data;
-       pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phw_tonga_ulv_parm *ulv;
-       struct cgs_system_info sys_info = {0};
-
-       PP_ASSERT_WITH_CODE((NULL != hwmgr),
-               "Invalid Parameter!", return -1;);
-
-       data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
-
-       data->dll_defaule_on = false;
-       data->sram_end = SMC_RAM_END;
-
-       data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
-       data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
-
-       data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-       data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
-       data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_DisableVoltageIsland);
-
-       data->sclk_dpm_key_disabled = 0;
-       data->mclk_dpm_key_disabled = 0;
-       data->pcie_dpm_key_disabled = 0;
-       data->pcc_monitor_enabled = 0;
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_UnTabledHardwareInterface);
-
-       data->gpio_debug = 0;
-       data->engine_clock_data = 0;
-       data->memory_clock_data = 0;
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_DynamicPatchPowerState);
-
-       /* Need to set voltage control types before EVV patching. */
-       data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
-       data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
-       data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
-       data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-
-       if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                               VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
-               data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ControlVDDGFX)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                       VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
-                       data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-               }
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ControlVDDGFX);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableMVDDControl)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                                       VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
-                       data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
-               }
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EnableMVDDControl);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ControlVDDCI)) {
-               if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                                       VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-                       data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
-               else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-                                               VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-                       data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-       }
-
-       if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_ControlVDDCI);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_TablelessHardwareInterface);
-
-       if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher);
-
-       /* Initializes DPM default values. */
-       tonga_initialize_dpm_defaults(hwmgr);
-
-       /* Get the leakage voltage based on the leakage ID. */
-       PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
-               "Get EVV voltage failed. Abort driver loading!", return -1);
-
-       tonga_complete_dependency_tables(hwmgr);
-
-       /* Parse pptable data read from the VBIOS. */
-       tonga_set_private_var_based_on_pptale(hwmgr);
-
-       /* ULV support */
-       ulv = &(data->ulv);
-       ulv->ulv_supported = false;
-
-       /* Initialize Dynamic State Adjustment Rule Settings. */
-       result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-       if (result)
-               printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
-       data->uvd_enabled = false;
-
-       table = &(data->smc_state_table);
-
-       /*
-        * If ucGPIO_ID == VDDC_PCC_GPIO_PINID in the GPIO LUT, the Peak Current
-        * Control feature is enabled and we should program the PCC HW register.
-        */
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
-               uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
-                                                                               CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-
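-               /* Each supported GPIO bit position selects a different CNB_PWRMGT_CNTL field. */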
-               switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
-               case 0:
-                       temp_reg = PHM_SET_FIELD(temp_reg,
-                               CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
-                       break;
-               case 1:
-                       temp_reg = PHM_SET_FIELD(temp_reg,
-                               CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
-                       break;
-               case 2:
-                       temp_reg = PHM_SET_FIELD(temp_reg,
-                               CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
-                       break;
-               case 3:
-                       temp_reg = PHM_SET_FIELD(temp_reg,
-                               CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
-                       break;
-               case 4:
-                       temp_reg = PHM_SET_FIELD(temp_reg,
-                               CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
-                       break;
-               default:
-                       printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! "
-                               "Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n");
-                       break;
-               }
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCNB_PWRMGT_CNTL, temp_reg);
-       }
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_EnableSMU7ThermalManagement);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_SMU7);
-
-       data->vddc_phase_shed_control = false;
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                     PHM_PlatformCaps_UVDPowerGating);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                     PHM_PlatformCaps_VCEPowerGating);
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
-       result = cgs_query_system_info(hwmgr->device, &sys_info);
-       if (!result) {
-               if (sys_info.value & AMD_PG_SUPPORT_UVD)
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                     PHM_PlatformCaps_UVDPowerGating);
-               if (sys_info.value & AMD_PG_SUPPORT_VCE)
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                     PHM_PlatformCaps_VCEPowerGating);
-       }
-
-       if (0 == result) {
-               data->is_tlu_enabled = false;
-               hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-                       TONGA_MAX_HARDWARE_POWERLEVELS;
-               hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-               hwmgr->platform_descriptor.minimumClocksReductionPercentage  = 50;
-
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-               else
-                       data->pcie_gen_cap = (uint32_t)sys_info.value;
-               if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-                       data->pcie_spc_cap = 20;
-               sys_info.size = sizeof(struct cgs_system_info);
-               sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-               result = cgs_query_system_info(hwmgr->device, &sys_info);
-               if (result)
-                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-               else
-                       data->pcie_lane_cap = (uint32_t)sys_info.value;
-       } else {
-               /* Ignore return value in here, we are cleaning up a mess. */
-               tonga_hwmgr_backend_fini(hwmgr);
-       }
-
-       return result;
-}
-
-static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
-               enum amd_dpm_forced_level level)
-{
-       int ret = 0;
-
-       switch (level) {
-       case AMD_DPM_FORCED_LEVEL_HIGH:
-               ret = tonga_force_dpm_highest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_LOW:
-               ret = tonga_force_dpm_lowest(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       case AMD_DPM_FORCED_LEVEL_AUTO:
-               ret = tonga_unforce_dpm_levels(hwmgr);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               break;
-       }
-
-       hwmgr->dpm_level = level;
-       return ret;
-}
-
-static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-                               struct pp_power_state  *prequest_ps,
-                       const struct pp_power_state *pcurrent_ps)
-{
-       struct tonga_power_state *tonga_ps =
-                               cast_phw_tonga_power_state(&prequest_ps->hardware);
-
-       uint32_t sclk;
-       uint32_t mclk;
-       struct PP_Clocks minimum_clocks = {0};
-       bool disable_mclk_switching;
-       bool disable_mclk_switching_for_frame_lock;
-       struct cgs_display_info info = {0};
-       const struct phm_clock_and_voltage_limits *max_limits;
-       uint32_t i;
-       tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       int32_t count;
-       int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-       data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
-       PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
-                                "VI should always have 2 performance levels",
-                                );
-
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-                       &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-                       &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-       if (PP_PowerSource_DC == hwmgr->power_source) {
-               for (i = 0; i < tonga_ps->performance_level_count; i++) {
-                       if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
-                               tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
-                       if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
-                               tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
-               }
-       }
-
-       tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
-       tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
-
-       tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
-       /* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-
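-               /* A stable P-state pins SCLK at 75% of the maximum AC engine clock, snapped to a dependency-table level below. */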
-               max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-               stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-               for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
-                       if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
-                               stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
-                               break;
-                       }
-               }
-
-               if (count < 0)
-                       stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;
-
-               stable_pstate_mclk = max_limits->mclk;
-
-               minimum_clocks.engineClock = stable_pstate_sclk;
-               minimum_clocks.memoryClock = stable_pstate_mclk;
-       }
-
-       if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-               minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-       if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-               minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-       tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-       if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
-                                       "Overdrive sclk exceeds limit",
-                                       hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-               if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-                       tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
-       }
-
-       if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-               PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-                       "Overdrive mclk exceeds limit",
-                       hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-               if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-                       tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
-       }
-
-       disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-                                   hwmgr->platform_descriptor.platformCaps,
-                                   PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
-
-       sclk  = tonga_ps->performance_levels[0].engine_clock;
-       mclk  = tonga_ps->performance_levels[0].memory_clock;
-
-       if (disable_mclk_switching)
-               mclk  = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;
-
-       if (sclk < minimum_clocks.engineClock)
-               sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
-
-       if (mclk < minimum_clocks.memoryClock)
-               mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
-
-       tonga_ps->performance_levels[0].engine_clock = sclk;
-       tonga_ps->performance_levels[0].memory_clock = mclk;
-
-       tonga_ps->performance_levels[1].engine_clock =
-               (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
-                             tonga_ps->performance_levels[1].engine_clock :
-                             tonga_ps->performance_levels[0].engine_clock;
-
-       if (disable_mclk_switching) {
-               if (mclk < tonga_ps->performance_levels[1].memory_clock)
-                       mclk = tonga_ps->performance_levels[1].memory_clock;
-
-               tonga_ps->performance_levels[0].memory_clock = mclk;
-               tonga_ps->performance_levels[1].memory_clock = mclk;
-       } else {
-               if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
-                       tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-               for (i = 0; i < tonga_ps->performance_level_count; i++) {
-                       tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-                       tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-                       tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-                       tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
-               }
-       }
-
-       return 0;
-}
-
-int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-       return sizeof(struct tonga_power_state);
-}
-
-static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct tonga_power_state  *tonga_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-       if (low)
-               return tonga_ps->performance_levels[0].memory_clock;
-       else
-               return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-}
-
-static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-       struct pp_power_state  *ps;
-       struct tonga_power_state  *tonga_ps;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-       if (low)
-               return tonga_ps->performance_levels[0].engine_clock;
-       else
-               return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-}
-
-static uint16_t tonga_get_current_pcie_speed(
-                                                  struct pp_hwmgr *hwmgr)
-{
-       uint32_t speed_cntl = 0;
-
-       speed_cntl = cgs_read_ind_register(hwmgr->device,
-                                                  CGS_IND_REG__PCIE,
-                                                  ixPCIE_LC_SPEED_CNTL);
-       return((uint16_t)PHM_GET_FIELD(speed_cntl,
-                       PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int tonga_get_current_pcie_lane_number(
-                                                  struct pp_hwmgr *hwmgr)
-{
-       uint32_t link_width;
-
-       link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                                                       CGS_IND_REG__PCIE,
-                                                 PCIE_LC_LINK_WIDTH_CNTL,
-                                                       LC_LINK_WIDTH_RD);
-
-       PP_ASSERT_WITH_CODE((7 >= link_width),
-                       "Invalid PCIe lane width!", return 0);
-
-       return decode_pcie_lane_width(link_width);
-}
-
-static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-                                       struct pp_hw_power_state *hw_ps)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
-       ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-       uint16_t size;
-       uint8_t frev, crev;
-       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-       /* First retrieve the Boot clocks and VDDC from the firmware info table.
-        * We assume here that fw_info is unchanged if this call fails.
-        */
-       fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-                       hwmgr->device, index,
-                       &size, &frev, &crev);
-       if (!fw_info)
-               /* During a test, there is no firmware info table. */
-               return 0;
-
-       /* Patch the state. */
-       data->vbios_boot_state.sclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultEngineClock);
-       data->vbios_boot_state.mclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultMemoryClock);
-       data->vbios_boot_state.mvdd_bootup_value  = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-       data->vbios_boot_state.vddc_bootup_value  = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-       data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-       data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
-       data->vbios_boot_state.pcie_lane_bootup_value =
-                       (uint16_t)tonga_get_current_pcie_lane_number(hwmgr);
-
-       /* set boot power state */
-       ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-       ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-       ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-       ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-       return 0;
-}
-
-static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-               void *state, struct pp_power_state *power_state,
-               void *pp_table, uint32_t classification_flag)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       struct tonga_power_state  *tonga_ps =
-                       (struct tonga_power_state *)(&(power_state->hardware));
-
-       struct tonga_performance_level *performance_level;
-
-       ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-
-       ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-                       (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-
-       ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-                       (ATOM_Tonga_SCLK_Dependency_Table *)
-                       (((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
-       ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-                       (ATOM_Tonga_MCLK_Dependency_Table *)
-                       (((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-       /* The following fields are not initialized here: id, orderedList, allStatesList. */
-       power_state->classification.ui_label =
-                       (le16_to_cpu(state_entry->usClassification) &
-                       ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-                       ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-       power_state->classification.flags = classification_flag;
-       /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-       power_state->classification.temporary_state = false;
-       power_state->classification.to_be_deleted = false;
-
-       power_state->validation.disallowOnDC =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
-
-       power_state->pcie.lanes = 0;
-
-       power_state->display.disableFrameModulation = false;
-       power_state->display.limitRefreshrate = false;
-       power_state->display.enableVariBright =
-                       (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-       power_state->validation.supportedPowerLevels = 0;
-       power_state->uvd_clocks.VCLK = 0;
-       power_state->uvd_clocks.DCLK = 0;
-       power_state->temperatures.min = 0;
-       power_state->temperatures.max = 0;
-
-       performance_level = &(tonga_ps->performance_levels
-                       [tonga_ps->performance_level_count++]);
-
-       PP_ASSERT_WITH_CODE(
-                       (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
-                       "Performance levels exceeds SMC limit!",
-                       return -1);
-
-       PP_ASSERT_WITH_CODE(
-                       (tonga_ps->performance_level_count <=
-                                       hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-                       "Performance levels exceeds Driver limit!",
-                       return -1);
-
-       /* Performance levels are arranged from low to high. */
-       performance_level->memory_clock =
-                               le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
-
-       performance_level->engine_clock =
-                               le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
-
-       performance_level->pcie_gen = get_pcie_gen_support(
-                                                       data->pcie_gen_cap,
-                                            state_entry->ucPCIEGenLow);
-
-       performance_level->pcie_lane = get_pcie_lane_support(
-                                                   data->pcie_lane_cap,
-                                          state_entry->ucPCIELaneLow);
-
-       performance_level =
-                       &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
-
-       performance_level->memory_clock =
-                               le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
-
-       performance_level->engine_clock =
-                               le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
-
-       performance_level->pcie_gen = get_pcie_gen_support(
-                                                       data->pcie_gen_cap,
-                                           state_entry->ucPCIEGenHigh);
-
-       performance_level->pcie_lane = get_pcie_lane_support(
-                                                   data->pcie_lane_cap,
-                                          state_entry->ucPCIELaneHigh);
-
-       return 0;
-}
-
-static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-                   unsigned long entry_index, struct pp_power_state *ps)
-{
-       int result;
-       struct tonga_power_state *tonga_ps;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-                                          table_info->vdd_dep_on_mclk;
-
-       ps->hardware.magic = PhwTonga_Magic;
-
-       tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
-
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
-                       tonga_get_pp_table_entry_callback_func);
-
-       /* This is the earliest point at which we have both the dependency tables and the
-        * VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
-        * If there is only one VDDCI/MCLK level, check that it matches the VBIOS boot state.
-        */
-       if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-               if (dep_mclk_table->entries[0].clk !=
-                               data->vbios_boot_state.mclk_bootup_value)
-                       printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot MCLK level\n");
-               if (dep_mclk_table->entries[0].vddci !=
-                               data->vbios_boot_state.vddci_bootup_value)
-                       printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-                                       "does not match VBIOS boot VDDCI level\n");
-       }
-
-       /* set DC compatible flag if this state supports DC */
-       if (!ps->validation.disallowOnDC)
-               tonga_ps->dc_compatible = true;
-
-       if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
-               data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
-       else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
-               if (data->bacos.best_match == 0xffff) {
-                       /* For V.I. use boot state as base BACO state */
-                       data->bacos.best_match = PP_StateClassificationFlag_Boot;
-                       data->bacos.performance_level = tonga_ps->performance_levels[0];
-               }
-       }
-
-       tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
-       tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;
-
-       if (!result) {
-               uint32_t i;
-
-               switch (ps->classification.ui_label) {
-               case PP_StateUILabel_Performance:
-                       data->use_pcie_performance_levels = true;
-
-                       for (i = 0; i < tonga_ps->performance_level_count; i++) {
-                               if (data->pcie_gen_performance.max <
-                                               tonga_ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.max =
-                                                       tonga_ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_performance.min >
-                                               tonga_ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_performance.min =
-                                                       tonga_ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_performance.max <
-                                               tonga_ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.max =
-                                                       tonga_ps->performance_levels[i].pcie_lane;
-
-                               if (data->pcie_lane_performance.min >
-                                               tonga_ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_performance.min =
-                                                       tonga_ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               case PP_StateUILabel_Battery:
-                       data->use_pcie_power_saving_levels = true;
-
-                       for (i = 0; i < tonga_ps->performance_level_count; i++) {
-                               if (data->pcie_gen_power_saving.max <
-                                               tonga_ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.max =
-                                                       tonga_ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_gen_power_saving.min >
-                                               tonga_ps->performance_levels[i].pcie_gen)
-                                       data->pcie_gen_power_saving.min =
-                                                       tonga_ps->performance_levels[i].pcie_gen;
-
-                               if (data->pcie_lane_power_saving.max <
-                                               tonga_ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.max =
-                                                       tonga_ps->performance_levels[i].pcie_lane;
-
-                               if (data->pcie_lane_power_saving.min >
-                                               tonga_ps->performance_levels[i].pcie_lane)
-                                       data->pcie_lane_power_saving.min =
-                                                       tonga_ps->performance_levels[i].pcie_lane;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-static void
-tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-       uint32_t sclk, mclk, activity_percent;
-       uint32_t offset;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
-
-       sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
-
-       mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-       seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);
-
-       offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
-       activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
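-       /* AverageGraphicsActivity appears to be 8.8 fixed point; add 0x80 to round, then shift down to a whole percentage. */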
-       activity_percent += 0x80;
-       activity_percent >>= 8;
-
-       seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-       seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-       seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
-       uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-       struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
-       uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-       struct PP_Clocks min_clocks = {0};
-       uint32_t i;
-       struct cgs_display_info info = {0};
-
-       data->need_update_smu7_dpm_table = 0;
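-       /* Set OD_UPDATE when a requested clock is not an existing DPM level; set UPDATE when DeepSleep or display requirements changed. */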
-
-       for (i = 0; i < psclk_table->count; i++) {
-               if (sclk == psclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= psclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-       else {
-               /* TODO: Check SCLK in DAL's minimum clocks in case a DeepSleep divider update is required. */
-               if (data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
-                       data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-       }
-
-       for (i = 0; i < pmclk_table->count; i++) {
-               if (mclk == pmclk_table->dpm_levels[i].value)
-                       break;
-       }
-
-       if (i >= pmclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-       return 0;
-}
-
-static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
-{
-       uint32_t i;
-       uint32_t sclk, max_sclk = 0;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
-       for (i = 0; i < hw_ps->performance_level_count; i++) {
-               sclk = hw_ps->performance_levels[i].engine_clock;
-               if (max_sclk < sclk)
-                       max_sclk = sclk;
-       }
-
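-       /* Map the DPM level index of the highest SCLK onto the PCIe speed table, clamping to its last entry. */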
-       for (i = 0; i < pdpm_table->sclk_table.count; i++) {
-               if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-                       return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
-                                       pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
-                                       pdpm_table->pcie_speed_table.dpm_levels[i].value);
-       }
-
-       return 0;
-}
-
-static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
-       const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
-       uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
-       uint16_t current_link_speed;
-
-       if (data->force_pcie_gen == PP_PCIEGenInvalid)
-               current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
-       else
-               current_link_speed = data->force_pcie_gen;
-
-       data->force_pcie_gen = PP_PCIEGenInvalid;
-       data->pspp_notify_required = false;
-       if (target_link_speed > current_link_speed) {
-               switch (target_link_speed) {
-               case PP_PCIEGen3:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-                               break;
-                       data->force_pcie_gen = PP_PCIEGen2;
-                       if (current_link_speed == PP_PCIEGen2)
-                               break;
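-                       /* fall through - retry with a Gen2 request */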
-               case PP_PCIEGen2:
-                       if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-                               break;
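-                       /* fall through - the Gen2 request failed; record the current speed */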
-               default:
-                       data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
-                       break;
-               }
-       } else {
-               if (target_link_speed < current_link_speed)
-                       data->pspp_notify_required = true;
-       }
-
-       return 0;
-}
-
-static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                                   "Trying to freeze SCLK DPM when DPM is disabled",
-                       );
-               PP_ASSERT_WITH_CODE(
-                       0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                         PPSMC_MSG_SCLKDPM_FreezeLevel),
-                       "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-                       return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-                DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                                   "Trying to freeze MCLK DPM when DPM is disabled",
-                       );
-               PP_ASSERT_WITH_CODE(
-                       0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                                       PPSMC_MSG_MCLKDPM_FreezeLevel),
-                       "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-                       return -1);
-       }
-
-       return 0;
-}
-
-static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result = 0;
-
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-       uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-       struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
-       struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
-       uint32_t dpm_count, clock_percent;
-       uint32_t i;
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-               pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-               /* Need to do calculation based on the golden DPM table
-                * as the Heatmap GPU Clock axis is also based on the default values
-                */
-                       PP_ASSERT_WITH_CODE(
-                               (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
-                               "Divide by 0!",
-                               return -1);
-                       dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
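-                       /* Rescale the intermediate levels (the lowest two stay fixed) by the top level's overdrive delta percentage. */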
-                       for (i = dpm_count; i > 1; i--) {
-                               if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
-                                       clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
-                                                       pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
-                                       pdpm_table->sclk_table.dpm_levels[i].value =
-                                                       pgolden_dpm_table->sclk_table.dpm_levels[i].value +
-                                                       (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-
-                               } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
-                                       clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
-                                                               pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
-                                       pdpm_table->sclk_table.dpm_levels[i].value =
-                                                       pgolden_dpm_table->sclk_table.dpm_levels[i].value -
-                                                       (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-                               } else
-                                       pdpm_table->sclk_table.dpm_levels[i].value =
-                                                       pgolden_dpm_table->sclk_table.dpm_levels[i].value;
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-               pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-                       PP_ASSERT_WITH_CODE(
-                                       (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
-                                       "Divide by 0!",
-                                       return -1);
-                       dpm_count = pdpm_table->mclk_table.count < 2 ? 0 : pdpm_table->mclk_table.count-2;
-                       for (i = dpm_count; i > 1; i--) {
-                               if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
-                                               clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
-                                                                   pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
-                                               pdpm_table->mclk_table.dpm_levels[i].value =
-                                                                               pgolden_dpm_table->mclk_table.dpm_levels[i].value +
-                                                                               (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-
-                               } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
-                                               clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
-                                                                   pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
-                                               pdpm_table->mclk_table.dpm_levels[i].value =
-                                                                       pgolden_dpm_table->mclk_table.dpm_levels[i].value -
-                                                                       (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-                               } else
-                                       pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
-                       }
-               }
-       }
-
-       if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
-               result = tonga_populate_all_graphic_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-                       return result);
-       }
-
-       if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
-               /*populate MCLK dpm table to SMU7 */
-               result = tonga_populate_all_memory_levels(hwmgr);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-                               return result);
-       }
-
-       return result;
-}
-
-static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-                         struct tonga_single_dpm_table * pdpm_table,
-                            uint32_t low_limit, uint32_t high_limit)
-{
-       uint32_t i;
-
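-       /*
-        * Only levels whose clock lies inside [low_limit, high_limit] stay
-        * enabled; levels outside the requested state's range are masked out
-        * of DPM.
-        */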
-       for (i = 0; i < pdpm_table->count; i++) {
-               if ((pdpm_table->dpm_levels[i].value < low_limit) ||
-                   (pdpm_table->dpm_levels[i].value > high_limit))
-                       pdpm_table->dpm_levels[i].enabled = false;
-               else
-                       pdpm_table->dpm_levels[i].enabled = true;
-       }
-       return 0;
-}
-
-static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t high_limit_count;
-
-       PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
-                               "power state did not have any performance level",
-                                return -1);
-
-       high_limit_count = (1 == hw_state->performance_level_count) ? 0 : 1;
-
-       tonga_trim_single_dpm_states(hwmgr,
-                                       &(data->dpm_table.sclk_table),
-                                       hw_state->performance_levels[0].engine_clock,
-                                       hw_state->performance_levels[high_limit_count].engine_clock);
-
-       tonga_trim_single_dpm_states(hwmgr,
-                                               &(data->dpm_table.mclk_table),
-                                               hw_state->performance_levels[0].memory_clock,
-                                               hw_state->performance_levels[high_limit_count].memory_clock);
-
-       return 0;
-}
-
-static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
-{
-       int result;
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-
-       result = tonga_trim_dpm_states(hwmgr, tonga_ps);
-       if (0 != result)
-               return result;
-
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-       data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
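-       /*
-        * Bit 0 of the MCLK mask is the lowest memory DPM level; clearing it
-        * while UVD is active presumably keeps the memory clock from dropping
-        * to its minimum level during decode.
-        */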
-       if (data->uvd_enabled)
-               data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-       return 0;
-}
-
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                                 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
-                                 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
-}
-
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-                                 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
-                                 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (!bgate) {
-               data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
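-               /*
-                * Read-modify-write the 32-bit word in SMC SRAM that holds
-                * UvdBootLevel: round the byte offset down to a word boundary,
-                * then replace the top byte (bits 31:24), which the code
-                * assumes holds the boot level.
-                */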
-               mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0x00FFFFFF;
-               mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
-                   phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                               PPSMC_MSG_UVDDPM_SetEnabledMask,
-                                               (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-       }
-
-       return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
-       const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
-               data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);
-
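-               /*
-                * Same read-modify-write pattern as the UVD path, except
-                * VceBootLevel is assumed to sit in byte 2 of its word
-                * (mask 0xFF00FFFF, shift 16).
-                */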
-               mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-               mm_boot_level_offset /= 4;
-               mm_boot_level_offset *= 4;
-               mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
-               mm_boot_level_value &= 0xFF00FFFF;
-               mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)(1 << data->smc_state_table.VceBootLevel));
-
-               tonga_enable_disable_vce_dpm(hwmgr, true);
-       } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
-               tonga_enable_disable_vce_dpm(hwmgr, false);
-
-       return 0;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       uint32_t address;
-       int32_t result;
-
-       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
-               return 0;
-
-       memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
-
-       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
-
-       if (result != 0)
-               return result;
-
-       address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
-       return  tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
-                                (uint8_t *)&data->mc_reg_table.data[0],
-                               sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
-                               data->sram_end);
-}
-
-static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
-               return tonga_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (0 == data->need_update_smu7_dpm_table)
-               return 0;
-
-       if ((0 == data->sclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-
-               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
-                       );
-               PP_ASSERT_WITH_CODE(
-                        0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                        PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-                       "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-                       return -1);
-       }
-
-       if ((0 == data->mclk_dpm_key_disabled) &&
-               (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
-                               );
-               PP_ASSERT_WITH_CODE(
-                        0 == smum_send_msg_to_smc(hwmgr->smumgr,
-                                        PPSMC_MSG_MCLKDPM_UnfreezeLevel),
-                   "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-                   return -1);
-       }
-
-       data->need_update_smu7_dpm_table = 0;
-
-       return 0;
-}
-
-static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
-       const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-       uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
-       uint8_t  request;
-
-       if (data->pspp_notify_required  ||
-           data->pcie_performance_request) {
-               if (target_link_speed == PP_PCIEGen3)
-                       request = PCIE_PERF_REQ_GEN3;
-               else if (target_link_speed == PP_PCIEGen2)
-                       request = PCIE_PERF_REQ_GEN2;
-               else
-                       request = PCIE_PERF_REQ_GEN1;
-
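-               /*
-                * A Gen1 request is skipped while the link still runs above
-                * Gen1; presumably dropping the link that far is not
-                * worthwhile here.
-                */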
-               if (request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
-                       data->pcie_performance_request = false;
-                       return 0;
-               }
-
-               if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
-                       if (PP_PCIEGen2 == target_link_speed)
-                               printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
-                       else
-                               printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
-               }
-       }
-
-       data->pcie_performance_request = false;
-       return 0;
-}
-
-static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-       int tmp_result, result = 0;
-
-       tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
-       }
-
-       tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-       tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
-
-       tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
-
-       tmp_result = tonga_update_vce_dpm(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
-
-       tmp_result = tonga_update_sclk_threshold(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
-
-       tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
-
-       tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
-
-       tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
-
-       tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
-               tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
-               PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
-       }
-
-       return result;
-}
-
-/**
-*  Set maximum target operating fan output PWM
-*
-* @param    hwmgr:  the address of the powerplay hardware manager.
-* @param    us_max_fan_pwm:  max operating fan PWM in percent
-* @return   The response that came from the SMC.
-*/
-static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
-}
-
-int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
-       uint32_t num_active_displays = 0;
-       struct cgs_display_info info = {0};
-       info.mode_info = NULL;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       num_active_displays = info.display_count;
-
-       if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
-               tonga_notify_smc_display_change(hwmgr, false);
-       else
-               tonga_notify_smc_display_change(hwmgr, true);
-
-       return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always OK
-*/
-int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       uint32_t num_active_displays = 0;
-       uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-       uint32_t display_gap2;
-       uint32_t pre_vbi_time_in_us;
-       uint32_t frame_time_in_us;
-       uint32_t ref_clock;
-       uint32_t refresh_rate = 0;
-       struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
-
-       info.mode_info = &mode_info;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       num_active_displays = info.display_count;
-
-       display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-       ref_clock = mode_info.ref_clock;
-       refresh_rate = mode_info.refresh_rate;
-
-       if (0 == refresh_rate)
-               refresh_rate = 60;
-
-       frame_time_in_us = 1000000 / refresh_rate;
-
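-       /*
-        * The pre-VBI window is the frame time minus the vblank time and a
-        * 200 us guard interval. Multiplying by ref_clock / 100 converts
-        * microseconds to reference-clock cycles, assuming ref_clock is
-        * reported in 10 kHz units.
-        */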
-       pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-       display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-       if (num_active_displays == 1)
-               tonga_notify_smc_display_change(hwmgr, true);
-
-       return 0;
-}
-
-int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-
-       tonga_program_display_gap(hwmgr);
-
-       /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */
-       return 0;
-}
-
-/**
-*  Set maximum target operating fan output RPM
-*
-* @param    hwmgr:  the address of the powerplay hardware manager.
-* @param    us_max_fan_rpm:  max operating fan RPM value.
-* @return   The response that came from the SMC.
-*/
-static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
-       hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
-       if (phm_is_hw_access_blocked(hwmgr))
-               return 0;
-
-       return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm) ? 0 : -1);
-}
-
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
-{
-       uint32_t reference_clock;
-       uint32_t tc;
-       uint32_t divide;
-
-       ATOM_FIRMWARE_INFO *fw_info;
-       uint16_t size;
-       uint8_t frev, crev;
-       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
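-       /*
-        * When the clock pin mux routes TCLK onto XCLK, the fixed TCLK rate
-        * applies; otherwise the VBIOS reference clock is used, quartered if
-        * the XTALIN divider is enabled.
-        */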
-       tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
-       if (tc)
-               return TCLK;
-
-       fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
-                                                 &size, &frev, &crev);
-
-       if (!fw_info)
-               return 0;
-
-       reference_clock = le16_to_cpu(fw_info->usReferenceClock);
-
-       divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
-       if (0 != divide)
-               return reference_clock / 4;
-
-       return reference_clock;
-}
-
-int tonga_dpm_set_interrupt_state(void *private_data,
-                                        unsigned src_id, unsigned type,
-                                        int enabled)
-{
-       uint32_t cg_thermal_int;
-       struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
-       if (hwmgr == NULL)
-               return -EINVAL;
-
-       switch (type) {
-       case AMD_THERMAL_IRQ_LOW_TO_HIGH:
-               if (enabled) {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               } else {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               }
-               break;
-
-       case AMD_THERMAL_IRQ_HIGH_TO_LOW:
-               if (enabled) {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               } else {
-                       cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-                       cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-                       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-               }
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-                                       const void *thermal_interrupt_info)
-{
-       int result;
-       const struct pp_interrupt_registration_info *info =
-                       (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
-
-       if (info == NULL)
-               return -EINVAL;
-
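-       /*
-        * Source IDs 230 and 231 appear to be the thermal low-to-high and
-        * high-to-low trip interrupts on this ASIC; both route to the same
-        * handler, which switches on the interrupt type.
-        */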
-       result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
-                               tonga_dpm_set_interrupt_state,
-                               info->call_back, info->context);
-
-       if (result)
-               return -EINVAL;
-
-       result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
-                               tonga_dpm_set_interrupt_state,
-                               info->call_back, info->context);
-
-       if (result)
-               return -EINVAL;
-
-       return 0;
-}
-
-bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       bool is_update_required = false;
-       struct cgs_display_info info = {0, 0, NULL};
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (data->display_timing.num_existing_displays != info.display_count)
-               is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
-       if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-               cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
-               if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
-                       is_update_required = true;
-*/
-       return is_update_required;
-}
-
-static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
-                                                          const struct tonga_performance_level *pl2)
-{
-       return ((pl1->memory_clock == pl2->memory_clock) &&
-                 (pl1->engine_clock == pl2->engine_clock) &&
-                 (pl1->pcie_gen == pl2->pcie_gen) &&
-                 (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
-       const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
-       const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
-       int i;
-
-       if (equal == NULL || psa == NULL || psb == NULL)
-               return -EINVAL;
-
-       /* If the two states don't even have the same number of performance levels they cannot be the same state. */
-       if (psa->performance_level_count != psb->performance_level_count) {
-               *equal = false;
-               return 0;
-       }
-
-       for (i = 0; i < psa->performance_level_count; i++) {
-               if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
-                       /* If we have found even one performance level pair that is different the states are different. */
-                       *equal = false;
-                       return 0;
-               }
-       }
-
-       /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
-       *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
-       *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
-       *equal &= (psa->sclk_threshold == psb->sclk_threshold);
-       *equal &= (psa->acp_clk == psb->acp_clk);
-
-       return 0;
-}
-
-static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-       if (mode) {
-               /* stop auto-manage */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl))
-                       tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-               tonga_fan_ctrl_set_static_mode(hwmgr, mode);
-       } else {
-               /* restart auto-manage */
-               tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-       }
-
-       return 0;
-}
-
-static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-       if (hwmgr->fan_ctrl_is_in_default_mode)
-               return hwmgr->fan_ctrl_default_mode;
-       else
-               return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                               CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, uint32_t mask)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-               return -EINVAL;
-
-       switch (type) {
-       case PP_SCLK:
-               if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-               break;
-       case PP_MCLK:
-               if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-               break;
-       case PP_PCIE:
-       {
-               uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-               uint32_t level = 0;
-
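-               /*
-                * Find the index of the highest set bit in the mask: PCIe DPM
-                * is forced to a single level rather than driven by a mask.
-                */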
-               while (tmp >>= 1)
-                       level++;
-
-               if (!data->pcie_dpm_key_disabled)
-                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                       PPSMC_MSG_PCIeDPM_ForceLevel,
-                                       level);
-               break;
-       }
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
-               enum pp_clock_type type, char *buf)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-       int i, now, size = 0;
-       uint32_t clock, pcie_speed;
-
-       switch (type) {
-       case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < sclk_table->count; i++) {
-                       if (clock > sclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
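-               /*
-                * Level values appear to be stored in 10 kHz units, hence the
-                * divide by 100 to print MHz.
-                */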
-               for (i = 0; i < sclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, sclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-               for (i = 0; i < mclk_table->count; i++) {
-                       if (clock > mclk_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMHz %s\n",
-                                       i, mclk_table->dpm_levels[i].value / 100,
-                                       (i == now) ? "*" : "");
-               break;
-       case PP_PCIE:
-               pcie_speed = tonga_get_current_pcie_speed(hwmgr);
-               for (i = 0; i < pcie_table->count; i++) {
-                       if (pcie_speed != pcie_table->dpm_levels[i].value)
-                               continue;
-                       break;
-               }
-               now = i;
-
-               for (i = 0; i < pcie_table->count; i++)
-                       size += sprintf(buf + size, "%d: %s %s\n", i,
-                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
-                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
-                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
-                                       (i == now) ? "*" : "");
-               break;
-       default:
-               break;
-       }
-       return size;
-}
-
-static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-       struct tonga_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       int value;
-
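-       /*
-        * Overdrive as a percentage over the golden (stock) top level: e.g. a
-        * top SCLK of 1100 against a golden 1000 reads as
-        * (1100 - 1000) * 100 / 1000 = 10.
-        */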
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return value;
-}
-
-static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *golden_sclk_table =
-                       &(data->golden_dpm_table.sclk_table);
-       struct pp_power_state  *ps;
-       struct tonga_power_state  *tonga_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
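-       /*
-        * Inverse of tonga_get_sclk_od(): scale the golden top level up by
-        * "value" percent (clamped to 20 above).
-        */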
-       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-                       value / 100 +
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-       return 0;
-}
-
-static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-       struct tonga_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       int value;
-
-       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return value;
-}
-
-static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       struct tonga_single_dpm_table *golden_mclk_table =
-                       &(data->golden_dpm_table.mclk_table);
-       struct pp_power_state  *ps;
-       struct tonga_power_state  *tonga_ps;
-
-       if (value > 20)
-               value = 20;
-
-       ps = hwmgr->request_ps;
-
-       if (ps == NULL)
-               return -EINVAL;
-
-       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-                       value / 100 +
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-       return 0;
-}
-
-static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
-       .backend_init = &tonga_hwmgr_backend_init,
-       .backend_fini = &tonga_hwmgr_backend_fini,
-       .asic_setup = &tonga_setup_asic_task,
-       .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
-       .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
-       .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
-       .force_dpm_level = &tonga_force_dpm_level,
-       .power_state_set = tonga_set_power_state_tasks,
-       .get_power_state_size = tonga_get_power_state_size,
-       .get_mclk = tonga_dpm_get_mclk,
-       .get_sclk = tonga_dpm_get_sclk,
-       .patch_boot_state = tonga_dpm_patch_boot_state,
-       .get_pp_table_entry = tonga_get_pp_table_entry,
-       .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
-       .print_current_perforce_level = tonga_print_current_perforce_level,
-       .powerdown_uvd = tonga_phm_powerdown_uvd,
-       .powergate_uvd = tonga_phm_powergate_uvd,
-       .powergate_vce = tonga_phm_powergate_vce,
-       .disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
-       .update_clock_gatings = tonga_phm_update_clock_gatings,
-       .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
-       .display_config_changed = tonga_display_configuration_changed_task,
-       .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
-       .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
-       .get_temperature = tonga_thermal_get_temperature,
-       .stop_thermal_controller = tonga_thermal_stop_thermal_controller,
-       .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
-       .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
-       .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
-       .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
-       .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
-       .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
-       .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
-       .check_states_equal = tonga_check_states_equal,
-       .set_fan_control_mode = tonga_set_fan_control_mode,
-       .get_fan_control_mode = tonga_get_fan_control_mode,
-       .force_clock_level = tonga_force_clock_level,
-       .print_clock_levels = tonga_print_clock_levels,
-       .get_sclk_od = tonga_get_sclk_od,
-       .set_sclk_od = tonga_set_sclk_od,
-       .get_mclk_od = tonga_get_mclk_od,
-       .set_mclk_od = tonga_set_mclk_od,
-};
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-       hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
-       pp_tonga_thermal_initialize(hwmgr);
-       return 0;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644 (file)
index 3961884..0000000
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_HWMGR_H
-#define TONGA_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu72_discrete.h"
-#include "ppatomctrl.h"
-#include "ppinterrupt.h"
-#include "tonga_powertune.h"
-#include "pp_endian.h"
-
-#define TONGA_MAX_HARDWARE_POWERLEVELS 2
-#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
-
-struct tonga_performance_level {
-       uint32_t        memory_clock;
-       uint32_t        engine_clock;
-       uint16_t    pcie_gen;
-       uint16_t    pcie_lane;
-};
-
-struct _phw_tonga_bacos {
-       uint32_t                          best_match;
-       uint32_t                          baco_flags;
-       struct tonga_performance_level            performance_level;
-};
-typedef struct _phw_tonga_bacos phw_tonga_bacos;
-
-struct _phw_tonga_uvd_clocks {
-       uint32_t   VCLK;
-       uint32_t   DCLK;
-};
-
-typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
-
-struct _phw_tonga_vce_clocks {
-       uint32_t   EVCLK;
-       uint32_t   ECCLK;
-};
-
-typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
-
-struct tonga_power_state {
-       uint32_t                    magic;
-       phw_tonga_uvd_clocks        uvd_clocks;
-       phw_tonga_vce_clocks        vce_clocks;
-       uint32_t                    sam_clk;
-       uint32_t                    acp_clk;
-       uint16_t                    performance_level_count;
-       bool                        dc_compatible;
-       uint32_t                    sclk_threshold;
-       struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct _phw_tonga_dpm_level {
-       bool            enabled;
-       uint32_t    value;
-       uint32_t    param1;
-};
-typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
-
-#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define TONGA_MINIMUM_ENGINE_CLOCK 2500
-
-struct tonga_single_dpm_table {
-       uint32_t count;
-       phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct tonga_dpm_table {
-       struct tonga_single_dpm_table  sclk_table;
-       struct tonga_single_dpm_table  mclk_table;
-       struct tonga_single_dpm_table  pcie_speed_table;
-       struct tonga_single_dpm_table  vddc_table;
-       struct tonga_single_dpm_table  vdd_gfx_table;
-       struct tonga_single_dpm_table  vdd_ci_table;
-       struct tonga_single_dpm_table  mvdd_table;
-};
-typedef struct tonga_dpm_table phw_tonga_dpm_table;
-
-struct _phw_tonga_clock_registers {
-       uint32_t  vCG_SPLL_FUNC_CNTL;
-       uint32_t  vCG_SPLL_FUNC_CNTL_2;
-       uint32_t  vCG_SPLL_FUNC_CNTL_3;
-       uint32_t  vCG_SPLL_FUNC_CNTL_4;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t  vDLL_CNTL;
-       uint32_t  vMCLK_PWRMGT_CNTL;
-       uint32_t  vMPLL_AD_FUNC_CNTL;
-       uint32_t  vMPLL_DQ_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL;
-       uint32_t  vMPLL_FUNC_CNTL_1;
-       uint32_t  vMPLL_FUNC_CNTL_2;
-       uint32_t  vMPLL_SS1;
-       uint32_t  vMPLL_SS2;
-};
-typedef struct _phw_tonga_clock_registers phw_tonga_clock_registers;
-
-struct _phw_tonga_voltage_smio_registers {
-       uint32_t vs0_vid_lower_smio_cntl;
-};
-typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
-
-struct _phw_tonga_mc_reg_entry {
-       uint32_t mclk_max;
-       uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
-
-struct _phw_tonga_mc_reg_table {
-       uint8_t   last;               /* number of registers*/
-       uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
-       uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
-       phw_tonga_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
-       SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
-
-#define DISABLE_MC_LOADMICROCODE   1
-#define DISABLE_MC_CFGPROGRAMMING  2
-
-/* Ultra Low Voltage parameter structure */
-struct _phw_tonga_ulv_parm {
-       bool    ulv_supported;
-       uint32_t   ch_ulv_parameter;
-       uint32_t   ulv_volt_change_delay;
-       struct tonga_performance_level   ulv_power_level;
-};
-typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
-
-#define TONGA_MAX_LEAKAGE_COUNT  8
-
-struct _phw_tonga_leakage_voltage {
-       uint16_t  count;
-       uint16_t  leakage_id[TONGA_MAX_LEAKAGE_COUNT];
-       uint16_t  actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
-};
-typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
-
-struct _phw_tonga_display_timing {
-       uint32_t min_clock_insr;
-       uint32_t num_existing_displays;
-};
-typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
-
-struct _phw_tonga_dpmlevel_enable_mask {
-       uint32_t uvd_dpm_enable_mask;
-       uint32_t vce_dpm_enable_mask;
-       uint32_t acp_dpm_enable_mask;
-       uint32_t samu_dpm_enable_mask;
-       uint32_t sclk_dpm_enable_mask;
-       uint32_t mclk_dpm_enable_mask;
-       uint32_t pcie_dpm_enable_mask;
-};
-typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
-
-struct _phw_tonga_pcie_perf_range {
-       uint16_t max;
-       uint16_t min;
-};
-typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
-
-struct _phw_tonga_vbios_boot_state {
-       uint16_t                                        mvdd_bootup_value;
-       uint16_t                                        vddc_bootup_value;
-       uint16_t                                        vddci_bootup_value;
-       uint16_t                                        vddgfx_bootup_value;
-       uint32_t                                        sclk_bootup_value;
-       uint32_t                                        mclk_bootup_value;
-       uint16_t                                        pcie_gen_bootup_value;
-       uint16_t                                        pcie_lane_bootup_value;
-};
-typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
-
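-/*
- * Distinct single-bit update flags, combined into
- * tonga_hwmgr.need_update_smu7_dpm_table and tested with &.
- */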
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-/* We need to review which fields are needed. */
-/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
-struct tonga_hwmgr {
-       struct tonga_dpm_table               dpm_table;
-       struct tonga_dpm_table               golden_dpm_table;
-
-       uint32_t                           voting_rights_clients0;
-       uint32_t                           voting_rights_clients1;
-       uint32_t                           voting_rights_clients2;
-       uint32_t                           voting_rights_clients3;
-       uint32_t                           voting_rights_clients4;
-       uint32_t                           voting_rights_clients5;
-       uint32_t                           voting_rights_clients6;
-       uint32_t                           voting_rights_clients7;
-       uint32_t                           static_screen_threshold_unit;
-       uint32_t                           static_screen_threshold;
-       uint32_t                           voltage_control;
-       uint32_t                           vdd_gfx_control;
-
-       uint32_t                           vddc_vddci_delta;
-       uint32_t                           vddc_vddgfx_delta;
-
-       struct pp_interrupt_registration_info    internal_high_thermal_interrupt_info;
-       struct pp_interrupt_registration_info    internal_low_thermal_interrupt_info;
-       struct pp_interrupt_registration_info    smc_to_host_interrupt_info;
-       uint32_t                          active_auto_throttle_sources;
-
-       struct pp_interrupt_registration_info    external_throttle_interrupt;
-       irq_handler_func_t             external_throttle_callback;
-       void                             *external_throttle_context;
-
-       struct pp_interrupt_registration_info    ctf_interrupt_info;
-       irq_handler_func_t             ctf_callback;
-       void                             *ctf_context;
-
-       phw_tonga_clock_registers         clock_registers;
-       phw_tonga_voltage_smio_registers  voltage_smio_registers;
-
-       bool    is_memory_GDDR5;
-       uint16_t                          acpi_vddc;
-       bool    pspp_notify_required;        /* Flag to indicate if PSPP notification to SBIOS is required */
-       uint16_t                          force_pcie_gen;            /* The forced PCI-E speed if not 0xffff */
-       uint16_t                          acpi_pcie_gen;             /* The PCI-E speed at ACPI time */
-       uint32_t                           pcie_gen_cap;             /* The PCI-E speed capabilities bitmap from CAIL */
-       uint32_t                           pcie_lane_cap;            /* The PCI-E lane capabilities bitmap from CAIL */
-       uint32_t                           pcie_spc_cap;             /* Symbol Per Clock Capabilities from registry */
-       phw_tonga_leakage_voltage       vddc_leakage;            /* The Leakage VDDC supported (based on leakage ID).*/
-       phw_tonga_leakage_voltage       vddcgfx_leakage;         /* The Leakage VDDC supported (based on leakage ID). */
-       phw_tonga_leakage_voltage       vddci_leakage;           /* The Leakage VDDCI supported (based on leakage ID). */
-
-       uint32_t                           mvdd_control;
-       uint32_t                           vddc_mask_low;
-       uint32_t                           mvdd_mask_low;
-       uint16_t                          max_vddc_in_pp_table;        /* the maximum VDDC value in the powerplay table*/
-       uint16_t                          min_vddc_in_pp_table;
-       uint16_t                          max_vddci_in_pp_table;       /* the maximum VDDCI value in the powerplay table */
-       uint16_t                          min_vddci_in_pp_table;
-       uint32_t                           mclk_strobe_mode_threshold;
-       uint32_t                           mclk_stutter_mode_threshold;
-       uint32_t                           mclk_edc_enable_threshold;
-       uint32_t                           mclk_edc_wr_enable_threshold;
-       bool    is_uvd_enabled;
-       bool    is_xdma_enabled;
-       phw_tonga_vbios_boot_state      vbios_boot_state;
-
-       bool                         battery_state;
-       bool                         is_tlu_enabled;
-       bool                         pcie_performance_request;
-
-       /* -------------- SMC SRAM Address of firmware header tables ----------------*/
-       uint32_t                           sram_end;                 /* The first address after the SMC SRAM. */
-       uint32_t                           dpm_table_start;          /* The start of the dpm table in the SMC SRAM. */
-       uint32_t                           soft_regs_start;          /* The start of the soft registers in the SMC SRAM. */
-       uint32_t                           mc_reg_table_start;       /* The start of the mc register table in the SMC SRAM. */
-       uint32_t                           fan_table_start;          /* The start of the fan table in the SMC SRAM. */
-       uint32_t                           arb_table_start;          /* The start of the ARB setting table in the SMC SRAM. */
-       SMU72_Discrete_DpmTable         smc_state_table;             /* The carbon copy of the SMC state table. */
-       SMU72_Discrete_MCRegisters      mc_reg_table;
-       SMU72_Discrete_Ulv              ulv_setting;                 /* The carbon copy of ULV setting. */
-       /* -------------- Stuff originally coming from Evergreen --------------------*/
-       phw_tonga_mc_reg_table                  tonga_mc_reg_table;
-       uint32_t                         vdd_ci_control;
-       pp_atomctrl_voltage_table        vddc_voltage_table;
-       pp_atomctrl_voltage_table        vddci_voltage_table;
-       pp_atomctrl_voltage_table        vddgfx_voltage_table;
-       pp_atomctrl_voltage_table        mvdd_voltage_table;
-
-       uint32_t                           mgcg_cgtt_local2;
-       uint32_t                           mgcg_cgtt_local3;
-       uint32_t                           gpio_debug;
-       uint32_t                                                        mc_micro_code_feature;
-       uint32_t                                                        highest_mclk;
-       uint16_t                          acpi_vdd_ci;
-       uint8_t                           mvdd_high_index;
-       uint8_t                           mvdd_low_index;
-       bool                         dll_defaule_on;
-       bool                         performance_request_registered;
-
-       /* ----------------- Low Power Features ---------------------*/
-       phw_tonga_bacos                                 bacos;
-       phw_tonga_ulv_parm              ulv;
-       /* ----------------- CAC Stuff ---------------------*/
-       uint32_t                                        cac_table_start;
-       bool                         cac_configuration_required;    /* TRUE if PP_CACConfigurationRequired == 1 */
-       bool                         driver_calculate_cac_leakage;  /* TRUE if PP_DriverCalculateCACLeakage == 1 */
-       bool                         cac_enabled;
-       /* ----------------- DPM2 Parameters ---------------------*/
-       uint32_t                                        power_containment_features;
-       bool                         enable_bapm_feature;
-       bool                         enable_tdc_limit_feature;
-       bool                         enable_pkg_pwr_tracking_feature;
-       bool                         disable_uvd_power_tune_feature;
-       phw_tonga_pt_defaults           *power_tune_defaults;
-       SMU72_Discrete_PmFuses           power_tune_table;
-       uint32_t                           ul_dte_tj_offset;             /* Fudge factor in DPM table to correct HW DTE errors */
-       uint32_t                           fast_watemark_threshold;      /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
-
-       /* ----------------- Phase Shedding ---------------------*/
-       bool                         vddc_phase_shed_control;
-       /* --------------------- DI/DT --------------------------*/
-       phw_tonga_display_timing       display_timing;
-       /* --------- ReadRegistry data for memory and engine clock margins ---- */
-       uint32_t                           engine_clock_data;
-       uint32_t                           memory_clock_data;
-       /* -------- Thermal Temperature Setting --------------*/
-       phw_tonga_dpmlevel_enable_mask     dpm_level_enable_mask;
-       uint32_t                           need_update_smu7_dpm_table;
-       uint32_t                           sclk_dpm_key_disabled;
-       uint32_t                           mclk_dpm_key_disabled;
-       uint32_t                           pcie_dpm_key_disabled;
-       uint32_t                           min_engine_clocks; /* used to store the previous dal min sclock */
-       phw_tonga_pcie_perf_range       pcie_gen_performance;
-       phw_tonga_pcie_perf_range       pcie_lane_performance;
-       phw_tonga_pcie_perf_range       pcie_gen_power_saving;
-       phw_tonga_pcie_perf_range       pcie_lane_power_saving;
-       bool                            use_pcie_performance_levels;
-       bool                            use_pcie_power_saving_levels;
-       uint32_t                           activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
-       uint32_t                           mclk_activity_target;
-       uint32_t                           low_sclk_interrupt_threshold;
-       uint32_t                           last_mclk_dpm_enable_mask;
-       bool                                                            uvd_enabled;
-       uint32_t                           pcc_monitor_enabled;
-
-       /* --------- Power Gating States ------------*/
-       bool                           uvd_power_gated;  /* 1: gated, 0:not gated */
-       bool                           vce_power_gated;  /* 1: gated, 0:not gated */
-       bool                           samu_power_gated; /* 1: gated, 0:not gated */
-       bool                           acp_power_gated;  /* 1: gated, 0:not gated */
-       bool                           pg_acp_init;
-};
-
-typedef struct tonga_hwmgr tonga_hwmgr;
-
-#define TONGA_DPM2_NEAR_TDP_DEC          10
-#define TONGA_DPM2_ABOVE_SAFE_INC        5
-#define TONGA_DPM2_BELOW_SAFE_INC        20
-
-#define TONGA_DPM2_LTA_WINDOW_SIZE       7  /* Log2 of the LTA window size (l2numWin_TDP). E.g. if the LTA window size is 128, this value should be log2(128) = 7. */
-
-#define TONGA_DPM2_LTS_TRUNCATE          0
-
-#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT            80  /* Maximum 100 */
-
-#define TONGA_DPM2_MAXPS_PERCENT_H                   90  /* Maximum 0xFF */
-#define TONGA_DPM2_MAXPS_PERCENT_M                   90  /* Maximum 0xFF */
-
-#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN         50
-
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
-#define TONGA_DPM2_SQ_RAMP_MIN_POWER                 0x12
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
-#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE  0x1E
-#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO  0xF
-
-#define TONGA_VOLTAGE_CONTROL_NONE                   0x0
-#define TONGA_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define TONGA_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define TONGA_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define TONGA_Q88_FORMAT_CONVERSION_UNIT             256 /* To convert to Q8.8 format for firmware */
-
-#define TONGA_UNUSED_GPIO_PIN                        0x7F
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
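
TONGA_Q88_FORMAT_CONVERSION_UNIT above is simply 1 << 8, the scale factor for the
SMU firmware's Q8.8 fixed-point format (8 integer bits, 8 fractional bits). A
minimal sketch of the conversion, with helper names that are illustrative and not
part of the driver:

    #include <stdint.h>

    /* Q8.8: multiply by 256 (TONGA_Q88_FORMAT_CONVERSION_UNIT) to
     * scale an integer into fixed point for the firmware. */
    static inline uint16_t to_q88(uint8_t value)
    {
            return (uint16_t)(value * 256);
    }

    /* Truncate the 8 fractional bits to recover the integer part. */
    static inline uint8_t from_q88(uint16_t q88)
    {
            return (uint8_t)(q88 / 256);
    }

For example, an integer value of 50 becomes 0x3200 in Q8.8.
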
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
deleted file mode 100644 (file)
index 8e6670b..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_POWERTUNE_H
-#define TONGA_POWERTUNE_H
-
-enum _phw_tonga_ptc_config_reg_type {
-       TONGA_CONFIGREG_MMR = 0,
-       TONGA_CONFIGREG_SMC_IND,
-       TONGA_CONFIGREG_DIDT_IND,
-       TONGA_CONFIGREG_CACHE,
-
-       TONGA_CONFIGREG_MAX
-};
-typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
-
-struct _phw_tonga_pt_config_reg {
-       uint32_t                           Offset;
-       uint32_t                           Mask;
-       uint32_t                           Shift;
-       uint32_t                           Value;
-       phw_tonga_ptc_config_reg_type     Type;
-};
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
-
-struct _phw_tonga_pt_defaults {
-       uint8_t   svi_load_line_en;
-       uint8_t   svi_load_line_vddC;
-       uint8_t   tdc_vddc_throttle_release_limit_perc;
-       uint8_t   tdc_mawt;
-       uint8_t   tdc_waterfall_ctl;
-       uint8_t   dte_ambient_temp_base;
-       uint32_t  display_cac;
-       uint32_t  bamp_temp_gradient;
-       uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-       uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-};
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
-
-#endif
-
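
Each phw_tonga_pt_config_reg above names a register field (Offset, Mask, Shift)
and a Value to program into it, with Type selecting the register space. A
self-contained sketch of the read-modify-write such tables drive; reg_read() and
reg_write() here are stand-ins modelling a toy flat register file, not the
driver's per-Type accessors:

    #include <stdint.h>

    static uint32_t regs[256];                        /* toy register file */
    static uint32_t reg_read(uint32_t off)            { return regs[off]; }
    static void reg_write(uint32_t off, uint32_t v)   { regs[off] = v; }

    struct config_reg { uint32_t Offset, Mask, Shift, Value; };

    /* Clear the field selected by Mask, then insert Value at Shift. */
    static void apply_config_reg(const struct config_reg *r)
    {
            uint32_t data = reg_read(r->Offset);

            data &= ~r->Mask;
            data |= (r->Value << r->Shift) & r->Mask;
            reg_write(r->Offset, data);
    }
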
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
deleted file mode 100644 (file)
index f127198..0000000
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_PPTABLE_H
-#define TONGA_PPTABLE_H
-
-/** \file
- * This is a PowerPlay table header file
- */
-#pragma pack(push, 1)
-
-#include "hwmgr.h"
-
-#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN                                 0x80    /* No fan is connected to this controller. */
-
-#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE      0
-#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163   17
-#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA     21
-#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI      22
-
-/*
- * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
- * We probably should reserve the bit 0x80 for this use.
- * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
- * The driver can pick the correct internal controller based on the ASIC.
- */
-
-#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    /* ADT7473 Fan Control + Internal Thermal Controller */
-#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    /* EMC2103 Fan Control + Internal Thermal Controller */
-
-/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */
-#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL              0x1            /* This cap indicates whether vddgfx will be a separated power rail. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY                   0x2            /* This cap indicates whether this is a mobile part and CCC needs to show the PowerPlay page. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE            0x4            /* This cap indicates whether power source notification is done by SBIOS directly. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND      0x8            /* Enables the option to force the voltage island feature off, regardless of VddGfx power rail support. */
-#define ____RETIRE16____                                0x10
-#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC                 0x20            /* This cap indicates whether power source notification is done by GPIO directly. */
-#define ____RETIRE64____                                0x40
-#define ____RETIRE128____                               0x80
-#define ____RETIRE256____                              0x100
-#define ____RETIRE512____                              0x200
-#define ____RETIRE1024____                             0x400
-#define ____RETIRE2048____                             0x800
-#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL             0x1000            /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */
-#define ____RETIRE2000____                            0x2000
-#define ____RETIRE4000____                            0x4000
-#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL            0x8000            /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */
-#define ____RETIRE10000____                          0x10000
-#define ATOM_TONGA_PP_PLATFORM_CAP_BACO                    0x20000            /* Enable to indicate the driver supports BACO state. */
-
-#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17         0x100000     /* Enable to indicate the driver supports thermal2GPIO17. */
-#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL  0x1000000     /* Enable to indicate if thermal and PCC are sharing the same GPIO */
-#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE       0x2000000
-
-/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK               0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT              0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE               0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY            1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED           3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE        5
-/* 2, 4, 6, 7 are reserved */
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT                  0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL               0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE    0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST                  0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED                0x0080
-#define ATOM_PPLIB_CLASSIFICATION_ACPI                  0x1000
-
-/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
-
-#define ATOM_Tonga_DISALLOW_ON_DC                       0x00004000
-#define ATOM_Tonga_ENABLE_VARIBRIGHT                    0x00008000
-
-#define ATOM_Tonga_TABLE_REVISION_TONGA                 7
-
-typedef struct _ATOM_Tonga_POWERPLAYTABLE {
-       ATOM_COMMON_TABLE_HEADER sHeader;
-
-       UCHAR  ucTableRevision;
-       USHORT usTableSize;                                             /*the size of header structure */
-
-       ULONG   ulGoldenPPID;
-       ULONG   ulGoldenRevision;
-       USHORT  usFormatID;
-
-       USHORT  usVoltageTime;                                   /*in microseconds */
-       ULONG   ulPlatformCaps;                                   /*See ATOM_Tonga_CAPS_* */
-
-       ULONG   ulMaxODEngineClock;                        /*For Overdrive.  */
-       ULONG   ulMaxODMemoryClock;                        /*For Overdrive. */
-
-       USHORT  usPowerControlLimit;
-       USHORT  usUlvVoltageOffset;                               /*in mv units */
-
-       USHORT  usStateArrayOffset;                               /*points to ATOM_Tonga_State_Array */
-       USHORT  usFanTableOffset;                                 /*points to ATOM_Tonga_Fan_Table */
-       USHORT  usThermalControllerOffset;                 /*points to ATOM_Tonga_Thermal_Controller */
-       USHORT  usReserv;                                                  /*CustomThermalPolicy removed for Tonga. Keep this field reserved. */
-
-       USHORT  usMclkDependencyTableOffset;       /*points to ATOM_Tonga_MCLK_Dependency_Table */
-       USHORT  usSclkDependencyTableOffset;       /*points to ATOM_Tonga_SCLK_Dependency_Table */
-       USHORT  usVddcLookupTableOffset;                   /*points to ATOM_Tonga_Voltage_Lookup_Table */
-       USHORT  usVddgfxLookupTableOffset;              /*points to ATOM_Tonga_Voltage_Lookup_Table */
-
-       USHORT  usMMDependencyTableOffset;                /*points to ATOM_Tonga_MM_Dependency_Table */
-
-       USHORT  usVCEStateTableOffset;                     /*points to ATOM_Tonga_VCE_State_Table; */
-
-       USHORT  usPPMTableOffset;                                 /*points to ATOM_Tonga_PPM_Table */
-       USHORT  usPowerTuneTableOffset;                   /*points to ATOM_PowerTune_Table */
-
-       USHORT  usHardLimitTableOffset;                    /*points to ATOM_Tonga_Hard_Limit_Table */
-
-       USHORT  usPCIETableOffset;                                /*points to ATOM_Tonga_PCIE_Table */
-
-       USHORT  usGPIOTableOffset;                                /*points to ATOM_Tonga_GPIO_Table */
-
-       USHORT  usReserved[6];                                     /*TODO: modify reserved size to fit structure alignment */
-} ATOM_Tonga_POWERPLAYTABLE;
-
-typedef struct _ATOM_Tonga_State {
-       UCHAR  ucEngineClockIndexHigh;
-       UCHAR  ucEngineClockIndexLow;
-
-       UCHAR  ucMemoryClockIndexHigh;
-       UCHAR  ucMemoryClockIndexLow;
-
-       UCHAR  ucPCIEGenLow;
-       UCHAR  ucPCIEGenHigh;
-
-       UCHAR  ucPCIELaneLow;
-       UCHAR  ucPCIELaneHigh;
-
-       USHORT usClassification;
-       ULONG ulCapsAndSettings;
-       USHORT usClassification2;
-       UCHAR  ucUnused[4];
-} ATOM_Tonga_State;
-
-typedef struct _ATOM_Tonga_State_Array {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;             /* Number of entries. */
-       ATOM_Tonga_State states[1];     /* Dynamically allocate entries. */
-} ATOM_Tonga_State_Array;
-
-typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
-       UCHAR  ucVddcInd;       /* Vddc voltage */
-       USHORT usVddci;
-       USHORT usVddgfxOffset;  /* Offset relative to Vddc voltage */
-       USHORT usMvdd;
-       ULONG ulMclk;
-       USHORT usReserved;
-} ATOM_Tonga_MCLK_Dependency_Record;
-
-typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_MCLK_Dependency_Record entries[1];                           /* Dynamically allocate entries. */
-} ATOM_Tonga_MCLK_Dependency_Table;
-
-typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
-       UCHAR  ucVddInd;                                                                                        /* Base voltage */
-       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
-       ULONG ulSclk;
-       USHORT usEdcCurrent;
-       UCHAR  ucReliabilityTemperature;
-       UCHAR  ucCKSVOffsetandDisable;                                                    /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
-} ATOM_Tonga_SCLK_Dependency_Record;
-
-typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
-} ATOM_Tonga_SCLK_Dependency_Table;
-
-typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
-       UCHAR  ucVddInd;                                                                                        /* Base voltage */
-       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
-       ULONG ulSclk;
-       USHORT usEdcCurrent;
-       UCHAR  ucReliabilityTemperature;
-       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
-       ULONG  ulSclkOffset;
-} ATOM_Polaris_SCLK_Dependency_Record;
-
-typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                     /* Number of entries. */
-       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
-} ATOM_Polaris_SCLK_Dependency_Table;
-
-typedef struct _ATOM_Tonga_PCIE_Record {
-       UCHAR ucPCIEGenSpeed;
-       UCHAR usPCIELaneWidth;
-       UCHAR ucReserved[2];
-} ATOM_Tonga_PCIE_Record;
-
-typedef struct _ATOM_Tonga_PCIE_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_PCIE_Record entries[1];                                                      /* Dynamically allocate entries. */
-} ATOM_Tonga_PCIE_Table;
-
-typedef struct _ATOM_Polaris10_PCIE_Record {
-       UCHAR ucPCIEGenSpeed;
-       UCHAR usPCIELaneWidth;
-       UCHAR ucReserved[2];
-       ULONG ulPCIE_Sclk;
-} ATOM_Polaris10_PCIE_Record;
-
-typedef struct _ATOM_Polaris10_PCIE_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                         /* Number of entries. */
-       ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
-} ATOM_Polaris10_PCIE_Table;
-
-
-typedef struct _ATOM_Tonga_MM_Dependency_Record {
-       UCHAR   ucVddcInd;                                                                                       /* VDDC voltage */
-       USHORT  usVddgfxOffset;                                                                   /* Offset relative to VDDC voltage */
-       ULONG  ulDClk;                                                                                          /* UVD D-clock */
-       ULONG  ulVClk;                                                                                          /* UVD V-clock */
-       ULONG  ulEClk;                                                                                          /* VCE clock */
-       ULONG  ulAClk;                                                                                          /* ACP clock */
-       ULONG  ulSAMUClk;                                                                                       /* SAMU clock */
-} ATOM_Tonga_MM_Dependency_Record;
-
-typedef struct _ATOM_Tonga_MM_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_MM_Dependency_Record entries[1];                        /* Dynamically allocate entries. */
-} ATOM_Tonga_MM_Dependency_Table;
-
-typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
-       USHORT usVdd;                                                                                      /* Base voltage */
-       USHORT usCACLow;
-       USHORT usCACMid;
-       USHORT usCACHigh;
-} ATOM_Tonga_Voltage_Lookup_Record;
-
-typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_Voltage_Lookup_Record entries[1];                            /* Dynamically allocate entries. */
-} ATOM_Tonga_Voltage_Lookup_Table;
-
-typedef struct _ATOM_Tonga_Fan_Table {
-       UCHAR   ucRevId;                                                 /* Change this whenever the table format or version changes, so that readers can tell the layouts apart. */
-       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
-       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
-       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
-       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
-       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
-       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
-       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
-       USHORT  usTMax;                                                  /* The max temperature */
-       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
-       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
-       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
-       USHORT  usFanRPMMax;                                      /* The default value in RPM */
-       ULONG  ulMinFanSCLKAcousticLimit;          /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
-       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
-       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
-       USHORT  usReserved;
-} ATOM_Tonga_Fan_Table;
-
-typedef struct _ATOM_Fiji_Fan_Table {
-       UCHAR   ucRevId;                                                 /* Change this whenever the table format or version changes, so that readers can tell the layouts apart. */
-       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
-       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
-       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
-       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
-       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
-       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
-       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
-       USHORT  usTMax;                                                  /* The max temperature */
-       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
-       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
-       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
-       USHORT  usFanRPMMax;                                      /* The default value in RPM */
-       ULONG  ulMinFanSCLKAcousticLimit;               /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
-       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
-       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
-       USHORT  usFanGainEdge;
-       USHORT  usFanGainHotspot;
-       USHORT  usFanGainLiquid;
-       USHORT  usFanGainVrVddc;
-       USHORT  usFanGainVrMvdd;
-       USHORT  usFanGainPlx;
-       USHORT  usFanGainHbm;
-       USHORT  usReserved;
-} ATOM_Fiji_Fan_Table;
-
-typedef struct _ATOM_Tonga_Thermal_Controller {
-       UCHAR ucRevId;
-       UCHAR ucType;              /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */
-       UCHAR ucI2cLine;                /* as interpreted by DAL I2C */
-       UCHAR ucI2cAddress;
-       UCHAR ucFanParameters;  /* Fan Control Parameters. */
-       UCHAR ucFanMinRPM;       /* Fan Minimum RPM (hundreds) -- for display purposes only. */
-       UCHAR ucFanMaxRPM;       /* Fan Maximum RPM (hundreds) -- for display purposes only. */
-       UCHAR ucReserved;
-       UCHAR ucFlags;             /* to be defined */
-} ATOM_Tonga_Thermal_Controller;
-
-typedef struct _ATOM_Tonga_VCE_State_Record {
-       UCHAR  ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */
-       UCHAR  ucFlag;          /* 2 bits indicating memory p-states */
-       UCHAR  ucSCLKIndex;             /*index into ATOM_Tonga_SCLK_Dependency_Table */
-       UCHAR  ucMCLKIndex;             /*index into ATOM_Tonga_MCLK_Dependency_Table */
-} ATOM_Tonga_VCE_State_Record;
-
-typedef struct _ATOM_Tonga_VCE_State_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;
-       ATOM_Tonga_VCE_State_Record entries[1];
-} ATOM_Tonga_VCE_State_Table;
-
-typedef struct _ATOM_Tonga_PowerTune_Table {
-       UCHAR  ucRevId;
-       USHORT usTDP;
-       USHORT usConfigurableTDP;
-       USHORT usTDC;
-       USHORT usBatteryPowerLimit;
-       USHORT usSmallPowerLimit;
-       USHORT usLowCACLeakage;
-       USHORT usHighCACLeakage;
-       USHORT usMaximumPowerDeliveryLimit;
-       USHORT usTjMax;
-       USHORT usPowerTuneDataSetID;
-       USHORT usEDCLimit;
-       USHORT usSoftwareShutdownTemp;
-       USHORT usClockStretchAmount;
-       USHORT usReserve[2];
-} ATOM_Tonga_PowerTune_Table;
-
-typedef struct _ATOM_Fiji_PowerTune_Table {
-       UCHAR  ucRevId;
-       USHORT usTDP;
-       USHORT usConfigurableTDP;
-       USHORT usTDC;
-       USHORT usBatteryPowerLimit;
-       USHORT usSmallPowerLimit;
-       USHORT usLowCACLeakage;
-       USHORT usHighCACLeakage;
-       USHORT usMaximumPowerDeliveryLimit;
-       USHORT usTjMax;  /* For Fiji, this is also usTemperatureLimitEdge; */
-       USHORT usPowerTuneDataSetID;
-       USHORT usEDCLimit;
-       USHORT usSoftwareShutdownTemp;
-       USHORT usClockStretchAmount;
-       USHORT usTemperatureLimitHotspot;  /*The following are added for Fiji */
-       USHORT usTemperatureLimitLiquid1;
-       USHORT usTemperatureLimitLiquid2;
-       USHORT usTemperatureLimitVrVddc;
-       USHORT usTemperatureLimitVrMvdd;
-       USHORT usTemperatureLimitPlx;
-       UCHAR  ucLiquid1_I2C_address;  /*Liquid */
-       UCHAR  ucLiquid2_I2C_address;
-       UCHAR  ucLiquid_I2C_Line;
-       UCHAR  ucVr_I2C_address;        /*VR */
-       UCHAR  ucVr_I2C_Line;
-       UCHAR  ucPlx_I2C_address;  /*PLX */
-       UCHAR  ucPlx_I2C_Line;
-       USHORT usReserved;
-} ATOM_Fiji_PowerTune_Table;
-
-#define ATOM_PPM_A_A    1
-#define ATOM_PPM_A_I    2
-typedef struct _ATOM_Tonga_PPM_Table {
-       UCHAR   ucRevId;
-       UCHAR   ucPpmDesign;              /*A+I or A+A */
-       USHORT  usCpuCoreNumber;
-       ULONG  ulPlatformTDP;
-       ULONG  ulSmallACPlatformTDP;
-       ULONG  ulPlatformTDC;
-       ULONG  ulSmallACPlatformTDC;
-       ULONG  ulApuTDP;
-       ULONG  ulDGpuTDP;
-       ULONG  ulDGpuUlvPower;
-       ULONG  ulTjmax;
-} ATOM_Tonga_PPM_Table;
-
-typedef struct _ATOM_Tonga_Hard_Limit_Record {
-       ULONG  ulSCLKLimit;
-       ULONG  ulMCLKLimit;
-       USHORT  usVddcLimit;
-       USHORT  usVddciLimit;
-       USHORT  usVddgfxLimit;
-} ATOM_Tonga_Hard_Limit_Record;
-
-typedef struct _ATOM_Tonga_Hard_Limit_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;
-       ATOM_Tonga_Hard_Limit_Record entries[1];
-} ATOM_Tonga_Hard_Limit_Table;
-
-typedef struct _ATOM_Tonga_GPIO_Table {
-       UCHAR  ucRevId;
-       UCHAR  ucVRHotTriggeredSclkDpmIndex;            /* If VRHot signal is triggered SCLK will be limited to this DPM level */
-       UCHAR  ucReserve[5];
-} ATOM_Tonga_GPIO_Table;
-
-typedef struct _PPTable_Generic_SubTable_Header {
-       UCHAR  ucRevId;
-} PPTable_Generic_SubTable_Header;
-
-
-#pragma pack(pop)
-
-
-#endif
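
Every usXxxTableOffset in ATOM_Tonga_POWERPLAYTABLE above is a little-endian byte
offset from the start of the table, and an offset of 0 means the subtable is
absent; tonga_processpptables.c below resolves them with
((unsigned long)table + le16_to_cpu(offset)) casts. A hedged, self-contained
sketch of that pattern, with simplified structs and the host assumed
little-endian in place of le16_to_cpu():

    #include <stdint.h>
    #include <stddef.h>

    struct pp_table_hdr { uint8_t rev; uint16_t sub_offset; }; /* toy header */
    struct sub_hdr      { uint8_t rev; }; /* cf. PPTable_Generic_SubTable_Header */

    /* Offset 0 means "no such subtable"; otherwise it is a byte
     * offset from the start of the whole PowerPlay table. */
    static const struct sub_hdr *resolve_subtable(const struct pp_table_hdr *t,
                                                  uint16_t offset)
    {
            if (offset == 0)
                    return NULL;
            return (const struct sub_hdr *)((const uint8_t *)t + offset);
    }
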
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
deleted file mode 100644 (file)
index cfb647f..0000000
+++ /dev/null
@@ -1,1214 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-
-#include "tonga_processpptables.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "pp_debug.h"
-#include "hwmgr.h"
-#include "cgs_common.h"
-#include "tonga_pptable.h"
-
-/**
- * Private Function used during initialization.
- * @param hwmgr Pointer to the hardware manager.
- * @param setIt A flag indicating whether the capability should be set (TRUE) or reset (FALSE).
- * @param cap Which capability to set/reset.
- */
-static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap)
-{
-       if (setIt)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
-       else
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
-}
-
-
-/**
- * Private Function used during initialization.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_caps the bit array (from BIOS) of capability bits.
- * @exception the current implementation always returns 0.
- */
-static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
-{
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____),
-               "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____),
-               "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____),
-               "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____),
-               "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____),
-               "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue);
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY),
-                       PHM_PlatformCaps_PowerPlaySupport
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
-                       PHM_PlatformCaps_BiosPowerSourceControl
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC),
-                       PHM_PlatformCaps_AutomaticDCTransition
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL),
-                       PHM_PlatformCaps_EnableMVDDControl
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL),
-                       PHM_PlatformCaps_ControlVDDCI
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL),
-                       PHM_PlatformCaps_ControlVDDGFX
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO),
-                       PHM_PlatformCaps_BACO
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND),
-                       PHM_PlatformCaps_DisableVoltageIsland
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
-                       PHM_PlatformCaps_CombinePCCWithThermalSignal
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
-                       PHM_PlatformCaps_LoadPostProductionFirmware
-                 );
-
-       return 0;
-}
-
-/**
- * Private Function to get the PowerPlay Table Address.
- */
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
-{
-       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-
-       u16 size;
-       u8 frev, crev;
-       void *table_address = (void *)hwmgr->soft_pp_table;
-
-       if (!table_address) {
-               table_address = (ATOM_Tonga_POWERPLAYTABLE *)
-                               cgs_atom_get_data_table(hwmgr->device,
-                                               index, &size, &frev, &crev);
-               hwmgr->soft_pp_table = table_address;   /*Cache the result in RAM.*/
-               hwmgr->soft_pp_table_size = size;
-       }
-
-       return table_address;
-}
-
-static int get_vddc_lookup_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table **lookup_table,
-               const ATOM_Tonga_Voltage_Lookup_Table   *vddc_lookup_pp_tables,
-               uint32_t        max_levels
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_voltage_lookup_table *table;
-
-       PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
-               "Invalid CAC Leakage PowerPlay Table!", return 1);
-
-       table_size = sizeof(uint32_t) +
-               sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
-
-       table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       memset(table, 0x00, table_size);
-
-       table->count = vddc_lookup_pp_tables->ucNumEntries;
-
-       for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
-               table->entries[i].us_calculated = 0;
-               table->entries[i].us_vdd =
-                       vddc_lookup_pp_tables->entries[i].usVdd;
-               table->entries[i].us_cac_low =
-                       vddc_lookup_pp_tables->entries[i].usCACLow;
-               table->entries[i].us_cac_mid =
-                       vddc_lookup_pp_tables->entries[i].usCACMid;
-               table->entries[i].us_cac_high =
-                       vddc_lookup_pp_tables->entries[i].usCACHigh;
-       }
-
-       *lookup_table = table;
-
-       return 0;
-}
-
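
get_vddc_lookup_table() above shows the allocation pattern used throughout this
parser: a 32-bit count followed by an array of fixed-size records, sized as
sizeof(uint32_t) + sizeof(record) * n. A self-contained model of the same layout,
with calloc standing in for kzalloc so the sketch builds in userspace:

    #include <stdint.h>
    #include <stdlib.h>

    struct rec   { uint16_t vdd, cac_low, cac_mid, cac_high; };
    struct table { uint32_t count; struct rec entries[]; }; /* flexible array */

    static struct table *alloc_table(uint32_t n)
    {
            /* Same arithmetic as the kernel code: header plus n
             * records, zero-initialized so unset fields read as 0. */
            struct table *t = calloc(1, sizeof(*t) + n * sizeof(struct rec));

            if (t)
                    t->count = n;
            return t;
    }
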
-/**
- * Private Function used during initialization.
- * Initialize Platform Power Management Parameter table
- * @param hwmgr Pointer to the hardware manager.
- * @param atom_ppm_table Pointer to PPM table in VBIOS
- */
-static int get_platform_power_management_table(
-               struct pp_hwmgr *hwmgr,
-               ATOM_Tonga_PPM_Table *atom_ppm_table)
-{
-       struct phm_ppm_table *ptr = kzalloc(sizeof(struct phm_ppm_table), GFP_KERNEL);
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (NULL == ptr)
-               return -ENOMEM;
-
-       ptr->ppm_design
-               = atom_ppm_table->ucPpmDesign;
-       ptr->cpu_core_number
-               = atom_ppm_table->usCpuCoreNumber;
-       ptr->platform_tdp
-               = atom_ppm_table->ulPlatformTDP;
-       ptr->small_ac_platform_tdp
-               = atom_ppm_table->ulSmallACPlatformTDP;
-       ptr->platform_tdc
-               = atom_ppm_table->ulPlatformTDC;
-       ptr->small_ac_platform_tdc
-               = atom_ppm_table->ulSmallACPlatformTDC;
-       ptr->apu_tdp
-               = atom_ppm_table->ulApuTDP;
-       ptr->dgpu_tdp
-               = atom_ppm_table->ulDGpuTDP;
-       ptr->dgpu_ulv_power
-               = atom_ppm_table->ulDGpuUlvPower;
-       ptr->tj_max
-               = atom_ppm_table->ulTjmax;
-
-       pp_table_information->ppm_parameter_table = ptr;
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize TDP limits for DPM2
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- */
-static int init_dpm_2_parameters(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       int result = 0;
-       struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       ATOM_Tonga_PPM_Table *atom_ppm_table;
-       uint32_t disable_ppm = 0;
-       uint32_t disable_power_control = 0;
-
-       pp_table_information->us_ulv_voltage_offset =
-               le16_to_cpu(powerplay_table->usUlvVoltageOffset);
-
-       pp_table_information->ppm_parameter_table = NULL;
-       pp_table_information->vddc_lookup_table = NULL;
-       pp_table_information->vddgfx_lookup_table = NULL;
-       /* TDP limits */
-       hwmgr->platform_descriptor.TDPODLimit =
-               le16_to_cpu(powerplay_table->usPowerControlLimit);
-       hwmgr->platform_descriptor.TDPAdjustment = 0;
-       hwmgr->platform_descriptor.VidAdjustment = 0;
-       hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
-       hwmgr->platform_descriptor.VidMinLimit = 0;
-       hwmgr->platform_descriptor.VidMaxLimit = 1500000;
-       hwmgr->platform_descriptor.VidStep = 6250;
-
-       disable_power_control = 0;
-       if (0 == disable_power_control) {
-               /* enable TDP overdrive (PowerControl) feature as well if supported */
-               if (hwmgr->platform_descriptor.TDPODLimit != 0)
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerControl);
-       }
-
-       if (0 != powerplay_table->usVddcLookupTableOffset) {
-               const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable =
-                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
-
-               result = get_vddc_lookup_table(hwmgr,
-                       &pp_table_information->vddc_lookup_table, pVddcCACTable, 16);
-       }
-
-       if (0 != powerplay_table->usVddgfxLookupTableOffset) {
-               const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable =
-                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset));
-
-               result = get_vddc_lookup_table(hwmgr,
-                       &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16);
-       }
-
-       disable_ppm = 0;
-       if (0 == disable_ppm) {
-               atom_ppm_table = (ATOM_Tonga_PPM_Table *)
-                       (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
-
-               if (0 != powerplay_table->usPPMTableOffset) {
-                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
-                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_EnablePlatformPowerManagement);
-                       }
-               }
-       }
-
-       return result;
-}
-
-static int get_valid_clk(
-               struct pp_hwmgr *hwmgr,
-               struct phm_clock_array **clk_table,
-               const phm_ppt_v1_clock_voltage_dependency_table  * clk_volt_pp_table
-               )
-{
-       uint32_t table_size, i;
-       struct phm_clock_array *table;
-
-       PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
-               "Invalid PowerPlay Table!", return -1);
-
-       table_size = sizeof(uint32_t) +
-               sizeof(uint32_t) * clk_volt_pp_table->count;
-
-       table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       memset(table, 0x00, table_size);
-
-       table->count = (uint32_t)clk_volt_pp_table->count;
-
-       for (i = 0; i < table->count; i++)
-               table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
-       *clk_table = table;
-
-       return 0;
-}
-
-static int get_hard_limits(
-               struct pp_hwmgr *hwmgr,
-               struct phm_clock_and_voltage_limits *limits,
-               const ATOM_Tonga_Hard_Limit_Table * limitable
-               )
-{
-       PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
-
-       /* currently we always take entries[0] parameters */
-       limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
-       limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
-       limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
-       limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
-       limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
-
-       return 0;
-}
-
-static int get_mclk_voltage_dependency_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
-               const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
-
-       PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-
-       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-               * mclk_dep_table->ucNumEntries;
-
-       mclk_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == mclk_table)
-               return -ENOMEM;
-
-       memset(mclk_table, 0x00, table_size);
-
-       mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
-
-       for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
-               mclk_table->entries[i].vddInd =
-                       mclk_dep_table->entries[i].ucVddcInd;
-               mclk_table->entries[i].vdd_offset =
-                       mclk_dep_table->entries[i].usVddgfxOffset;
-               mclk_table->entries[i].vddci =
-                       mclk_dep_table->entries[i].usVddci;
-               mclk_table->entries[i].mvdd =
-                       mclk_dep_table->entries[i].usMvdd;
-               mclk_table->entries[i].clk =
-                       mclk_dep_table->entries[i].ulMclk;
-       }
-
-       *pp_tonga_mclk_dep_table = mclk_table;
-
-       return 0;
-}
-
-static int get_sclk_voltage_dependency_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-               const PPTable_Generic_SubTable_Header *sclk_dep_table
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
-
-       if (sclk_dep_table->ucRevId < 1) {
-               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
-                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
-
-               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-                       * tonga_table->ucNumEntries;
-
-               sclk_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (NULL == sclk_table)
-                       return -ENOMEM;
-
-               memset(sclk_table, 0x00, table_size);
-
-               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
-
-               for (i = 0; i < tonga_table->ucNumEntries; i++) {
-                       sclk_table->entries[i].vddInd =
-                               tonga_table->entries[i].ucVddInd;
-                       sclk_table->entries[i].vdd_offset =
-                               tonga_table->entries[i].usVddcOffset;
-                       sclk_table->entries[i].clk =
-                               tonga_table->entries[i].ulSclk;
-                       sclk_table->entries[i].cks_enable =
-                               (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-                       sclk_table->entries[i].cks_voffset =
-                               (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-               }
-       } else {
-               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
-                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
-
-               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-                       * polaris_table->ucNumEntries;
-
-               sclk_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (NULL == sclk_table)
-                       return -ENOMEM;
-
-               memset(sclk_table, 0x00, table_size);
-
-               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
-
-               for (i = 0; i < polaris_table->ucNumEntries; i++) {
-                       sclk_table->entries[i].vddInd =
-                               polaris_table->entries[i].ucVddInd;
-                       sclk_table->entries[i].vdd_offset =
-                               polaris_table->entries[i].usVddcOffset;
-                       sclk_table->entries[i].clk =
-                               polaris_table->entries[i].ulSclk;
-                       sclk_table->entries[i].cks_enable =
-                               (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-                       sclk_table->entries[i].cks_voffset =
-                               (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-                       sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
-               }
-       }
-       *pp_tonga_sclk_dep_table = sclk_table;
-
-       return 0;
-}
-
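
The ucCKSVOffsetandDisable decoding above packs two fields into one byte: bits
0-6 hold the clock-kernel-stretcher (CKS) voltage offset, and bit 7, when set,
disables CKS for that SCLK level, so cks_enable is its inverse. The same masks
as two small helpers:

    #include <stdint.h>
    #include <stdbool.h>

    /* Bit 7 set means CKS is disabled for this SCLK level. */
    static inline bool cks_enabled(uint8_t v)    { return (v & 0x80) == 0; }
    /* Bits 0~6 carry the CKS voltage offset. */
    static inline uint8_t cks_voffset(uint8_t v) { return v & 0x7F; }

With v == 0x85, for instance, CKS is disabled and the offset is 5.
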
-static int get_pcie_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-               const PPTable_Generic_SubTable_Header * pTable
-               )
-{
-       uint32_t table_size, i, pcie_count;
-       phm_ppt_v1_pcie_table *pcie_table;
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (pTable->ucRevId < 1) {
-               const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
-               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) +
-                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
-
-               pcie_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (pcie_table == NULL)
-                       return -ENOMEM;
-
-               memset(pcie_table, 0x00, table_size);
-
-               /*
-               * Make sure the number of PCIE entries is less than or equal to the number of SCLK DPM levels plus one.
-               * Since the first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
-               */
-               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-               else
-                       printk(KERN_ERR "[ powerplay ] Number of PCIe entries exceeds the number of SCLK DPM levels! "
-                              "Disregarding the excess entries...\n");
-
-               pcie_table->count = pcie_count;
-
-               for (i = 0; i < pcie_count; i++) {
-                       pcie_table->entries[i].gen_speed =
-                               atom_pcie_table->entries[i].ucPCIEGenSpeed;
-                       pcie_table->entries[i].lane_width =
-                               atom_pcie_table->entries[i].usPCIELaneWidth;
-               }
-
-               *pp_tonga_pcie_table = pcie_table;
-       } else {
-               /* Polaris10/Polaris11 and newer. */
-               const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
-               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) +
-                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
-
-               pcie_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (pcie_table == NULL)
-                       return -ENOMEM;
-
-               memset(pcie_table, 0x00, table_size);
-
-               /*
-               * Make sure the number of PCIE entries is less than or equal to the number of SCLK DPM levels plus one.
-               * Since the first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
-               */
-               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-               else
-                       printk(KERN_ERR "[ powerplay ] Number of PCIe entries exceeds the number of SCLK DPM levels! "
-                              "Disregarding the excess entries...\n");
-
-               pcie_table->count = pcie_count;
-
-               for (i = 0; i < pcie_count; i++) {
-                       pcie_table->entries[i].gen_speed =
-                               atom_pcie_table->entries[i].ucPCIEGenSpeed;
-                       pcie_table->entries[i].lane_width =
-                               atom_pcie_table->entries[i].usPCIELaneWidth;
-                       pcie_table->entries[i].pcie_sclk =
-                               atom_pcie_table->entries[i].ulPCIE_Sclk;
-               }
-
-               *pp_tonga_pcie_table = pcie_table;
-       }
-
-       return 0;
-}
-
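
The clamp in get_pcie_table() above enforces the invariant spelled out in its
comment: the first PCIE entry is reserved for ULV, so at most SCLK-DPM-levels + 1
entries are usable and any excess VBIOS entries are dropped. As a standalone
helper with a hypothetical name:

    #include <stdint.h>

    /* Usable PCIE entries = min(VBIOS entries, SCLK levels + 1). */
    static uint32_t usable_pcie_entries(uint32_t vbios_entries,
                                        uint32_t sclk_levels)
    {
            uint32_t max = sclk_levels + 1;

            return vbios_entries <= max ? vbios_entries : max;
    }
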
-static int get_cac_tdp_table(
-               struct pp_hwmgr *hwmgr,
-               struct phm_cac_tdp_table **cac_tdp_table,
-               const PPTable_Generic_SubTable_Header * table
-               )
-{
-       uint32_t table_size;
-       struct phm_cac_tdp_table *tdp_table;
-
-       table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
-       tdp_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == tdp_table)
-               return -ENOMEM;
-
-       memset(tdp_table, 0x00, table_size);
-
-       hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == hwmgr->dyn_state.cac_dtp_table) {
-               kfree(tdp_table);
-               return -ENOMEM;
-       }
-
-       memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
-
-       if (table->ucRevId < 3) {
-               const ATOM_Tonga_PowerTune_Table *tonga_table =
-                       (ATOM_Tonga_PowerTune_Table *)table;
-               tdp_table->usTDP = tonga_table->usTDP;
-               tdp_table->usConfigurableTDP =
-                       tonga_table->usConfigurableTDP;
-               tdp_table->usTDC = tonga_table->usTDC;
-               tdp_table->usBatteryPowerLimit =
-                       tonga_table->usBatteryPowerLimit;
-               tdp_table->usSmallPowerLimit =
-                       tonga_table->usSmallPowerLimit;
-               tdp_table->usLowCACLeakage =
-                       tonga_table->usLowCACLeakage;
-               tdp_table->usHighCACLeakage =
-                       tonga_table->usHighCACLeakage;
-               tdp_table->usMaximumPowerDeliveryLimit =
-                       tonga_table->usMaximumPowerDeliveryLimit;
-               tdp_table->usDefaultTargetOperatingTemp =
-                       tonga_table->usTjMax;
-               tdp_table->usTargetOperatingTemp =
-                       tonga_table->usTjMax; /*Set the initial temp to the same as default */
-               tdp_table->usPowerTuneDataSetID =
-                       tonga_table->usPowerTuneDataSetID;
-               tdp_table->usSoftwareShutdownTemp =
-                       tonga_table->usSoftwareShutdownTemp;
-               tdp_table->usClockStretchAmount =
-                       tonga_table->usClockStretchAmount;
-       } else {   /* Fiji and newer */
-               const ATOM_Fiji_PowerTune_Table *fijitable =
-                       (ATOM_Fiji_PowerTune_Table *)table;
-               tdp_table->usTDP = fijitable->usTDP;
-               tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
-               tdp_table->usTDC = fijitable->usTDC;
-               tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
-               tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
-               tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
-               tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
-               tdp_table->usMaximumPowerDeliveryLimit =
-                       fijitable->usMaximumPowerDeliveryLimit;
-               tdp_table->usDefaultTargetOperatingTemp =
-                       fijitable->usTjMax;
-               tdp_table->usTargetOperatingTemp =
-                       fijitable->usTjMax; /*Set the initial temp to the same as default */
-               tdp_table->usPowerTuneDataSetID =
-                       fijitable->usPowerTuneDataSetID;
-               tdp_table->usSoftwareShutdownTemp =
-                       fijitable->usSoftwareShutdownTemp;
-               tdp_table->usClockStretchAmount =
-                       fijitable->usClockStretchAmount;
-               tdp_table->usTemperatureLimitHotspot =
-                       fijitable->usTemperatureLimitHotspot;
-               tdp_table->usTemperatureLimitLiquid1 =
-                       fijitable->usTemperatureLimitLiquid1;
-               tdp_table->usTemperatureLimitLiquid2 =
-                       fijitable->usTemperatureLimitLiquid2;
-               tdp_table->usTemperatureLimitVrVddc =
-                       fijitable->usTemperatureLimitVrVddc;
-               tdp_table->usTemperatureLimitVrMvdd =
-                       fijitable->usTemperatureLimitVrMvdd;
-               tdp_table->usTemperatureLimitPlx =
-                       fijitable->usTemperatureLimitPlx;
-               tdp_table->ucLiquid1_I2C_address =
-                       fijitable->ucLiquid1_I2C_address;
-               tdp_table->ucLiquid2_I2C_address =
-                       fijitable->ucLiquid2_I2C_address;
-               tdp_table->ucLiquid_I2C_Line =
-                       fijitable->ucLiquid_I2C_Line;
-               tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address;
-               tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line;
-               tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address;
-               tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line;
-       }
-
-       *cac_tdp_table = tdp_table;
-
-       return 0;
-}
-
-static int get_mm_clock_voltage_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table,
-               const ATOM_Tonga_MM_Dependency_Table * mm_dependency_table
-               )
-{
-       uint32_t table_size, i;
-       const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
-
-       PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-       table_size = sizeof(uint32_t) +
-               sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
-               * mm_dependency_table->ucNumEntries;
-       mm_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == mm_table)
-               return -ENOMEM;
-
-       memset(mm_table, 0x00, table_size);
-
-       mm_table->count = mm_dependency_table->ucNumEntries;
-
-       for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
-               mm_dependency_record = &mm_dependency_table->entries[i];
-               mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
-               mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
-               mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
-               mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
-               mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
-               mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
-               mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
-       }
-
-       *tonga_mm_table = mm_table;
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize the clock/voltage dependency tables.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- */
-static int init_clock_voltage_dependency(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       int result = 0;
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table =
-               (const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usMMDependencyTableOffset));
-       const PPTable_Generic_SubTable_Header *pPowerTuneTable =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usPowerTuneTableOffset));
-       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-               (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-       const PPTable_Generic_SubTable_Header *sclk_dep_table =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-       const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
-               (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usHardLimitTableOffset));
-       const PPTable_Generic_SubTable_Header *pcie_table =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usPCIETableOffset));
-
-       pp_table_information->vdd_dep_on_sclk = NULL;
-       pp_table_information->vdd_dep_on_mclk = NULL;
-       pp_table_information->mm_dep_table = NULL;
-       pp_table_information->pcie_table = NULL;
-
-       if (powerplay_table->usMMDependencyTableOffset != 0)
-               result = get_mm_clock_voltage_table(hwmgr,
-               &pp_table_information->mm_dep_table, mm_dependency_table);
-
-       if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0)
-               result = get_cac_tdp_table(hwmgr,
-               &pp_table_information->cac_dtp_table, pPowerTuneTable);
-
-       if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0)
-               result = get_sclk_voltage_dependency_table(hwmgr,
-               &pp_table_information->vdd_dep_on_sclk, sclk_dep_table);
-
-       if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0)
-               result = get_mclk_voltage_dependency_table(hwmgr,
-               &pp_table_information->vdd_dep_on_mclk, mclk_dep_table);
-
-       if (result == 0 && powerplay_table->usPCIETableOffset != 0)
-               result = get_pcie_table(hwmgr,
-               &pp_table_information->pcie_table, pcie_table);
-
-       if (result == 0 && powerplay_table->usHardLimitTableOffset != 0)
-               result = get_hard_limits(hwmgr,
-               &pp_table_information->max_clock_voltage_on_dc, pHardLimits);
-
-       hwmgr->dyn_state.max_clock_voltage_on_dc.sclk =
-               pp_table_information->max_clock_voltage_on_dc.sclk;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.mclk =
-               pp_table_information->max_clock_voltage_on_dc.mclk;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-               pp_table_information->max_clock_voltage_on_dc.vddc;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddci =
-               pp_table_information->max_clock_voltage_on_dc.vddci;
-
-       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk)
-               && (0 != pp_table_information->vdd_dep_on_mclk->count))
-               result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values,
-               pp_table_information->vdd_dep_on_mclk);
-
-       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk)
-               && (0 != pp_table_information->vdd_dep_on_sclk->count))
-               result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
-               pp_table_information->vdd_dep_on_sclk);
-
-       return result;
-}
-
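Every subtable above is resolved the same way: a little-endian 16-bit offset
from the PowerPlay header is added to the header's own base address, and an
offset of zero means the subtable is absent. A minimal sketch of that offset
arithmetic, with a hypothetical two-field header standing in for
ATOM_Tonga_POWERPLAYTABLE:

    #include <stdint.h>
    #include <endian.h>     /* le16toh(), a userspace analogue of le16_to_cpu() */

    struct pp_header { uint16_t size; uint16_t mclk_dep_offset; };

    /* Resolve a subtable pointer; a zero offset means "not present". */
    static const void *subtable(const struct pp_header *hdr, uint16_t le_off)
    {
            uint16_t off = le16toh(le_off);

            return off ? (const void *)((const uint8_t *)hdr + off) : NULL;
    }

A caller would write subtable(hdr, hdr->mclk_dep_offset) and check for NULL
before parsing, mirroring the usMclkDependencyTableOffset test above.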
-/** Retrieves the (signed) Overdrive limits from the VBIOS.
- * The maximum engine and memory clocks come from the PowerPlay table.
- *
- * The information is placed into the platform descriptor.
- *
- * @param hwmgr source of the VBIOS table and owner of the platform descriptor to be updated.
- * @param powerplay_table the address of the PowerPlay table.
- *
- * @return Always returns 0.
- */
-static int init_over_drive_limits(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
-{
-       hwmgr->platform_descriptor.overdriveLimit.engineClock =
-               le32_to_cpu(powerplay_table->ulMaxODEngineClock);
-       hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-               le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
-
-       hwmgr->platform_descriptor.minOverdriveVDDC = 0;
-       hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
-       hwmgr->platform_descriptor.overdriveVDDCStep = 0;
-
-       if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
-               && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ACOverdriveSupport);
-       }
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize the thermal controller state from the PowerPlay table.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- * @return 0 on success, -1 if the thermal controller table is missing.
- */
-static int init_thermal_controller(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       const PPTable_Generic_SubTable_Header *fan_table;
-       ATOM_Tonga_Thermal_Controller *thermal_controller;
-
-       PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset),
-               "Thermal controller table not set!", return -1);
-       thermal_controller = (ATOM_Tonga_Thermal_Controller *)
-               (((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usThermalControllerOffset));
-
-       hwmgr->thermal_controller.ucType = thermal_controller->ucType;
-       hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
-       hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress;
-
-       hwmgr->thermal_controller.fanInfo.bNoFan =
-               (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN));
-
-       hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
-               thermal_controller->ucFanParameters &
-               ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
-
-       hwmgr->thermal_controller.fanInfo.ulMinRPM
-               = thermal_controller->ucFanMinRPM * 100UL;
-       hwmgr->thermal_controller.fanInfo.ulMaxRPM
-               = thermal_controller->ucFanMaxRPM * 100UL;
-
-       set_hw_cap(
-                       hwmgr,
-                       ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
-                       PHM_PlatformCaps_ThermalController
-                 );
-
-       if (0 == powerplay_table->usFanTableOffset)
-               return 0;
-
-       fan_table = (const PPTable_Generic_SubTable_Header *)
-               (((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usFanTableOffset));
-
-       PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId),
-               "Unsupported fan table format!", return -1);
-
-       hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
-               = 100000;
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_MicrocodeFanControl);
-
-       if (fan_table->ucRevId < 8) {
-               const ATOM_Tonga_Fan_Table *tonga_fan_table =
-                       (ATOM_Tonga_Fan_Table *)fan_table;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
-                       = tonga_fan_table->ucTHyst;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
-                       = tonga_fan_table->usTMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
-                       = tonga_fan_table->usTMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
-                       = tonga_fan_table->usTHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
-                       = tonga_fan_table->usPWMMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
-                       = tonga_fan_table->usPWMMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
-                       = tonga_fan_table->usPWMHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
-                       = tonga_fan_table->usTMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
-                       = tonga_fan_table->ucFanControlMode;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
-                       = tonga_fan_table->usFanPWMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
-                       = 4836;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
-                       = tonga_fan_table->usFanOutputSensitivity;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
-                       = tonga_fan_table->usFanRPMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
-                       = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units (two implied decimals); the SMC wants MHz. */
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
-                       = tonga_fan_table->ucTargetTemperature;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
-                       = tonga_fan_table->ucMinimumPWMLimit;
-       } else {
-               const ATOM_Fiji_Fan_Table *fiji_fan_table =
-                       (ATOM_Fiji_Fan_Table *)fan_table;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
-                       = fiji_fan_table->ucTHyst;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
-                       = fiji_fan_table->usTMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
-                       = fiji_fan_table->usTMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
-                       = fiji_fan_table->usTHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
-                       = fiji_fan_table->usPWMMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
-                       = fiji_fan_table->usPWMMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
-                       = fiji_fan_table->usPWMHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
-                       = fiji_fan_table->usTMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
-                       = fiji_fan_table->ucFanControlMode;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
-                       = fiji_fan_table->usFanPWMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
-                       = 4836;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
-                       = fiji_fan_table->usFanOutputSensitivity;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
-                       = fiji_fan_table->usFanRPMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
-                       = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units (two implied decimals); the SMC wants MHz. */
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
-                       = fiji_fan_table->ucTargetTemperature;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
-                       = fiji_fan_table->ucMinimumPWMLimit;
-
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
-                       = fiji_fan_table->usFanGainEdge;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
-                       = fiji_fan_table->usFanGainHotspot;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
-                       = fiji_fan_table->usFanGainLiquid;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
-                       = fiji_fan_table->usFanGainVrVddc;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
-                       = fiji_fan_table->usFanGainVrMvdd;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
-                       = fiji_fan_table->usFanGainPlx;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
-                       = fiji_fan_table->usFanGainHbm;
-       }
-
-       return 0;
-}
-
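ucFanParameters above packs the no-fan flag and the tachometer
pulses-per-revolution count into a single byte, and the RPM limits are stored
in units of 100 RPM. A self-contained sketch of the unpacking, with assumed
bit positions (the exact ATOM_TONGA_PP_FANPARAMETERS_* constants live in the
deleted headers):

    #include <stdbool.h>
    #include <stdint.h>

    #define FANPARAMS_TACH_PPR_MASK 0x0f    /* assumed: low nibble, pulses/rev */
    #define FANPARAMS_NOFAN         0x80    /* assumed: high bit, no fan fitted */

    struct fan_info { bool no_fan; uint8_t tach_ppr; uint32_t min_rpm, max_rpm; };

    static void unpack_fan_params(struct fan_info *fi, uint8_t params,
                                  uint8_t min_rpm_100s, uint8_t max_rpm_100s)
    {
            fi->no_fan = (params & FANPARAMS_NOFAN) != 0;
            fi->tach_ppr = params & FANPARAMS_TACH_PPR_MASK;
            fi->min_rpm = min_rpm_100s * 100u;      /* table unit: 100 RPM */
            fi->max_rpm = max_rpm_100s * 100u;
    }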
-/**
- * Private Function used during initialization.
- * Inspect the PowerPlay table for obvious signs of corruption.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- * @return 0 on success, -1 if the PowerPlay table is corrupt.
- */
-static int check_powerplay_tables(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       const ATOM_Tonga_State_Array *state_arrays;
-
-       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usStateArrayOffset));
-
-       PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <=
-               powerplay_table->sHeader.ucTableFormatRevision),
-               "Unsupported PPTable format!", return -1);
-       PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset),
-               "State table is not set!", return -1);
-       PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize),
-               "Invalid PowerPlay Table!", return -1);
-       PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-
-       return 0;
-}
-
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
-
-       hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL);
-
-       PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
-                           "Failed to allocate hwmgr->pptable!", return -ENOMEM);
-
-       memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information));
-
-       powerplay_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != powerplay_table),
-               "Missing PowerPlay Table!", return -1);
-
-       result = check_powerplay_tables(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "check_powerplay_tables failed", return result);
-
-       result = set_platform_caps(hwmgr,
-                                  le32_to_cpu(powerplay_table->ulPlatformCaps));
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "set_platform_caps failed", return result);
-
-       result = init_thermal_controller(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_thermal_controller failed", return result);
-
-       result = init_over_drive_limits(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_over_drive_limits failed", return result);
-
-       result = init_clock_voltage_dependency(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_clock_voltage_dependency failed", return result);
-
-       result = init_dpm_2_parameters(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_dpm_2_parameters failed", return result);
-
-       return result;
-}
-
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       kfree(pp_table_information->vdd_dep_on_sclk);
-       pp_table_information->vdd_dep_on_sclk = NULL;
-
-       kfree(pp_table_information->vdd_dep_on_mclk);
-       pp_table_information->vdd_dep_on_mclk = NULL;
-
-       kfree(pp_table_information->valid_mclk_values);
-       pp_table_information->valid_mclk_values = NULL;
-
-       kfree(pp_table_information->valid_sclk_values);
-       pp_table_information->valid_sclk_values = NULL;
-
-       kfree(pp_table_information->vddc_lookup_table);
-       pp_table_information->vddc_lookup_table = NULL;
-
-       kfree(pp_table_information->vddgfx_lookup_table);
-       pp_table_information->vddgfx_lookup_table = NULL;
-
-       kfree(pp_table_information->mm_dep_table);
-       pp_table_information->mm_dep_table = NULL;
-
-       kfree(pp_table_information->cac_dtp_table);
-       pp_table_information->cac_dtp_table = NULL;
-
-       kfree(hwmgr->dyn_state.cac_dtp_table);
-       hwmgr->dyn_state.cac_dtp_table = NULL;
-
-       kfree(pp_table_information->ppm_parameter_table);
-       pp_table_information->ppm_parameter_table = NULL;
-
-       kfree(pp_table_information->pcie_table);
-       pp_table_information->pcie_table = NULL;
-
-       kfree(hwmgr->pptable);
-       hwmgr->pptable = NULL;
-
-       return 0;
-}
-
-const struct pp_table_func tonga_pptable_funcs = {
-       .pptable_init = tonga_pp_tables_initialize,
-       .pptable_fini = tonga_pp_tables_uninitialize,
-};
-
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
-{
-       const ATOM_Tonga_State_Array * state_arrays;
-       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != pp_table),
-                       "Missing PowerPlay Table!", return -1);
-       PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >=
-                       ATOM_Tonga_TABLE_REVISION_TONGA),
-                       "Incorrect PowerPlay table revision!", return -1);
-
-       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
-                       le16_to_cpu(pp_table->usStateArrayOffset));
-
-       return (uint32_t)(state_arrays->ucNumEntries);
-}
-
-/**
-* Private function to convert flags stored in the BIOS to software flags in PowerPlay.
-*/
-static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
-               uint16_t classification, uint16_t classification2)
-{
-       uint32_t result = 0;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
-               result |= PP_StateClassificationFlag_Boot;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
-               result |= PP_StateClassificationFlag_Thermal;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
-               result |= PP_StateClassificationFlag_LimitedPowerSource;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
-               result |= PP_StateClassificationFlag_Rest;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
-               result |= PP_StateClassificationFlag_Forced;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
-               result |= PP_StateClassificationFlag_ACPI;
-
-       if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
-               result |= PP_StateClassificationFlag_LimitedPowerSource_2;
-
-       return result;
-}
-
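make_classification_flags() below is a chain of test-and-set pairs translating
BIOS classification bits into PowerPlay flags. The same mapping can be
expressed as a table walk; a sketch of that alternative form (not the form the
kernel uses):

    #include <stdint.h>

    struct flag_map { uint16_t bios_bit; uint32_t pp_flag; };

    static uint32_t translate_flags(uint16_t bios, const struct flag_map *map,
                                    unsigned int n)
    {
            uint32_t out = 0;
            unsigned int i;

            /* set each PowerPlay flag whose BIOS bit is present */
            for (i = 0; i < n; i++)
                    if (bios & map[i].bios_bit)
                            out |= map[i].pp_flag;
            return out;
    }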
-/**
-* Create a Power State out of an entry in the PowerPlay table.
-* This function is called by the hardware back-end.
-* @param hwmgr Pointer to the hardware manager.
-* @param entry_index The index of the entry to be extracted from the table.
-* @param power_state The address of the PowerState instance being created.
-* @return -1 if the entry cannot be retrieved.
-*/
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
-               uint32_t entry_index, struct pp_power_state *power_state,
-               int (*call_back_func)(struct pp_hwmgr *, void *,
-                               struct pp_power_state *, void *, uint32_t))
-{
-       int result = 0;
-       const ATOM_Tonga_State_Array * state_arrays;
-       const ATOM_Tonga_State *state_entry;
-       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
-       power_state->classification.bios_index = entry_index;
-
-       if (pp_table->sHeader.ucTableFormatRevision >=
-                       ATOM_Tonga_TABLE_REVISION_TONGA) {
-               state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
-                               le16_to_cpu(pp_table->usStateArrayOffset));
-
-               PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset),
-                               "Invalid PowerPlay Table State Array Offset.", return -1);
-               PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
-                               "Invalid PowerPlay Table State Array.", return -1);
-               PP_ASSERT_WITH_CODE((entry_index < state_arrays->ucNumEntries),
-                               "Invalid PowerPlay Table State Array Entry.", return -1);
-
-               state_entry = &(state_arrays->states[entry_index]);
-
-               result = call_back_func(hwmgr, (void *)state_entry, power_state,
-                               (void *)pp_table,
-                               make_classification_flags(hwmgr,
-                                       le16_to_cpu(state_entry->usClassification),
-                                       le16_to_cpu(state_entry->usClassification2)));
-       }
-
-       if (!result && (power_state->classification.flags &
-                       PP_StateClassificationFlag_Boot))
-               result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
-
-       return result;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
deleted file mode 100644 (file)
index d24b888..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
-
-#include "hwmgr.h"
-
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-               struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-                               struct pp_power_state *, void *, uint32_t));
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644 (file)
index 47ef1ca..0000000
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-#include "tonga_thermal.h"
-#include "tonga_hwmgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_ppsmc.h"
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-/**
-* Get Fan Speed Control Parameters.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    fan_speed_info the address of the structure where the result is to be placed.
-* @return   Always returns 0.
-*/
-int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
-{
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       fan_speed_info->supports_percent_read = true;
-       fan_speed_info->supports_percent_write = true;
-       fan_speed_info->min_percent = 0;
-       fan_speed_info->max_percent = 100;
-
-       if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-               fan_speed_info->supports_rpm_read = true;
-               fan_speed_info->supports_rpm_write = true;
-               fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
-               fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
-       } else {
-               fan_speed_info->min_rpm = 0;
-               fan_speed_info->max_rpm = 0;
-       }
-
-       return 0;
-}
-
-/**
-* Get Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed  the address where the fan speed percentage is to be placed.
-* @return   -EINVAL if the 100% duty setting reads back as zero.
-*/
-int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-       duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
-       if (0 == duty100)
-               return -EINVAL;
-
-
-       tmp64 = (uint64_t)duty * 100;
-       do_div(tmp64, duty100);
-       *speed = (uint32_t)tmp64;
-
-       if (*speed > 100)
-               *speed = 100;
-
-       return 0;
-}
-
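The percent readback above scales the raw duty value against the 100% duty
reference with a 64-bit intermediate so the multiply cannot overflow; do_div()
is the kernel's 64-by-32 division helper for 32-bit targets. A plain C
equivalent of the computation, clamping exactly as above:

    #include <stdint.h>

    /* duty/duty100 as a percentage, saturated at 100 */
    static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
    {
            uint64_t pct;

            if (duty100 == 0)
                    return 0;       /* the driver returns -EINVAL here */

            pct = (uint64_t)duty * 100 / duty100;
            return pct > 100 ? 100 : (uint32_t)pct;
    }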
-/**
-* Get Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed  the address where the result is to be placed.
-* @return   Currently unimplemented; always returns 0.
-*/
-int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-       return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    mode   the fan control mode: 0 = default, 1 = by percent, 5 = by RPM.
-* @return   Always returns 0.
-*/
-int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
-       if (hwmgr->fan_ctrl_is_in_default_mode) {
-               hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
-               hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
-               hwmgr->fan_ctrl_is_in_default_mode = false;
-       }
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
-       return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->fan_ctrl_is_in_default_mode) {
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       }
-
-       return 0;
-}
-
-int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-               result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ?  0 : -EINVAL;
-/*
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
-                       hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
-               else
-                       hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
-*/
-       } else {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-               result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ?  0 : -EINVAL;
-       }
-/* TODO: on some device IDs (0x692b), sending this message returns an invalid command, so it stays disabled.
-       if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
-               result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
-                                                               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
-*/
-       return result;
-}
-
-
-int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-       return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ?  0 : -EINVAL;
-}
-
-/**
-* Set Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed  the percentage value (0% - 100%) to be set.
-* @return   -EINVAL if there is no fan or the 100% duty setting reads back as zero.
-*/
-int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-       uint32_t duty100;
-       uint32_t duty;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return -EINVAL;
-
-       if (speed > 100)
-               speed = 100;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-               tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (0 == duty100)
-               return -EINVAL;
-
-       tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
-       duty = (uint32_t)tmp64;
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
-       return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               return 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
-               result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-               if (0 == result)
-                       result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
-       } else
-               result = tonga_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed  the RPM value (min - max) to be set.
-* @return   Currently unimplemented; always returns 0.
-*/
-int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-       return 0;
-}
-
-/**
-* Reads the remote temperature from the Tonga thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-       int temp;
-
-       temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-/* Bit 9 means the reading is lower than the lowest usable value. */
-       if (0 != (0x200 & temp))
-               temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
-       else
-               temp = (temp & 0x1ff);
-
-       temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       return temp;
-}
-
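CTF_TEMP above is a 9-bit reading in degrees Celsius, with bit 9 flagging a
value below the lowest usable reading; the result is then scaled to the
driver's internal units (PP_TEMPERATURE_UNITS_PER_CENTIGRADES, 1000 in this
driver, so the result is in millidegrees). A standalone sketch of the decode:

    #include <stdint.h>

    #define MAX_TEMP_READING   255      /* TONGA_THERMAL_MAXIMUM_TEMP_READING */
    #define UNITS_PER_DEGREE   1000     /* PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

    static int decode_ctf_temp(uint32_t raw)
    {
            int temp;

            if (raw & 0x200)            /* bit 9: below lowest usable value */
                    temp = MAX_TEMP_READING;
            else
                    temp = raw & 0x1ff; /* 9-bit reading, degrees C */

            return temp * UNITS_PER_DEGREE;     /* e.g. raw 0x3c -> 60000 */
    }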
-/**
-* Set the requested temperature range for high and low alert signals.
-*
-* @param    hwmgr The address of the hardware manager.
-* @param    low_temp  Low alert temperature bound.
-* @param    high_temp High alert temperature bound.
-* @return   -EINVAL if the clamped range is inverted.
-*/
-static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
-{
-       uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       if (low < low_temp)
-               low = low_temp;
-       if (high > high_temp)
-               high = high_temp;
-
-       if (low > high)
-               return -EINVAL;
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
-       return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
-               PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                                               CG_TACH_CTRL, EDGE_PER_REV,
-                                               hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
-
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
-       return 0;
-}
-
-/**
-* Enable thermal alerts on the Tonga thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
-       alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to enable internal thermal interrupts */
-       return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
-}
-
-/**
-* Disable thermal alerts on the Tonga thermal controller.
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
-       uint32_t alert;
-
-       alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
-       alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
-       PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-       /* send message to SMU to disable internal thermal interrupts */
-       return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param    hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       int result = tonga_thermal_disable_alert(hwmgr);
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan)
-               tonga_fan_ctrl_set_default_mode(hwmgr);
-
-       return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-       SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-               return 0;
-
-       if (0 == data->fan_table_start) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (0 == duty100) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = tonga_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       fan_table.FanControl_GL_Flag = 1;
-
-       res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
-/* TODO: on some device IDs (0x692b), sending this message returns an invalid command, so it stays disabled.
-       if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
-               res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
-                                               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
-
-       if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
-               res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
-                                       hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
-
-       if (0 != res)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-*/
-       return 0;
-}
-
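The two slope terms above fold the PWM-per-degree gradient into the SMC's
fixed-point format: 16 * duty100 * delta_pwm / delta_t, with +50 added before
the final division by 100 to round to nearest. A worked standalone example
(the sample numbers are illustrative, not values from any real fan table):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fan_slope(uint32_t duty100, uint32_t pwm_diff, uint32_t t_diff)
    {
            /* +50 rounds the subsequent division by 100 to nearest */
            return (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
    }

    int main(void)
    {
            /* e.g. duty100 = 255, PWM rises 30 points over a 25-unit span */
            printf("slope = %u\n", fan_slope(255, 30, 25));     /* prints 49 */
            return 0;
    }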
-/**
-* Start the fan control on the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-/* If the fan table setup has failed, we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
- */
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
-               tonga_fan_ctrl_start_smc_fan_control(hwmgr);
-               tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-       }
-
-       return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-       struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
-       if (range == NULL)
-               return -EINVAL;
-
-       return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from initialize thermal controller routine
-*/
-int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-       return tonga_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from enable alert routine
-*/
-int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-       return tonga_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from disable alert routine
-*/
-static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-       return tonga_thermal_disable_alert(hwmgr);
-}
-
-static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
-       { NULL, tf_tonga_thermal_initialize },
-       { NULL, tf_tonga_thermal_set_temperature_range },
-       { NULL, tf_tonga_thermal_enable_alert },
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
-       { NULL, tf_tonga_thermal_setup_fan_table},
-       { NULL, tf_tonga_thermal_start_smc_fan_control},
-       { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       tonga_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
-       { NULL, tf_tonga_thermal_disable_alert},
-       { NULL, tf_tonga_thermal_set_temperature_range},
-       { NULL, tf_tonga_thermal_enable_alert},
-       { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
-       0,
-       PHM_MasterTableFlag_None,
-       tonga_thermal_set_temperature_range_master_list
-};
-
-int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-       if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-               tonga_fan_ctrl_set_default_mode(hwmgr);
-       return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param    hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-       int result;
-
-       result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
-
-       if (0 == result) {
-               result = phm_construct_table(hwmgr,
-                                               &tonga_thermal_start_thermal_controller_master,
-                                               &(hwmgr->start_thermal_controller));
-               if (0 != result)
-                       phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
-       }
-
-       if (0 == result)
-               hwmgr->fan_ctrl_is_in_default_mode = true;
-       return result;
-}
-
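phm_construct_table() used above turns a NULL-terminated list of
{check, action} entries into a runtime dispatch table, and the error path
tears the first table down again if building the second one fails. A minimal
sketch of walking such a list (the types are simplified stand-ins, not the
PHM API):

    #include <stddef.h>

    struct hw;      /* stand-in for struct pp_hwmgr */

    struct table_item {
            int (*check)(struct hw *);      /* optional gate, may be NULL */
            int (*action)(struct hw *, void *in, void *out, void *tmp, int last);
    };

    /* Run every action whose check passes; stop on the first failure. */
    static int run_table(struct hw *hw, const struct table_item *items)
    {
            int result = 0;

            for (; items->action; items++) {
                    if (items->check && !items->check(hw))
                            continue;
                    result = items->action(hw, NULL, NULL, NULL, result);
                    if (result)
                            break;
            }
            return result;
    }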
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644 (file)
index aa335f2..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_THERMAL_H
-#define TONGA_THERMAL_H
-
-#include "hwmgr.h"
-
-#define TONGA_THERMAL_HIGH_ALERT_MASK         0x1
-#define TONGA_THERMAL_LOW_ALERT_MASK          0x2
-
-#define TONGA_THERMAL_MINIMUM_TEMP_READING    -256
-#define TONGA_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define TONGA_THERMAL_MINIMUM_ALERT_TEMP      0
-#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-
-#endif
-
index b764c8c05ec8127d64cc7b57ea233eb8cf581c3e..3fb5e57a378bc7df6fea185e975e1f7f5413bebe 100644 (file)
 #include "amd_shared.h"
 #include "cgs_common.h"
 
+enum amd_pp_sensors {
+       AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+       AMDGPU_PP_SENSOR_VDDNB,
+       AMDGPU_PP_SENSOR_VDDGFX,
+       AMDGPU_PP_SENSOR_UVD_VCLK,
+       AMDGPU_PP_SENSOR_UVD_DCLK,
+       AMDGPU_PP_SENSOR_VCE_ECCLK,
+       AMDGPU_PP_SENSOR_GPU_LOAD,
+       AMDGPU_PP_SENSOR_GFX_MCLK,
+       AMDGPU_PP_SENSOR_GPU_TEMP,
+       AMDGPU_PP_SENSOR_VCE_POWER,
+       AMDGPU_PP_SENSOR_UVD_POWER,
+};
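The new amd_pp_sensors enum pairs with the read_sensor() hook added further
down in this patch: callers select a sensor by index and receive a single
int32_t back. A hypothetical usage sketch (pp_read_sensor is a stand-in for
the hook, and the millidegree unit for GPU_TEMP is an assumption carried over
from the thermal code):

    #include <stdint.h>
    #include <stdio.h>

    #define AMDGPU_PP_SENSOR_GPU_TEMP 8     /* matches the enum above */

    /* stand-in for the read_sensor hook; real values come from the SMC */
    static int pp_read_sensor(void *handle, int idx, int32_t *value)
    {
            (void)handle;
            *value = (idx == AMDGPU_PP_SENSOR_GPU_TEMP) ? 61000 : 0;
            return 0;
    }

    int main(void)
    {
            int32_t temp;

            if (!pp_read_sensor(NULL, AMDGPU_PP_SENSOR_GPU_TEMP, &temp))
                    printf("GPU temperature: %d.%03d C\n",
                           temp / 1000, temp % 1000);
            return 0;
    }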
 
 enum amd_pp_event {
        AMD_PP_EVENT_INITIALIZE = 0,
@@ -131,9 +144,8 @@ struct amd_pp_init {
        struct cgs_device *device;
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t rev_id;
-       bool powercontainment_enabled;
 };
+
 enum amd_pp_display_config_type{
        AMD_PP_DisplayConfigType_None = 0,
        AMD_PP_DisplayConfigType_DP54 ,
@@ -261,6 +273,7 @@ enum amd_pp_clock_type {
 struct amd_pp_clocks {
        uint32_t count;
        uint32_t clock[MAX_NUM_CLOCKS];
+       uint32_t latency[MAX_NUM_CLOCKS];
 };
 
 
@@ -332,8 +345,6 @@ struct amd_powerplay_funcs {
        int (*powergate_uvd)(void *handle, bool gate);
        int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
                                   void *input, void *output);
-       void (*print_current_performance_level)(void *handle,
-                                                     struct seq_file *m);
        int (*set_fan_control_mode)(void *handle, uint32_t mode);
        int (*get_fan_control_mode)(void *handle);
        int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@@ -347,6 +358,7 @@ struct amd_powerplay_funcs {
        int (*set_sclk_od)(void *handle, uint32_t value);
        int (*get_mclk_od)(void *handle);
        int (*set_mclk_od)(void *handle, uint32_t value);
+       int (*read_sensor)(void *handle, int idx, int32_t *value);
 };
 
 struct amd_powerplay {
@@ -378,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle,
 int amd_powerplay_get_display_mode_validation_clocks(void *handle,
                struct amd_pp_simple_clock_info *output);
 
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
+
 #endif /* _AMD_POWERPLAY_H_ */
index 962cb538595117421c0b08d5a3fa4b94ceb1f2b1..d4495839c64ce9a9eaa7b6608ce1fb2d8b0b5545 100644 (file)
@@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
 extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
index bf0d2accf7bff5d23e8e89895c468481b03fdb46..4f0fedd1e9d37085e43e2fea63674aecaf74534f 100644 (file)
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
+#include "power_state.h"
 
 struct pp_instance;
 struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
 struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
 
 enum DISPLAY_GAP {
        DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
@@ -49,7 +54,6 @@ enum DISPLAY_GAP {
 };
 typedef enum DISPLAY_GAP DISPLAY_GAP;
 
-
 struct vi_dpm_level {
        bool enabled;
        uint32_t value;
@@ -71,6 +75,19 @@ enum PP_Result {
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4
 
+enum PP_FEATURE_MASK {
+       PP_SCLK_DPM_MASK = 0x1,
+       PP_MCLK_DPM_MASK = 0x2,
+       PP_PCIE_DPM_MASK = 0x4,
+       PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+       PP_POWER_CONTAINMENT_MASK = 0x10,
+       PP_UVD_HANDSHAKE_MASK = 0x20,
+       PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+       PP_VBI_TIME_SUPPORT_MASK = 0x80,
+       PP_ULV_MASK = 0x100,
+       PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
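
These bits are intended to be tested against the amdgpu_pp_feature_mask
module parameter declared above (cached as hwmgr->feature_mask later in this
patch) before a feature is programmed. A minimal sketch of the gating
pattern; enable_sclk_deep_sleep() is a hypothetical helper:

	static bool pp_feature_enabled(const struct pp_hwmgr *hwmgr,
				       enum PP_FEATURE_MASK feature)
	{
		return (hwmgr->feature_mask & feature) != 0;
	}

	/* e.g. in a setup path */
	if (pp_feature_enabled(hwmgr, PP_SCLK_DEEP_SLEEP_MASK))
		enable_sclk_deep_sleep(hwmgr);	/* hypothetical */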
+
 enum PHM_BackEnd_Magic {
        PHM_Dummy_Magic       = 0xAA5555AA,
        PHM_RV770_Magic       = 0xDCBAABCD,
@@ -294,8 +311,6 @@ struct pp_hwmgr_func {
        int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
        int (*power_state_set)(struct pp_hwmgr *hwmgr,
                                                const void *state);
-       void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
-                                                       struct seq_file *m);
        int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
        int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
        int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -342,6 +357,7 @@ struct pp_hwmgr_func {
        int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
        int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
        int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+       int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
 };
 
 struct pp_table_func {
@@ -351,7 +367,7 @@ struct pp_table_func {
        int (*pptable_get_vce_state_table_entry)(
                                                struct pp_hwmgr *hwmgr,
                                                unsigned long i,
-                                               struct PP_VCEState *vce_state,
+                                               struct pp_vce_state *vce_state,
                                                void **clock_info,
                                                unsigned long *flag);
 };
@@ -570,22 +586,43 @@ struct phm_microcode_version_info {
        uint32_t NB;
 };
 
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+       PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+       PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+       PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+       PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+       PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+       PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+       PP_TABLE_V0 = 0,
+       PP_TABLE_V1,
+       PP_TABLE_V2,
+       PP_TABLE_MAX
+};
+
 /**
  * The main hardware manager structure.
  */
 struct pp_hwmgr {
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t hw_revision;
-       uint32_t sub_sys_id;
-       uint32_t sub_vendor_id;
 
+       uint32_t pp_table_version;
        void *device;
        struct pp_smumgr *smumgr;
        const void *soft_pp_table;
        uint32_t soft_pp_table_size;
        void *hardcode_pp_table;
        bool need_pp_table_upload;
+
+       struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+       uint32_t num_vce_state_tables;
+
        enum amd_dpm_forced_level dpm_level;
        bool block_hw_access;
        struct phm_gfx_arbiter gfx_arbiter;
@@ -614,7 +651,6 @@ struct pp_hwmgr {
        uint32_t num_ps;
        struct pp_thermal_controller_info thermal_controller;
        bool fan_ctrl_is_in_default_mode;
-       bool powercontainment_enabled;
        uint32_t fan_ctrl_default_mode;
        uint32_t tmin;
        struct phm_microcode_version_info microcode_version_info;
@@ -624,6 +660,7 @@ struct pp_hwmgr {
        struct pp_power_state    *boot_ps;
        struct pp_power_state    *uvd_ps;
        struct amd_pp_display_configuration display_config;
+       uint32_t feature_mask;
 };
 
 
@@ -637,16 +674,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
 extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
                                uint32_t value, uint32_t mask);
 
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-                               uint32_t index, uint32_t value, uint32_t mask);
 
-extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
-               uint32_t indirect_port, uint32_t index);
-
-extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
-               uint32_t indirect_port,
-               uint32_t index,
-               uint32_t value);
 
 extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
                                uint32_t indirect_port,
@@ -654,12 +682,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
                                uint32_t value,
                                uint32_t mask);
 
-extern void phm_wait_for_indirect_register_unequal(
-                               struct pp_hwmgr *hwmgr,
-                               uint32_t indirect_port,
-                               uint32_t index,
-                               uint32_t value,
-                               uint32_t mask);
+
 
 extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
 extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
@@ -673,6 +696,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st
 extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
 extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
 extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+               uint32_t voltage);
 extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
 extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
 extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -683,6 +708,10 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
 extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
 
+extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+                               uint32_t sclk, uint16_t id, uint16_t *voltage);
+
 #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -697,44 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
         PHM_FIELD_SHIFT(reg, field))
 
 
-#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask)       \
-       phm_wait_on_register(hwmgr, index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask)       \
-       phm_wait_for_register_unequal(hwmgr, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)        \
-       phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)        \
-       phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)   \
-       phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)   \
-       phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-/* Operations on named registers. */
-
-#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask)     \
-       PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask)     \
-       PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)      \
-       PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask)      \
-       PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
-       PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
-       PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
 /* Operations on named fields. */
 
 #define PHM_READ_FIELD(device, reg, field)     \
@@ -762,60 +753,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
                        PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),     \
                                reg, field, fieldval))
 
-#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval)    \
-       PHM_WAIT_REGISTER(hwmgr, reg, (fieldval)        \
-                       << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)     \
-       PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
-                       << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)        \
-       PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval)    \
-                       << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)        \
+       phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
 
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval)    \
-       PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval)        \
-                       << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
 
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)     \
-       PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
-                       << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)      \
+       PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
 
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)        \
-       PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval)    \
+#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)     \
+       PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
                        << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
 
-/* Operations on arrays of registers & fields. */
-
-#define PHM_READ_ARRAY_REGISTER(device, reg, offset)   \
-       cgs_read_register(device, mm##reg + (offset))
-
-#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value)   \
-       cgs_write_register(device, mm##reg + (offset), value)
-
-#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask)       \
-       PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask)       \
-       PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
-       PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
-
-#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)   \
-       PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset,    \
-                       PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset),      \
-                               reg, field, fieldvalue))
-
-#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)    \
-       PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset),        \
-                       (fieldvalue) << PHM_FIELD_SHIFT(reg, field),    \
-                       PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue)    \
-       PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset),        \
-                       (fieldvalue) << PHM_FIELD_SHIFT(reg, field),    \
-                       PHM_FIELD_MASK(reg, field))
 
 #endif /* _HWMGR_H_ */
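
Of the register-wait helpers, only the indirect-register variants survive
this cull. For reference, a sketch of how the remaining macros expand; REG
and FIELD are placeholder tokens standing in for names that have matching
ixREG and REG__FIELD__SHIFT/_MASK defines (the _MASK spelling is assumed
from the PHM_FIELD_SHIFT pattern above):

	/* PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, REG, FIELD, 1) expands to: */
	phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX, ixREG,
				      1 << REG__FIELD__SHIFT,
				      REG__FIELD_MASK);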
index f497e7d98e6d639828efc299e2f05b24abfd12ed..0de4436123121b8ae2b07a20988709c9be456648 100644 (file)
@@ -23,8 +23,7 @@
 #ifndef _POLARIS10_PWRVIRUS_H
 #define _POLARIS10_PWRVIRUS_H
 
-#define mmSMC_IND_INDEX_11                              0x01AC
-#define mmSMC_IND_DATA_11                               0x01AD
+
 #define mmCP_HYP_MEC1_UCODE_ADDR       0xf81a
 #define mmCP_HYP_MEC1_UCODE_DATA       0xf81b
 #define mmCP_HYP_MEC2_UCODE_ADDR       0xf81c
index a3f0ce4d58355395ad04b1f287369a15f162daa0..9ceaed9ac52aac4ed9c1874ef441965f26cd3ca4 100644 (file)
@@ -158,7 +158,7 @@ struct pp_power_state {
 
 
 /*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
        uint32_t evclk;
        uint32_t ecclk;
        uint32_t sclk;
@@ -171,30 +171,28 @@ enum PP_MMProfilingState {
        PP_MMProfilingState_Stopped
 };
 
-struct PP_Clock_Engine_Request {
-       unsigned long clientType;
-       unsigned long ctxid;
+struct pp_clock_engine_request {
+       unsigned long client_type;
+       unsigned long ctx_id;
        uint64_t  context_handle;
        unsigned long sclk;
-       unsigned long sclkHardMin;
+       unsigned long sclk_hard_min;
        unsigned long mclk;
        unsigned long iclk;
        unsigned long evclk;
        unsigned long ecclk;
-       unsigned long ecclkHardMin;
+       unsigned long ecclk_hard_min;
        unsigned long vclk;
        unsigned long dclk;
-       unsigned long samclk;
-       unsigned long acpclk;
-       unsigned long sclkOverdrive;
-       unsigned long mclkOverdrive;
+       unsigned long sclk_over_drive;
+       unsigned long mclk_over_drive;
        unsigned long sclk_threshold;
        unsigned long flag;
        unsigned long vclk_ceiling;
        unsigned long dclk_ceiling;
        unsigned long num_cus;
-       unsigned long pmflag;
-       enum PP_MMProfilingState MMProfilingState;
+       unsigned long pm_flag;
+       enum PP_MMProfilingState mm_profiling_state;
 };
 
 #endif
index d7d83b7c7f955148f8deb3c3ce6a108db40883d6..bfdbec10cdd5d9dddb2d55ec196684f3711b3b5f 100644 (file)
@@ -43,5 +43,8 @@
        } while (0)
 
 
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)   \
+       (type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
 #endif /* PP_DEBUG_H */
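
GET_FLEXIBLE_ARRAY_MEMBER_ADDR computes the address of element n of a
trailing array member with plain byte arithmetic, which is handy when the
array is declared with a nominal size of 1 but really holds more entries.
A sketch with invented names:

	/* Illustration only: example_table/entries are made-up names. */
	struct example_table {
		uint32_t count;
		uint32_t entries[1];	/* really 'count' entries long */
	};

	static uint32_t *example_entry(struct example_table *t, uint32_t n)
	{
		/* == (uint32_t *)((char *)&t->entries + sizeof(uint32_t) * n) */
		return GET_FLEXIBLE_ARRAY_MEMBER_ADDR(uint32_t, entries, t, n);
	}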
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644 (file)
index 0000000..71c9b2d
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+  SID_OPTION_HI,
+  SID_OPTION_LO,
+  SID_OPTION_COUNT
+};
+
+typedef struct {
+  uint32_t high;
+  uint32_t low;
+} data_64_t;
+
+typedef struct {
+  data_64_t high;
+  data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU71_MAX_LEVELS_VDDC            8
+#define SMU71_MAX_LEVELS_VDDCI           4
+#define SMU71_MAX_LEVELS_MVDD            4
+#define SMU71_MAX_LEVELS_VDDNB           8
+
+#define SMU71_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO           32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
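
Each SCRATCH_B field is a 3-bit index packed into a single scratch register,
so decoding is the usual mask-then-shift. A minimal sketch:

	/* Sketch: current PCIe DPM index from a SCRATCH_B value. */
	static inline uint32_t scratch_b_curr_pcie_index(uint32_t scratch_b)
	{
		return (scratch_b & SCRATCH_B_CURR_PCIE_INDEX_MASK) >>
		       SCRATCH_B_CURR_PCIE_INDEX_SHIFT;
	}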
+
+
+#if defined SMU__DGPU_ONLY
+#define SMU71_DTE_ITERATIONS 5
+#define SMU71_DTE_SOURCES 3
+#define SMU71_DTE_SINKS 1
+#define SMU71_NUM_CPU_TES 0
+#define SMU71_NUM_GPU_TES 1
+#define SMU71_NUM_NON_TES 2
+
+#endif
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+#endif
+
+struct SMU71_PIDController
+{
+    uint32_t Ki;
+    int32_t LFWindupUpperLim;
+    int32_t LFWindupLowerLim;
+    uint32_t StatePrecision;
+    uint32_t LfPrecision;
+    uint32_t LfOffset;
+    uint32_t MaxState;
+    uint32_t MaxLfFraction;
+    uint32_t StateShift;
+};
+
+typedef struct SMU71_PIDController SMU71_PIDController;
+
+struct SMU7_LocalDpmScoreboard
+{
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfSclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  GfxClkSlow;
+    uint8_t  GpioClampMode;
+
+    uint8_t  FpsFilterWeight;
+    uint8_t  EnabledLevelsChange;
+    uint8_t  DteClampMode;
+    uint8_t  FpsClampMode;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint8_t  FpsEnabled;
+    uint8_t  MaxPerfLevel;
+    uint8_t  AllowLowClkInterruptToHost;
+    uint8_t  FpsRunning;
+
+    uint32_t MaxAllowedFrequency;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+struct SMU7_VoltageScoreboard
+{
+    uint16_t CurrentVoltage;
+    uint16_t HighestVoltage;
+    uint16_t MaxVid;
+    uint8_t  HighestVidOffset;
+    uint8_t  CurrentVidOffset;
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  CurrentPhases;
+    uint8_t  HighestPhases;
+#else
+    uint8_t  AvsOffset;
+    uint8_t  AvsOffsetApplied;
+#endif
+    uint8_t  ControllerBusy;
+    uint8_t  CurrentVid;
+    uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS];
+#endif
+    uint8_t  EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+    uint8_t  TargetIndex;
+    uint8_t  Delay;
+    uint8_t  ControllerEnable;
+    uint8_t  ControllerRunning;
+    uint16_t CurrentStdVoltageHiSidd;
+    uint16_t CurrentStdVoltageLoSidd;
+#if defined (SMU__DGPU_ONLY)
+    uint16_t RequestedVddci;
+    uint16_t CurrentVddci;
+    uint16_t HighestVddci;
+    uint8_t  CurrentVddciVid;
+    uint8_t  TargetVddciIndex;
+#endif
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard
+{
+    uint8_t     DpmEnable;
+    uint8_t     DpmRunning;
+    uint8_t     DpmForce;
+    uint8_t     DpmForceLevel;
+
+    uint8_t     CurrentLinkSpeed;
+    uint8_t     EnabledLevelsChange;
+    uint16_t    AutoDpmInterval;
+
+    uint16_t    AutoDpmRange;
+    uint16_t    AutoDpmCount;
+
+    uint8_t     DpmMode;
+    uint8_t     AcpiReq;
+    uint8_t     AcpiAck;
+    uint8_t     CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+// -------------------------------------------------------- CAC table ------------------------------------------------------
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I  7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard
+{
+    uint16_t   MinVoltage;
+    uint16_t   MaxVoltage;
+
+    uint32_t   AvgGpuPower;
+
+    uint16_t   VddcLeakagePower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkConstantPower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcNonSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalCurrent[SID_OPTION_COUNT];
+    uint16_t   VddcLoadVoltage[SID_OPTION_COUNT];
+    uint16_t   VddcNoLoadVoltage[SID_OPTION_COUNT];
+
+    uint16_t   DisplayPhyPower;
+    uint16_t   PciePhyPower;
+
+    uint16_t   VddciTotalPower;
+    uint16_t   Vddr1TotalPower;
+
+    uint32_t   RocPower;
+
+    uint32_t   last_power;
+    uint32_t   enableWinAvg;
+
+    uint32_t   lkg_acc;
+    uint16_t   VoltLkgeScaler;
+    uint16_t   TempLkgeScaler;
+
+    uint32_t   uvd_cac_dclk;
+    uint32_t   uvd_cac_vclk;
+    uint32_t   vce_cac_eclk;
+    uint32_t   samu_cac_samclk;
+    uint32_t   display_cac_dispclk;
+    uint32_t   acp_cac_aclk;
+    uint32_t   unb_cac;
+
+    uint32_t   WinTime;
+
+    uint16_t  GpuPwr_MAWt;
+    uint16_t  FilteredVddcTotalPower;
+
+    uint8_t   CalculationRepeats;
+    uint8_t   WaterfallUp;
+    uint8_t   WaterfallDown;
+    uint8_t   WaterfallLimit;
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+// --------------------------------------------------------------------------------------------------
+
+struct SMU7_ThermalScoreboard
+{
+   int16_t  GpuLimit;
+   int16_t  GpuHyst;
+   uint16_t CurrGnbTemp;
+   uint16_t FilteredGnbTemp;
+   uint8_t  ControllerEnable;
+   uint8_t  ControllerRunning;
+   uint8_t  WaterfallUp;
+   uint8_t  WaterfallDown;
+   uint8_t  WaterfallLimit;
+   uint8_t  padding[3];
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+// For FeatureEnables:
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+// All 'soft registers' should be uint32_t.
+struct SMU71_SoftRegisters
+{
+    uint32_t        RefClockFrequency;
+    uint32_t        PmTimerPeriod;
+    uint32_t        FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+    uint32_t        PreVBlankGap;
+    uint32_t        VBlankTimeout;
+    uint32_t        TrainTimeGap;
+    uint32_t        MvddSwitchTime;
+    uint32_t        LongestAcpiTrainTime;
+    uint32_t        AcpiDelay;
+    uint32_t        G5TrainTime;
+    uint32_t        DelayMpllPwron;
+    uint32_t        VoltageChangeTimeout;
+#endif
+    uint32_t        HandshakeDisables;
+
+    uint8_t         DisplayPhy1Config;
+    uint8_t         DisplayPhy2Config;
+    uint8_t         DisplayPhy3Config;
+    uint8_t         DisplayPhy4Config;
+
+    uint8_t         DisplayPhy5Config;
+    uint8_t         DisplayPhy6Config;
+    uint8_t         DisplayPhy7Config;
+    uint8_t         DisplayPhy8Config;
+
+    uint32_t        AverageGraphicsActivity;
+    uint32_t        AverageMemoryActivity;
+    uint32_t        AverageGioActivity;
+
+    uint8_t         SClkDpmEnabledLevels;
+    uint8_t         MClkDpmEnabledLevels;
+    uint8_t         LClkDpmEnabledLevels;
+    uint8_t         PCIeDpmEnabledLevels;
+
+    uint32_t        DRAM_LOG_ADDR_H;
+    uint32_t        DRAM_LOG_ADDR_L;
+    uint32_t        DRAM_LOG_PHY_ADDR_H;
+    uint32_t        DRAM_LOG_PHY_ADDR_L;
+    uint32_t        DRAM_LOG_BUFF_SIZE;
+    uint32_t        UlvEnterCount;
+    uint32_t        UlvTime;
+    uint32_t        UcodeLoadStatus;
+    uint8_t         DPMFreezeAndForced;
+    uint8_t         Activity_Weight;
+    uint8_t         Reserved8[2];
+    uint32_t        Reserved;
+};
+
+typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
+
+struct SMU71_Firmware_Header
+{
+    uint32_t Digest[5];
+    uint32_t Version;
+    uint32_t HeaderSize;
+    uint32_t Flags;
+    uint32_t EntryPoint;
+    uint32_t CodeSize;
+    uint32_t ImageSize;
+
+    uint32_t Rtos;
+    uint32_t SoftRegisters;
+    uint32_t DpmTable;
+    uint32_t FanTable;
+    uint32_t CacConfigTable;
+    uint32_t CacStatusTable;
+
+    uint32_t mcRegisterTable;
+
+    uint32_t mcArbDramTimingTable;
+
+    uint32_t PmFuseTable;
+    uint32_t Globals;
+    uint32_t UvdDpmTable;
+    uint32_t AcpDpmTable;
+    uint32_t VceDpmTable;
+    uint32_t SamuDpmTable;
+    uint32_t UlvSettings;
+    uint32_t Reserved[37];
+    uint32_t Signature;
+};
+
+typedef struct SMU71_Firmware_Header SMU71_Firmware_Header;
+
+struct SMU7_HystController_Data
+{
+    uint8_t waterfall_up;
+    uint8_t waterfall_down;
+    uint8_t pstate;
+    uint8_t clamp_mode;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000
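
Members of SMU71_Firmware_Header such as SoftRegisters and DpmTable hold SMC
SRAM addresses, with the header itself at SMU71_FIRMWARE_HEADER_LOCATION. A
sketch of the usual lookup; read_smc_sram_dword() is a stand-in for whatever
SRAM accessor the SMU manager provides, not something defined in this header:

	uint32_t dpm_table_addr;

	/* hypothetical accessor */
	read_smc_sram_dword(smumgr,
			    SMU71_FIRMWARE_HEADER_LOCATION +
			    offsetof(SMU71_Firmware_Header, DpmTable),
			    &dpm_table_addr);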
+
+enum  DisplayConfig {
+    PowerDown = 1,
+    DP54x4,
+    DP54x2,
+    DP54x1,
+    DP27x4,
+    DP27x2,
+    DP27x1,
+    HDMI297,
+    HDMI162,
+    LVDS,
+    DP324x4,
+    DP324x2,
+    DP324x1
+};
+
+//#define SX_BLOCK_COUNT 8
+//#define MC_BLOCK_COUNT 1
+//#define CPL_BLOCK_COUNT 27
+
+#if defined SMU__VARIANT__ICELAND
+  #define SX_BLOCK_COUNT 8
+  #define MC_BLOCK_COUNT 1
+  #define CPL_BLOCK_COUNT 29
+#endif
+
+struct SMU7_Local_Cac {
+  uint8_t BlockId;
+  uint8_t SignalId;
+  uint8_t Threshold;
+  uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+  SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT];
+  SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+  SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
new file mode 100644 (file)
index 0000000..c0e3936
--- /dev/null
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_DISCRETE_H
+#define SMU71_DISCRETE_H
+
+#include "smu71.h"
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define VDDC_ON_SVI2  0x1
+#define VDDCI_ON_SVI2 0x2
+#define MVDD_ON_SVI2  0x4
+
+struct SMU71_Discrete_VoltageLevel
+{
+    uint16_t    Voltage;
+    uint16_t    StdVoltageHiSidd;
+    uint16_t    StdVoltageLoSidd;
+    uint8_t     Smio;
+    uint8_t     padding;
+};
+
+typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel;
+
+struct SMU71_Discrete_GraphicsLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+
+    uint32_t    SclkFrequency;
+
+    uint8_t     pcieDpmLevel;
+    uint8_t     DeepSleepDivId;
+    uint16_t    ActivityLevel;
+
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     EnabledForActivity;
+    uint8_t     EnabledForThrottle;
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     PowerThrottle;
+};
+
+typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel;
+
+struct SMU71_Discrete_ACPILevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    SclkFrequency;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding;
+    uint32_t    CgSpllFuncCntl;
+    uint32_t    CgSpllFuncCntl2;
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+};
+
+typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel;
+
+struct SMU71_Discrete_Ulv
+{
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint16_t    VddcOffset;
+    uint8_t     VddcOffsetVid;
+    uint8_t     VddcPhase;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv;
+
+struct SMU71_Discrete_MemoryLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    MinVddci;
+    uint32_t    MinMvdd;
+
+    uint32_t    MclkFrequency;
+
+    uint8_t     EdcReadEnable;
+    uint8_t     EdcWriteEnable;
+    uint8_t     RttEnable;
+    uint8_t     StutterEnable;
+
+    uint8_t     StrobeEnable;
+    uint8_t     StrobeRatio;
+    uint8_t     EnabledForThrottle;
+    uint8_t     EnabledForActivity;
+
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     padding;
+
+    uint16_t    ActivityLevel;
+    uint8_t     DisplayWatermark;
+    uint8_t     padding1;
+
+    uint32_t    MpllFuncCntl;
+    uint32_t    MpllFuncCntl_1;
+    uint32_t    MpllFuncCntl_2;
+    uint32_t    MpllAdFuncCntl;
+    uint32_t    MpllDqFuncCntl;
+    uint32_t    MclkPwrmgtCntl;
+    uint32_t    DllCntl;
+    uint32_t    MpllSs1;
+    uint32_t    MpllSs2;
+};
+
+typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel;
+
+struct SMU71_Discrete_LinkLevel
+{
+    uint8_t     PcieGenSpeed;           ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
+    uint8_t     PcieLaneCount;          ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+    uint8_t     EnabledForActivity;
+    uint8_t     SPC;
+    uint32_t    DownThreshold;
+    uint32_t    UpThreshold;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel;
+
+
+#ifdef SMU__DYNAMIC_MCARB_SETTINGS
+// MC ARB DRAM Timing registers.
+struct SMU71_Discrete_MCArbDramTimingTableEntry
+{
+    uint32_t McArbDramTiming;
+    uint32_t McArbDramTiming2;
+    uint8_t  McArbBurstTime;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU71_Discrete_MCArbDramTimingTable
+{
+    SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable;
+#endif
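
The MC ARB table is two-dimensional on purpose: DRAM timings depend on the
(SCLK state, MCLK level) pair, so a lookup indexes by both DPM indices:

	/* Sketch: timings for the current sclk/mclk pair. */
	const SMU71_Discrete_MCArbDramTimingTableEntry *e =
		&table->entries[sclk_index][mclk_index];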
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU71_Discrete_UvdLevel
+{
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint16_t MinVddc;
+    uint8_t  MinVddcPhases;
+    uint8_t  VclkDivider;
+    uint8_t  DclkDivider;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU71_Discrete_ExtClkLevel
+{
+    uint32_t Frequency;
+    uint16_t MinVoltage;
+    uint8_t  MinPhases;
+    uint8_t  Divider;
+};
+
+typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel;
+
+// Everything we need to keep track of about the current state.
+// Use this instead of copies of the GraphicsLevel and MemoryLevel structures
+// to track the state parameters that need to be checked later.
+// We don't need to cache everything about a state, just a few parameters.
+struct SMU71_Discrete_StateInfo
+{
+    uint32_t SclkFrequency;
+    uint32_t MclkFrequency;
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint32_t SamclkFrequency;
+    uint32_t AclkFrequency;
+    uint32_t EclkFrequency;
+    uint16_t MvddVoltage;
+    uint16_t padding16;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+    uint8_t  McRegIndex;
+    uint8_t  SeqIndex;
+    uint8_t  SclkDid;
+    int8_t   SclkIndex;
+    int8_t   MclkIndex;
+    uint8_t  PCIeGen;
+
+};
+
+typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo;
+
+
+struct SMU71_Discrete_DpmTable
+{
+    // Multi-DPM controller settings
+    SMU71_PIDController                  GraphicsPIDController;
+    SMU71_PIDController                  MemoryPIDController;
+    SMU71_PIDController                  LinkPIDController;
+
+    uint32_t                            SystemFlags;
+
+    // SMIO masks for voltage and phase controls
+    uint32_t                            SmioMaskVddcVid;
+    uint32_t                            SmioMaskVddcPhase;
+    uint32_t                            SmioMaskVddciVid;
+    uint32_t                            SmioMaskMvddVid;
+
+    uint32_t                            VddcLevelCount;
+    uint32_t                            VddciLevelCount;
+    uint32_t                            MvddLevelCount;
+
+    SMU71_Discrete_VoltageLevel          VddcLevel               [SMU71_MAX_LEVELS_VDDC];
+    SMU71_Discrete_VoltageLevel          VddciLevel              [SMU71_MAX_LEVELS_VDDCI];
+    SMU71_Discrete_VoltageLevel          MvddLevel               [SMU71_MAX_LEVELS_MVDD];
+
+    uint8_t                             GraphicsDpmLevelCount;
+    uint8_t                             MemoryDpmLevelCount;
+    uint8_t                             LinkLevelCount;
+    uint8_t                             MasterDeepSleepControl;
+
+    uint32_t                            Reserved[5];
+
+    // State table entries for each DPM state
+    SMU71_Discrete_GraphicsLevel         GraphicsLevel           [SMU71_MAX_LEVELS_GRAPHICS];
+    SMU71_Discrete_MemoryLevel           MemoryACPILevel;
+    SMU71_Discrete_MemoryLevel           MemoryLevel             [SMU71_MAX_LEVELS_MEMORY];
+    SMU71_Discrete_LinkLevel             LinkLevel               [SMU71_MAX_LEVELS_LINK];
+    SMU71_Discrete_ACPILevel             ACPILevel;
+
+    uint32_t                            SclkStepSize;
+    uint32_t                            Smio                    [SMU71_MAX_ENTRIES_SMIO];
+
+    uint8_t                             GraphicsBootLevel;
+    uint8_t                             GraphicsVoltageChangeEnable;
+    uint8_t                             GraphicsThermThrottleEnable;
+    uint8_t                             GraphicsInterval;
+
+    uint8_t                             VoltageInterval;
+    uint8_t                             ThermalInterval;
+    uint16_t                            TemperatureLimitHigh;
+
+    uint16_t                            TemperatureLimitLow;
+    uint8_t                             MemoryBootLevel;
+    uint8_t                             MemoryVoltageChangeEnable;
+
+    uint8_t                             MemoryInterval;
+    uint8_t                             MemoryThermThrottleEnable;
+    uint8_t                             MergedVddci;
+    uint8_t                             padding2;
+
+    uint16_t                            VoltageResponseTime;
+    uint16_t                            PhaseResponseTime;
+
+    uint8_t                             PCIeBootLinkLevel;
+    uint8_t                             PCIeGenInterval;
+    uint8_t                             DTEInterval;
+    uint8_t                             DTEMode;
+
+    uint8_t                             SVI2Enable;
+    uint8_t                             VRHotGpio;
+    uint8_t                             AcDcGpio;
+    uint8_t                             ThermGpio;
+
+    uint32_t                            DisplayCac;
+
+    uint16_t                            MaxPwr;
+    uint16_t                            NomPwr;
+
+    uint16_t                            FpsHighThreshold;
+    uint16_t                            FpsLowThreshold;
+
+    uint16_t                            BAPMTI_R  [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+    uint16_t                            BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+
+    uint8_t                             DTEAmbientTempBase;
+    uint8_t                             DTETjOffset;
+    uint8_t                             GpuTjMax;
+    uint8_t                             GpuTjHyst;
+
+    uint16_t                            BootVddc;
+    uint16_t                            BootVddci;
+
+    uint16_t                            BootMVdd;
+    uint16_t                            padding;
+
+    uint32_t                            BAPM_TEMP_GRADIENT;
+
+    uint32_t                            LowSclkInterruptThreshold;
+    uint32_t                            VddGfxReChkWait;
+
+    uint16_t                            PPM_PkgPwrLimit;
+    uint16_t                            PPM_TemperatureLimit;
+
+    uint16_t                            DefaultTdp;
+    uint16_t                            TargetTdp;
+};
+
+typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable;
+
+// --------------------------------------------------- AC Timing Parameters ------------------------------------------------
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY
+
+struct SMU71_Discrete_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress;
+
+struct SMU71_Discrete_MCRegisterSet
+{
+    uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet;
+
+struct SMU71_Discrete_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMU71_Discrete_MCRegisterAddress     address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+    SMU71_Discrete_MCRegisterSet         data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters;
+
+
+// --------------------------------------------------- Fan Table -----------------------------------------------------------
+struct SMU71_Discrete_FanTable
+{
+    uint16_t FdoMode;
+    int16_t  TempMin;
+    int16_t  TempMed;
+    int16_t  TempMax;
+    int16_t  Slope1;
+    int16_t  Slope2;
+    int16_t  FdoMin;
+    int16_t  HystUp;
+    int16_t  HystDown;
+    int16_t  HystSlope;
+    int16_t  TempRespLim;
+    int16_t  TempCurr;
+    int16_t  SlopeCurr;
+    int16_t  PwmCurr;
+    uint32_t RefreshPeriod;
+    int16_t  FdoMax;
+    uint8_t  TempSrc;
+    int8_t   Padding;
+};
+
+typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG             4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT         (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU71_MclkDpmScoreboard
+{
+
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfMclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  MclkSwitchInProgress;
+    uint8_t  MclkSwitchCritical;
+
+    uint8_t  TargetMclkIndex;
+    uint8_t  TargetMvddIndex;
+    uint8_t  MclkSwitchResult;
+
+    uint8_t  EnabledLevelsChange;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint16_t  MclkSwitchingTime;
+    uint8_t padding[2];
+};
+
+typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard;
+
+struct SMU71_UlvScoreboard
+{
+    uint8_t     EnterUlv;
+    uint8_t     ExitUlv;
+    uint8_t     UlvActive;
+    uint8_t     WaitingForUlv;
+    uint8_t     UlvEnable;
+    uint8_t     UlvRunning;
+    uint8_t     UlvMasterEnable;
+    uint8_t     padding;
+    uint32_t    UlvAbortedCount;
+    uint32_t    UlvTimeStamp;
+};
+
+typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard;
+
+struct SMU71_VddGfxScoreboard
+{
+    uint8_t     VddGfxEnable;
+    uint8_t     VddGfxActive;
+    uint8_t     padding[2];
+
+    uint32_t    VddGfxEnteredCount;
+    uint32_t    VddGfxAbortedCount;
+};
+
+typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard;
+
+struct SMU71_AcpiScoreboard {
+  uint32_t SavedInterruptMask[2];
+  uint8_t LastACPIRequest;
+  uint8_t CgBifResp;
+  uint8_t RequestType;
+  uint8_t Padding;
+  SMU71_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard;
+
+
+struct SMU71_Discrete_PmFuses {
+  // dw0-dw1
+  uint8_t BapmVddCVidHiSidd[8];
+
+  // dw2-dw3
+  uint8_t BapmVddCVidLoSidd[8];
+
+  // dw4-dw5
+  uint8_t VddCVid[8];
+
+  // dw6
+  uint8_t SviLoadLineEn;
+  uint8_t SviLoadLineVddC;
+  uint8_t SviLoadLineTrimVddC;
+  uint8_t SviLoadLineOffsetVddC;
+
+  // dw7
+  uint16_t TDC_VDDC_PkgLimit;
+  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+  uint8_t TDC_MAWt;
+
+  // dw8
+  uint8_t TdcWaterfallCtl;
+  uint8_t LPMLTemperatureMin;
+  uint8_t LPMLTemperatureMax;
+  uint8_t Reserved;
+
+  // dw9-dw12
+  uint8_t LPMLTemperatureScaler[16];
+
+  // dw13-dw14
+  int16_t FuzzyFan_ErrorSetDelta;
+  int16_t FuzzyFan_ErrorRateSetDelta;
+  int16_t FuzzyFan_PwmSetDelta;
+  uint16_t Reserved6;
+
+  // dw15-dw18
+  uint8_t GnbLPML[16];
+
+  // dw19
+  uint8_t GnbLPMLMaxVid;
+  uint8_t GnbLPMLMinVid;
+  uint8_t Reserved1[2];
+
+  // dw20
+  uint16_t BapmVddCBaseLeakageHiSidd;
+  uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses;
+
+struct SMU71_Discrete_Log_Header_Table {
+  uint32_t    version;
+  uint32_t    asic_id;
+  uint16_t    flags;
+  uint16_t    entry_size;
+  uint32_t    total_size;
+  uint32_t    num_of_entries;
+  uint8_t     type;
+  uint8_t     mode;
+  uint8_t     filler_0[2];
+  uint32_t    filler_1[2];
+};
+
+typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table;
+
+struct SMU71_Discrete_Log_Cntl {
+    uint8_t             Enabled;
+    uint8_t             Type;
+    uint8_t             padding[2];
+    uint32_t            BufferSize;
+    uint32_t            SamplesLogged;
+    uint32_t            SampleSize;
+    uint32_t            AddrL;
+    uint32_t            AddrH;
+};
+
+typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+  #define CAC_ACC_NW_NUM_OF_SIGNALS 83
+#endif
+
+
+struct SMU71_Discrete_Cac_Collection_Table {
+  uint32_t temperature;
+  uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+  uint32_t filler[4];
+};
+
+typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table;
+
+struct SMU71_Discrete_Cac_Verification_Table {
+  uint32_t VddcTotalPower;
+  uint32_t VddcLeakagePower;
+  uint32_t VddcConstantPower;
+  uint32_t VddcGfxDynamicPower;
+  uint32_t VddcUvdDynamicPower;
+  uint32_t VddcVceDynamicPower;
+  uint32_t VddcAcpDynamicPower;
+  uint32_t VddcPcieDynamicPower;
+  uint32_t VddcDceDynamicPower;
+  uint32_t VddcCurrent;
+  uint32_t VddcVoltage;
+  uint32_t VddciTotalPower;
+  uint32_t VddciLeakagePower;
+  uint32_t VddciConstantPower;
+  uint32_t VddciDynamicPower;
+  uint32_t Vddr1TotalPower;
+  uint32_t Vddr1LeakagePower;
+  uint32_t Vddr1ConstantPower;
+  uint32_t Vddr1DynamicPower;
+  uint32_t spare[8];
+  uint32_t temperature;
+};
+
+typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
new file mode 100644 (file)
index 0000000..65eb630
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_COMMON_H
+#define _PP_COMMON_H
+
+#include "smu7_ppsmc.h"
+#include "cgs_common.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+#include "smu74.h"
+#include "smu74_discrete.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
new file mode 100644 (file)
index 0000000..bce0009
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DGPU_VI_PP_SMC_H
+#define DGPU_VI_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    0x01
+#define PPSMC_STATEFLAG_POWERBOOST         0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+       FAN_CONTROL_FUZZY,
+       FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK             ((uint16_t)0x01)
+#define PPSMC_Result_NoMore         ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow         ((uint16_t)0x03)
+#define PPSMC_Result_Failed         ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
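
PPSMC_isERROR() keys off bit 7, so Failed (0xFF), UnknownCmd (0xFE) and
UnknownVT (0xFD) all test as errors while OK/NoMore/NotNow do not. A sketch
of the check; send_msg_to_smc() is a hypothetical transport:

	PPSMC_Result res = send_msg_to_smc(smumgr, PPSMC_MSG_Halt);
	if (PPSMC_isERROR(res))
		return -EIO;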
+
+
+#define PPSMC_MSG_Halt                      ((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           ((uint16_t)0x5A)
+#define PPSMC_StartFanControl               ((uint16_t)0x5B)
+#define PPSMC_StopFanControl                ((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     ((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            ((uint16_t)0x6B)
+#define PPSMC_OCPActive                     ((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start     ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels          ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults           ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump      ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode           ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                   ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                  ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress          ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc        ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc         ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc         ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                       ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                        ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt          ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config                  ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start         ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD            ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD              ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop          ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config              ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable         ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable         ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service               ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF                 ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON                  ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS          ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS           ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled        ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF                 ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON                  ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown           ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp             ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown    ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp      ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff              ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn               ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment  ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify      ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM                  ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                 ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable                 ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer                  ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config             ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request               ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown        ((uint16_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp          ((uint16_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config              ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config               ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config               ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config               ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config              ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask       ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask       ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask      ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState          ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel       ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable        ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit                 ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable        ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable              ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF                 ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON                  ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF                ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON                 ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable             ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF               ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON                ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal          ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance      ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce               ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask      ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask      ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel          ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel        ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt     ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt    ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC                  ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF                ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON                 ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable                  ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable                 ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable              ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable             ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable              ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable             ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable               ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable              ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable              ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable             ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable               ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable              ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable               ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable              ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask      ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode                ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode           ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request                ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask      ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask      ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask      ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask       ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask      ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask       ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask       ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask      ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask      ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable              ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable             ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode         ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH            ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS            ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH           ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS           ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart            ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample           ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON             ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON             ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON             ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON              ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON             ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON              ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON              ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON             ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON           ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF          ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER         ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON           ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF          ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON          ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF         ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON        ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF       ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT     ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable           ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable          ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit              ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp       ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel         ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel       ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel         ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel       ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING          ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING           ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt    ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc       ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci      ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1              ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2              ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1              ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2              ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable    ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable   ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax                    ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish     ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency        ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency        ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy             ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy             ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower            ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax                ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget            ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm                ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget     ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor           ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel                 ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx                ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx               ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow             ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh            ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus             ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes                  ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify            ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI      ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO      ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI          ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO          ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios                   ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion             ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry                  ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit                   ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature    ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature   ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning             ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData                ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable  ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope          ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset         ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope          ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset         ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs                  ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs                 ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc                  ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid             ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme            ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm               ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm                 ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm                  ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function            ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
+
+#define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData                     ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData                     ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     0x00000002
+#define PPSMC_EVENT_STATUS_DC               0x00000004
+
+#pragma pack(pop)
+
+#endif
+
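A note on the result convention above: every failure code (Failed, UnknownCmd, UnknownVT) has bit 7 set, which is exactly what PPSMC_isERROR() tests, while the success-side codes (OK, NoMore, NotNow) stay below 0x80. A minimal sketch of mapping these results onto kernel error codes; read_smc_response() is a hypothetical helper, only the macros come from the header above:

    #include <linux/errno.h>

    /* Hypothetical helper: fetch the SMC response after posting a message. */
    extern PPSMC_Result read_smc_response(struct pp_smumgr *smumgr);

    static int check_smc_result(struct pp_smumgr *smumgr)
    {
            PPSMC_Result res = read_smc_response(smumgr);

            if (PPSMC_isERROR(res))         /* Failed, UnknownCmd or UnknownVT */
                    return -EIO;
            if (res == PPSMC_Result_NotNow) /* busy; the caller may retry */
                    return -EAGAIN;
            return 0;                       /* PPSMC_Result_OK or _NoMore */
    }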
index 3c235f0177cd9e0667b9d0deab7860efec7ac061..2139072065cc7da6da4b5bf65fbba19c37b967ca 100644 (file)
@@ -28,6 +28,7 @@
 
 struct pp_smumgr;
 struct pp_instance;
+struct pp_hwmgr;
 
 #define smu_lower_32_bits(n) ((uint32_t)(n))
 #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS {
        AVFS_BTC_SMUMSG_ERROR
 };
 
+enum SMU_TABLE {
+       SMU_UVD_TABLE = 0,
+       SMU_VCE_TABLE,
+       SMU_SAMU_TABLE,
+       SMU_BIF_TABLE,
+};
+
+enum SMU_TYPE {
+       SMU_SoftRegisters = 0,
+       SMU_Discrete_DpmTable,
+};
+
+enum SMU_MEMBER {
+       HandshakeDisables = 0,
+       VoltageChangeTimeout,
+       AverageGraphicsActivity,
+       PreVBlankGap,
+       VBlankTimeout,
+       UcodeLoadStatus,
+       UvdBootLevel,
+       VceBootLevel,
+       SamuBootLevel,
+       LowSclkInterruptThreshold,
+};
+
+
+enum SMU_MAC_DEFINITION {
+       SMU_MAX_LEVELS_GRAPHICS = 0,
+       SMU_MAX_LEVELS_MEMORY,
+       SMU_MAX_LEVELS_LINK,
+       SMU_MAX_ENTRIES_SMIO,
+       SMU_MAX_LEVELS_VDDC,
+       SMU_MAX_LEVELS_VDDGFX,
+       SMU_MAX_LEVELS_VDDCI,
+       SMU_MAX_LEVELS_MVDD,
+       SMU_UVD_MCLK_HANDSHAKE_DISABLE,
+};
+
+
 struct pp_smumgr_func {
        int (*smu_init)(struct pp_smumgr *smumgr);
        int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,12 +109,23 @@ struct pp_smumgr_func {
        int (*download_pptable_settings)(struct pp_smumgr *smumgr,
                                         void **table);
        int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
+       int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+       int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+       int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+       int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+       int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+       int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+       int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+       int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+       int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+       uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+       uint32_t (*get_mac_definition)(uint32_t value);
+       bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_smumgr {
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t hw_revision;
        void *device;
        void *backend;
        uint32_t usec_timeout;
@@ -122,6 +173,30 @@ extern int smu_allocate_memory(void *device, uint32_t size,
 
 extern int smu_free_memory(void *device, void *handle);
 
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+
+extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result);
+extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+                               uint32_t type, uint32_t member);
+extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+
+extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
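The smum_* entry points declared above pair one-to-one with the callbacks added to struct pp_smumgr_func. A plausible sketch of the dispatch pattern (the real bodies live in smumgr.c, which is not part of this hunk; the smumgr_funcs member is assumed here):

    /* Sketch only: forward to the per-ASIC callback when one is registered. */
    int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
    {
            if (hwmgr && hwmgr->smumgr && hwmgr->smumgr->smumgr_funcs &&
                hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
                    return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);

            return 0;       /* treat a missing callback as a no-op */
    }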
index f10fb64ef9813820c3b0e42060c985ac3e80620d..51ff08301651740237327f3a14feb4bb6201bc3b 100644 (file)
@@ -2,7 +2,9 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
+         polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
+         smu7_smumgr.o iceland_smc.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
index 87c023e518ab4e7b7f7a881ebb970dc560095607..5a44485526d2668f35c5fbad98d6d0196828223b 100644 (file)
@@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
        if (result != 0)
                return result;
 
-       result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+       return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
                                        SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
-       if (result != 0)
-               return result;
-
-       return 0;
 }
 
 static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
 
        if (0 != (3 & smc_address)) {
                printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
-               return -1;
+               return -EINVAL;
        }
 
        if (limit <= (smc_address + 3)) {
                printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
-               return -1;
+               return -EINVAL;
        }
 
        cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
                return -EINVAL;
 
        result = cz_set_smc_sram_address(smumgr, smc_address, limit);
-       cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+       if (!result)
+               cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
 
-       return 0;
+       return result;
 }
 
 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
 static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
 {
        struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
-       int result = 0;
        uint32_t smc_address;
 
        if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
        cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
                                cz_smu->toc_entry_power_profiling_index);
 
-       result = cz_send_msg_to_smc_with_parameter(smumgr,
+       return cz_send_msg_to_smc_with_parameter(smumgr,
                                        PPSMC_MSG_ExecuteJob,
                                        cz_smu->toc_entry_initialize_index);
-
-       return result;
 }
 
 static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
        if (smumgr == NULL || smumgr->device == NULL)
                return -EINVAL;
 
-       return cgs_read_register(smumgr->device,
-                                       mmSMU_MP1_SRBM2P_ARG_0);
-
        cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
 
        for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
        if (smumgr->chip_id == CHIP_STONEY)
                fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
 
-       cz_request_smu_load_fw(smumgr);
+       ret = cz_request_smu_load_fw(smumgr);
+       if (ret)
+               printk(KERN_ERR "[ powerplay ] SMU firmware load failed\n");
+
        cz_check_fw_load_finish(smumgr, fw_to_check);
 
        ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
 
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       if (smumgr->chip_id == CHIP_STONEY)
-               cz_smu_populate_single_ucode_load_task(smumgr,
-                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       else
+       if (smumgr->chip_id != CHIP_STONEY)
                cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       if (smumgr->chip_id == CHIP_STONEY)
-               cz_smu_populate_single_ucode_load_task(smumgr,
-                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       else
+       if (smumgr->chip_id != CHIP_STONEY)
                cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
        struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
 
        cz_smu->toc_entry_used_count = 0;
-
        cz_smu_initialize_toc_empty_job_list(smumgr);
-
        cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
        cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
        cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
        cz_smu_construct_toc_for_power_profiling(smumgr);
-
        cz_smu_construct_toc_for_bootup(smumgr);
-
        cz_smu_construct_toc_for_clock_table(smumgr);
 
        return 0;
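The cz_smumgr.c hunks above tighten error handling: bare -1 returns become -EINVAL, the SRAM data write only happens once the indirect address has been accepted, and redundant result juggling collapses into direct returns. The two address checks are worth spelling out, since a dword at addr occupies bytes [addr, addr + 3]; a standalone sketch of the same test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the validity checks in cz_set_smc_sram_address() above:
     * the address must be 4-byte aligned and the whole dword must still
     * fall below the SMC RAM limit. */
    static bool sram_dword_addr_ok(uint32_t addr, uint32_t limit)
    {
            if (addr & 3)           /* not 4-byte aligned */
                    return false;
            if (addr + 3 >= limit)  /* dword would run past SMC RAM */
                    return false;
            return true;
    }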
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
new file mode 100644 (file)
index 0000000..76310ac
--- /dev/null
@@ -0,0 +1,2374 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "fiji_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "fiji_smumgr.h"
+#include "pppcielanes.h"
+#include "smu7_ppsmc.h"
+#include "smu73.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu7_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define VDDC_VDDCI_DELTA            300
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+/* Row selects whether the clock-stretch amount is a multiple of 2.5%
+ * or not; each entry is [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ].
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+                               {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* Indexed by [FF, SS] type and by four voltage ranges; each entry is
+ * [Floor Freq, Boundary Freq, VID min, VID max].
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+                               {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+               /* sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+               {1,               0xF,             0xFD,
+               /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+               0x19,        5,               45}
+};
+
+/* Look up the VDDC/VDDCI voltage and the MVDD value for the given clock
+ * from the clock-voltage dependency table, packing them in the
+ * fixed-point format the SMC firmware expects.
+ */
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+               uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+       uint32_t i;
+       uint16_t vddci;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       *voltage = *mvdd = 0;
+
+
+       /* the clock-voltage dependency table is empty */
+       if (dep_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < dep_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (dep_table->entries[i].clk >= clock) {
+                       *voltage |= (dep_table->entries[i].vddc *
+                                       VOLTAGE_SCALE) << VDDC_SHIFT;
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else if (dep_table->entries[i].vddci)
+                               *voltage |= (dep_table->entries[i].vddci *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else {
+                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               (dep_table->entries[i].vddc -
+                                                               VDDC_VDDCI_DELTA));
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       }
+
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+                                       VOLTAGE_SCALE;
+                       else if (dep_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+                                       VOLTAGE_SCALE;
+
+                       *voltage |= 1 << PHASES_SHIFT;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than the max sclk in the dependency table */
+       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+       else if (dep_table->entries[i-1].vddci) {
+               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                               (dep_table->entries[i - 1].vddc -
+                                               VDDC_VDDCI_DELTA));
+               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+       else if (dep_table->entries[i - 1].mvdd)
+               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+       return 0;
+}
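fiji_get_dependency_volt_by_clk() hands back its voltages in the SMC's packed fixed-point form: each millivolt value is multiplied by VOLTAGE_SCALE (4, i.e. units of 0.25 mV) and shifted into its field, with a phase count in the top bits. A tiny standalone sketch with made-up numbers; the shift values are illustrative stand-ins for the VDDC_SHIFT/VDDCI_SHIFT/PHASES_SHIFT macros from smu7_hwmgr.h:

    #include <stdint.h>
    #include <stdio.h>

    #define VOLTAGE_SCALE 4         /* SMC voltage unit: 0.25 mV */

    int main(void)
    {
            /* Illustrative field positions only. */
            const unsigned vddc_shift = 0, vddci_shift = 15, phases_shift = 30;
            uint16_t vddc_mv = 1150, vddci_mv = 850;
            uint32_t voltage = 0;

            voltage |= (uint32_t)(vddc_mv * VOLTAGE_SCALE) << vddc_shift;
            voltage |= (uint32_t)(vddci_mv * VOLTAGE_SCALE) << vddci_shift;
            voltage |= 1u << phases_shift;  /* single phase, as above */

            printf("packed MinVoltage = 0x%08x\n", voltage);
            return 0;
    }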
+
+
+/* PPGen generates the fan-gain settings in x * 100 units;
+ * convert them to the x * 4096 (0x1000) units expected by the
+ * SMC firmware.
+ */
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+       uint32_t tmp;
+
+       tmp = raw_setting * 4096 / 100;
+       return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+       switch (line) {
+       case SMU7_I2CLineID_DDC1:
+               *scl = SMU7_I2C_DDC1CLK;
+               *sda = SMU7_I2C_DDC1DATA;
+               break;
+       case SMU7_I2CLineID_DDC2:
+               *scl = SMU7_I2C_DDC2CLK;
+               *sda = SMU7_I2C_DDC2DATA;
+               break;
+       case SMU7_I2CLineID_DDC3:
+               *scl = SMU7_I2C_DDC3CLK;
+               *sda = SMU7_I2C_DDC3DATA;
+               break;
+       case SMU7_I2CLineID_DDC4:
+               *scl = SMU7_I2C_DDC4CLK;
+               *sda = SMU7_I2C_DDC4DATA;
+               break;
+       case SMU7_I2CLineID_DDC5:
+               *scl = SMU7_I2C_DDC5CLK;
+               *sda = SMU7_I2C_DDC5DATA;
+               break;
+       case SMU7_I2CLineID_DDC6:
+               *scl = SMU7_I2C_DDC6CLK;
+               *sda = SMU7_I2C_DDC6DATA;
+               break;
+       case SMU7_I2CLineID_SCLSDA:
+               *scl = SMU7_I2C_SCL;
+               *sda = SMU7_I2C_SDA;
+               break;
+       case SMU7_I2CLineID_DDCVGA:
+               *scl = SMU7_I2C_DDCVGACLK;
+               *sda = SMU7_I2C_DDCVGADATA;
+               break;
+       default:
+               *scl = 0;
+               *sda = 0;
+               break;
+       }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (table_info &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID)
+               smu_data->power_tune_defaults =
+                               &fiji_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+       else
+               smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       SMU73_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+       struct pp_advance_fan_control_parameters *fan_table =
+                       &hwmgr->thermal_controller.advanceFanControlParameters;
+       uint8_t uc_scl, uc_sda;
+
+       /* The number of TDP fraction bits changed from 8 to 7 for Fiji,
+        * as requested by the SMC team; hence the multiply by 128 (2^7).
+        */
+       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usTDP * 128));
+       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usTDP * 128));
+
+       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+                       "Target Operating Temp is out of Range!",
+                       );
+
+       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+       dpm_table->GpuTjHyst = 8;
+
+       dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+       /* The following are for new Fiji Multi-input fan/thermal control */
+       dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTargetOperatingTemp * 256);
+       dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitHotspot * 256);
+       dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+       dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+       dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitVrVddc * 256);
+       dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+       dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitPlx * 256);
+
+       dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainEdge));
+       dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
+       dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainLiquid));
+       dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+       dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+       dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainPlx));
+       dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+       dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+       dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+       dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+       dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+       get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Liquid_I2C_LineSCL = uc_scl;
+       dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+       get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Vr_I2C_LineSCL = uc_scl;
+       dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+       get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Plx_I2C_LineSCL = uc_scl;
+       dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+       return 0;
+}
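Two fixed-point conventions meet in this table: TDP values carry 7 fraction bits (hence the multiply by 128) while temperature limits carry 8 (multiply by 256), and both are byte-swapped for the SMC via PP_HOST_TO_SMC_US(). The arithmetic in isolation, with made-up inputs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t usTDP = 180;                   /* watts, from the pptable */
            uint16_t usTargetOperatingTemp = 90;    /* degrees C */

            uint16_t tdp_fixed = (uint16_t)(usTDP * 128);                  /* 9.7 */
            uint16_t temp_fixed = (uint16_t)(usTargetOperatingTemp * 256); /* 8.8 */

            printf("TDP  %3u W -> 0x%04x (9.7 fixed point)\n", usTDP, tdp_fixed);
            printf("Temp %3u C -> 0x%04x (8.8 fixed point)\n",
                   usTargetOperatingTemp, temp_fixed);
            return 0;
    }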
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       /* The number of TDC fraction bits changed from 8 to 7 for Fiji,
+        * as requested by the SMC team; hence the multiply by 128 (2^7).
+        */
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+       return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       fuse_table_offset +
+                       offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+                               return -EINVAL);
+       else {
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+               smu_data->power_tune_table.LPMLTemperatureMin =
+                               (uint8_t)((temp >> 16) & 0xff);
+               smu_data->power_tune_table.LPMLTemperatureMax =
+                               (uint8_t)((temp >> 8) & 0xff);
+               smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+       }
+       return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct pp_advance_fan_control_parameters *fan_params =
+                       &hwmgr->thermal_controller.advanceFanControlParameters;
+
+       /* Bit 15 set, or a value of zero, means "use the default". */
+       if ((fan_params->usFanOutputSensitivity & (1 << 15)) ||
+                       0 == fan_params->usFanOutputSensitivity)
+               fan_params->usFanOutputSensitivity =
+                               fan_params->usDefaultFanOutputSensitivity;
+
+       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+                       PP_HOST_TO_SMC_US(fan_params->usFanOutputSensitivity);
+       return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t HiSidd;
+       uint16_t LoSidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+       return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       uint32_t pm_fuse_table_offset;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU73_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to get pm_fuse_table_offset Failed!",
+                                       return -EINVAL);
+
+               /* DW6 */
+               if (fiji_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate SviLoadLine Failed!",
+                                       return -EINVAL);
+               /* DW7 */
+               if (fiji_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
+               /* DW8 */
+               if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TdcWaterfallCtl, "
+                                       "LPMLTemperature Min and Max Failed!",
+                                       return -EINVAL);
+
+               /* DW9-DW12 */
+               if (0 != fiji_populate_temperature_scaler(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate LPMLTemperatureScaler Failed!",
+                                       return -EINVAL);
+
+               /* DW13-DW14 */
+               if (fiji_populate_fuzzy_fan(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
+                                       return -EINVAL);
+
+               /* DW15-DW18 */
+               if (fiji_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Failed!",
+                                       return -EINVAL);
+
+               /* DW19 */
+               if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Min and Max Vid Failed!",
+                                       return -EINVAL);
+
+               /* DW20 */
+               if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+                                       "Sidd Failed!", return -EINVAL);
+
+               if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed!",
+                                       return -EINVAL);
+       }
+       return 0;
+}
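The repeated PP_ASSERT_WITH_CODE(false, "...", return -EINVAL) calls above read oddly until the macro is spelled out: it logs its message and executes its third argument whenever the condition is false, so a literal false turns it into an unconditional log-and-recover step. Approximately (the real definition lives in pp_debug.h):

    /* Approximate shape of the pp_debug.h macro: log the message and run
     * the recovery code whenever the condition does not hold. */
    #define PP_ASSERT_WITH_CODE(cond, msg, code)        \
            do {                                        \
                    if (!(cond)) {                      \
                            printk("%s\n", msg);        \
                            code;                       \
                    }                                   \
            } while (0)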
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       uint32_t count;
+       uint8_t index;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+                       table_info->vddc_lookup_table;
+       /* The lookup table is already byte-swapped, so to use a value
+        * from it we would have to swap it back.
+        * We populate the vddc CAC data into the BapmVddc table for
+        * both split and merged mode.
+        */
+
+       for (count = 0; count < lookup_table->count; count++) {
+               index = phm_get_voltage_index(lookup_table,
+                               data->vddc_voltage_table.entries[count].value);
+               table->BapmVddcVidLoSidd[count] =
+                       convert_to_vid(lookup_table->entries[index].us_cac_low);
+               table->BapmVddcVidHiSidd[count] =
+                       convert_to_vid(lookup_table->entries[index].us_cac_high);
+       }
+
+       return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always  0
+*/
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = fiji_populate_cac_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate CAC voltage tables to SMC",
+                       return -EINVAL);
+
+       return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_Ulv *state)
+{
+       int result = 0;
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+       state->VddcPhase = 1;
+
+       if (!result) {
+               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+               CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+       }
+       return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       int i;
+
+       /* Index (dpm_table->pcie_speed_table.count)
+        * is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity = 1;
+               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+                       (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+* @return   0 on success; the atomctrl error code otherwise
+*/
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t ref_clock;
+       uint32_t ref_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value */
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error retrieving Engine Clock dividers from VBIOS.",
+                       return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+       ref_clock = atomctrl_get_reference_clock(hwmgr);
+       ref_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* low 14 bits are the fraction and high 12 bits are the divider */
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+       /* SPLL_FUNC_CNTL setup */
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+                       SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+                       SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               struct pp_atomctrl_internal_ss_info ssInfo;
+
+               uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+               if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+                               vco_freq, &ssInfo)) {
+                       /*
+                        * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                        * ss_info.speed_spectrum_rate -- in unit of khz
+                        *
+                        * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+                        */
+                       uint32_t clk_s = ref_clock * 5 /
+                                       (ref_divider * ssInfo.speed_spectrum_rate);
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+                                       fbdiv / (clk_s * 10000);
+
+                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+                                       CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+                                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+                                       CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+               }
+       }
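+       /* Worked example (illustrative, assumed values): with ref_clock = 2500,
+        * ref_divider = 1 and speed_spectrum_rate = 3125,
+        * clk_s = 2500 * 5 / (1 * 3125) = 4; with
+        * speed_spectrum_percentage = 50 (0.5% in 0.01% units) and
+        * fbdiv = 40000, clk_v = 4 * 50 * 40000 / (4 * 10000) = 200.
+        */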
+
+       sclk->SclkFrequency        = clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+/**
+* Populates a single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    clock   the engine clock to use to populate the structure
+* @param    sclk_al_threshold   the activity level threshold for this level
+* @param    level   the SMC graphics level structure to be populated
+*/
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, uint16_t sclk_al_threshold,
+               struct SMU73_Discrete_GraphicsLevel *level)
+{
+       int result;
+       /* PP_Clocks minClocks; */
+       uint32_t threshold, mvdd;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       result = fiji_calculate_sclk_params(hwmgr, clock, level);
+
+       /* populate graphics levels */
+       result = fiji_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk, clock,
+                       (uint32_t *)(&level->MinVoltage), &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "cannot find VDDC voltage value in the "
+                       "VDDC engine clock dependency table",
+                       return result);
+
+       level->SclkFrequency = clock;
+       level->ActivityLevel = sclk_al_threshold;
+       level->CcPwrDynRm = 0;
+       level->CcPwrDynRm1 = 0;
+       level->EnabledForActivity = 0;
+       level->EnabledForThrottle = 1;
+       level->UpHyst = 10;
+       level->DownHyst = 0;
+       level->VoltageDownHyst = 0;
+       level->PowerThrottle = 0;
+
+       threshold = clock * data->fast_watermark_threshold / 100;
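+       /* Illustrative (assumed values): with clock = 30000 (300 MHz in
+        * 10 kHz units) and fast_watermark_threshold = 30, threshold = 9000.
+        */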
+
+       data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+               level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+                                                               hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+       /* Default to slow; the highest DPM level will be
+        * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
+        */
+       level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+       return 0;
+}
+
+/**
+* Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+       uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+       int result = 0;
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+       uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+                       SMU73_MAX_LEVELS_GRAPHICS;
+       struct SMU73_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t i, max_entry;
+       uint8_t highest_pcie_level_enabled = 0,
+                       lowest_pcie_level_enabled = 0,
+                       mid_pcie_level_enabled = 0,
+                       count = 0;
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               result = fiji_populate_single_graphic_level(hwmgr,
+                               dpm_table->sclk_table.dpm_levels[i].value,
+                               (uint16_t)smu_data->activity_target[i],
+                               &levels[i]);
+               if (result)
+                       return result;
+
+               /* Make sure only DPM levels 0-1 have the Deep Sleep divider ID populated. */
+               if (i > 1)
+                       levels[i].DeepSleepDivId = 0;
+       }
+
+       /* Only enable level 0 for now. */
+       levels[0].EnabledForActivity = 1;
+
+       /* set highest level watermark to high */
+       levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+                       (uint8_t)dpm_table->sclk_table.count;
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+       if (pcie_table != NULL) {
+               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+                               "There must be 1 or more PCIE levels defined in PPTable.",
+                               return -EINVAL);
+               max_entry = pcie_entry_cnt - 1;
+               for (i = 0; i < dpm_table->sclk_table.count; i++)
+                       levels[i].pcieDpmLevel =
+                                       (uint8_t) ((i < max_entry) ? i : max_entry);
+       } else {
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << (highest_pcie_level_enabled + 1))) != 0))
+                       highest_pcie_level_enabled++;
+
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << lowest_pcie_level_enabled)) == 0))
+                       lowest_pcie_level_enabled++;
+
+               while ((count < highest_pcie_level_enabled) &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+                       count++;
+
+               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+                               highest_pcie_level_enabled ?
+                                               (lowest_pcie_level_enabled + 1 + count) :
+                                               highest_pcie_level_enabled;
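+
+               /* Illustrative (assumed mask): for pcie_dpm_enable_mask = 0x0E
+                * (bits 1..3 set), the scans above yield
+                * lowest_pcie_level_enabled = 1, highest_pcie_level_enabled = 3
+                * and mid_pcie_level_enabled = 2.
+                */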
+
+               /* set pcieDpmLevel to highest_pcie_level_enabled */
+               for (i = 2; i < dpm_table->sclk_table.count; i++)
+                       levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to lowest_pcie_level_enabled */
+               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to mid_pcie_level_enabled */
+               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+       }
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP  Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 frequency ratio
+ *   0x0:        mclk <= 100MHz      0x8: 450 < mclk <= 500MHz
+ *   0x1: 100 < mclk <= 150MHz       0x9: 500 < mclk <= 550MHz
+ *   0x2: 150 < mclk <= 200MHz       0xA: 550 < mclk <= 600MHz
+ *   0x3: 200 < mclk <= 250MHz       0xB: 600 < mclk <= 650MHz
+ *   0x4: 250 < mclk <= 300MHz       0xC: 650 < mclk <= 700MHz
+ *   0x5: 300 < mclk <= 350MHz       0xD: 700 < mclk <= 750MHz
+ *   0x6: 350 < mclk <= 400MHz       0xE: 750 < mclk <= 800MHz
+ *   0x7: 400 < mclk <= 450MHz       0xF: 800MHz < mclk
+ */
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+       if (mem_clock <= 10000)
+               return 0x0;
+       if (mem_clock <= 15000)
+               return 0x1;
+       if (mem_clock <= 20000)
+               return 0x2;
+       if (mem_clock <= 25000)
+               return 0x3;
+       if (mem_clock <= 30000)
+               return 0x4;
+       if (mem_clock <= 35000)
+               return 0x5;
+       if (mem_clock <= 40000)
+               return 0x6;
+       if (mem_clock <= 45000)
+               return 0x7;
+       if (mem_clock <= 50000)
+               return 0x8;
+       if (mem_clock <= 55000)
+               return 0x9;
+       if (mem_clock <= 60000)
+               return 0xa;
+       if (mem_clock <= 65000)
+               return 0xb;
+       if (mem_clock <= 70000)
+               return 0xc;
+       if (mem_clock <= 75000)
+               return 0xd;
+       if (mem_clock <= 80000)
+               return 0xe;
+       /* mem_clock > 800MHz */
+       return 0xf;
+}
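+/* Usage note: mem_clock is in 10 kHz units, so e.g. an assumed mem_clock of
+ * 30000 (300 MHz) falls in the 250-300 MHz bucket and returns 0x4.
+ */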
+
+/**
+* Populates the SMC MCLK structure using the provided memory clock
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    clock   the memory clock to use to populate the structure
+* @param    mclk    the SMC MCLK structure to be populated
+*/
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+       struct pp_atomctrl_memory_clock_param mem_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Failed to get Memory PLL Dividers.",
+                       );
+
+       /* Save the result data to the output memory level structure */
+       mclk->MclkFrequency   = clock;
+       mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
+       mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
+
+       return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int result = 0;
+       uint32_t mclk_stutter_mode_threshold = 60000;
+
+       if (table_info->vdd_dep_on_mclk) {
+               result = fiji_get_dependency_volt_by_clk(hwmgr,
+                               table_info->vdd_dep_on_mclk, clock,
+                               (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find MinVddc voltage value in the memory "
+                               "VDDC voltage dependency table", return result);
+       }
+
+       mem_level->EnabledForThrottle = 1;
+       mem_level->EnabledForActivity = 0;
+       mem_level->UpHyst = 0;
+       mem_level->DownHyst = 100;
+       mem_level->VoltageDownHyst = 0;
+       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       mem_level->StutterEnable = false;
+
+       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       /* Enable stutter mode only if all of the following conditions apply.
+        * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+        * &(data->DisplayTiming.numExistingDisplays));
+        */
+       data->display_timing.num_existing_displays = 1;
+
+       if (mclk_stutter_mode_threshold &&
+               (clock <= mclk_stutter_mode_threshold) &&
+               (!data->is_uvd_enabled) &&
+               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+                               STUTTER_ENABLE) & 0x1))
+               mem_level->StutterEnable = true;
+
+       result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+       if (!result) {
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+       }
+       return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int result;
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+       uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+                       SMU73_MAX_LEVELS_MEMORY;
+       struct SMU73_Discrete_MemoryLevel *levels =
+                       smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                               "cannot populate memory level because the memory clock is zero",
+                               return -EINVAL);
+               result = fiji_populate_single_memory_level(hwmgr,
+                               dpm_table->mclk_table.dpm_levels[i].value,
+                               &levels[i]);
+               if (result)
+                       return result;
+       }
+
+       /* Only enable level 0 for now. */
+       levels[0].EnabledForActivity = 1;
+
+       /* In order to prevent MC activity from stutter mode pushing DPM up,
+        * the UVD change complements this by putting the MCLK in a higher
+        * state by default such that we are not affected by the up threshold
+        * or MCLK DPM latency.
+        */
+       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount =
+                       (uint8_t)dpm_table->mclk_table.count;
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+       /* set highest level watermark to high */
+       levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr       the address of the hardware manager
+* @param    mclk        the MCLK value used to decide whether MVDD should be high or low
+* @param    smio_pat    the SMIO pattern structure whose voltage is to be populated
+*/
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+               uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find the first MVDD entry whose clock is at least the requested clock */
+               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
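+               /* Illustrative (assumed table): for entries with clk =
+                * {20000, 40000, 60000} and mclk = 30000, the loop selects
+                * i = 1, the first entry whose clock is >= the request.
+                */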
+               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+                               "MVDD Voltage is outside the supported range.",
+                               return -EINVAL);
+       } else
+               return -EINVAL;
+
+       return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = 0;
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       SMIO_Pattern vol_level;
+       uint32_t mvdd;
+       uint16_t us_mvdd;
+       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       if (!data->sclk_dpm_key_disabled) {
+               /* Get MinVoltage and Frequency from DPM0,
+                * already converted to SMC_UL */
+               table->ACPILevel.SclkFrequency =
+                               data->dpm_table.sclk_table.dpm_levels[0].value;
+               result = fiji_get_dependency_volt_by_clk(hwmgr,
+                               table_info->vdd_dep_on_sclk,
+                               table->ACPILevel.SclkFrequency,
+                               (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Cannot find ACPI VDDC voltage value " \
+                               "in Clock Dependency Table",
+                               );
+       } else {
+               table->ACPILevel.SclkFrequency =
+                               data->vbios_boot_state.sclk_bootup_value;
+               table->ACPILevel.MinVoltage =
+                               data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+       }
+
+       /* get the engine clock dividers for this clock value */
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+                       table->ACPILevel.SclkFrequency, &dividers);
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error retrieving Engine Clock dividers from VBIOS.",
+                       return result);
+
+       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+       table->ACPILevel.DeepSleepDivId = 0;
+
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_PWRON, 0);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_RESET, 1);
+       spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+                       SCLK_MUX_SEL, 4);
+
+       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       if (!data->mclk_dpm_key_disabled) {
+               /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+               table->MemoryACPILevel.MclkFrequency =
+                               data->dpm_table.mclk_table.dpm_levels[0].value;
+               result = fiji_get_dependency_volt_by_clk(hwmgr,
+                               table_info->vdd_dep_on_mclk,
+                               table->MemoryACPILevel.MclkFrequency,
+                       (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+                               );
+       } else {
+               table->MemoryACPILevel.MclkFrequency =
+                               data->vbios_boot_state.mclk_bootup_value;
+               table->MemoryACPILevel.MinVoltage =
+                               data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+       }
+
+       us_mvdd = 0;
+       if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+                       (data->mclk_dpm_key_disabled))
+               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+       else {
+               if (!fiji_populate_mvdd_value(hwmgr,
+                               data->dpm_table.mclk_table.dpm_levels[0].value,
+                               &vol_level))
+                       us_mvdd = vol_level.Voltage;
+       }
+
+       table->MemoryACPILevel.MinMvdd =
+                       PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       table->MemoryACPILevel.ActivityLevel =
+                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       table->MemoryACPILevel.StutterEnable = false;
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+       return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->VceLevelCount = (uint8_t)(mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage = 0;
+               table->VceLevel[count].MinVoltage |=
+                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->VceLevel[count].MinVoltage |=
+                               ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve the divider value from the VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find divider ID for the VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->AcpLevelCount = (uint8_t)(mm_table->count);
+       table->AcpBootLevel = 0;
+
+       for (count = 0; count < table->AcpLevelCount; count++) {
+               table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+               /* start from a clean value before OR-ing in the voltage fields */
+               table->AcpLevel[count].MinVoltage = 0;
+               table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve the divider value from the VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->AcpLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find divider ID for the engine clock", return result);
+
+               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].MinVoltage = 0;
+               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve the divider value from the VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find divider ID for the SAMU clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+               int32_t eng_clock, int32_t mem_clock,
+               struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+       uint32_t dram_timing;
+       uint32_t dram_timing2;
+       uint32_t burstTime;
+       ULONG state, trrds, trrdl;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                       eng_clock, mem_clock);
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+       state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+       trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+       trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+       arb_regs->McArbBurstTime   = (uint8_t)burstTime;
+       arb_regs->TRRDS            = (uint8_t)trrds;
+       arb_regs->TRRDL            = (uint8_t)trrdl;
+
+       return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+       uint32_t i, j;
+       int result = 0;
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = fiji_populate_memory_timing_parameters(hwmgr,
+                                       data->dpm_table.sclk_table.dpm_levels[i].value,
+                                       data->dpm_table.mclk_table.dpm_levels[j].value,
+                                       &arb_regs.entries[i][j]);
+                       if (result)
+                               break;
+               }
+       }
+
+       if (!result)
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU73_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END);
+       return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->UvdLevelCount = (uint8_t)(mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].MinVoltage = 0;
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve the divider value from the VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].VclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find divider ID for Vclk", return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find divider ID for Dclk", return result);
+
+               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+       }
+       return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table */
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(table->GraphicsBootLevel));
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+                       data->vbios_boot_state.mclk_bootup_value,
+                       (uint32_t *)&(table->MemoryBootLevel));
+
+       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+                       VOLTAGE_SCALE;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return 0;
+}
+
+static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint8_t count, level;
+
+       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+                               data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+                               data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+                       volt_with_cks, value;
+       uint16_t clock_freq_u16;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+                       volt_offset = 0;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+       /* Read the SMU efuse to calculate RO and determine whether
+        * the part is SS or FF; if RO >= 1660 MHz, the part is FF.
+        */
+       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (146 * 4));
+       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (148 * 4));
+       efuse &= 0xFF000000;
+       efuse = efuse >> 24;
+       efuse2 &= 0xF;
+
+       if (efuse2 == 1)
+               ro = (2300 - 1350) * efuse / 255 + 1350;
+       else
+               ro = (2500 - 1000) * efuse / 255 + 1000;
+
+       if (ro >= 1660)
+               type = 0;
+       else
+               type = 1;
+
+       /* Populate Stretch amount */
+       smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+       for (i = 0; i < sclk_table->count; i++) {
+               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+                               sclk_table->entries[i].cks_enable << i;
+               volt_without_cks = (uint32_t)((14041 *
+                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+               volt_with_cks = (uint32_t)((13946 *
+                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+               if (volt_without_cks >= volt_with_cks)
+                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+       }
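+       /* Worked example (illustrative, assumed values): for sclk = 60000
+        * (600 MHz), ro = 1600 and cks_voffset = 0, volt_without_cks = 905,
+        * volt_with_cks = 873 and
+        * volt_offset = ((905 - 873 + 0) * 100 / 625) + 1 = 6.
+        */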
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       STRETCH_ENABLE, 0x0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       masterReset, 0x1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       staticEnable, 0x1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       masterReset, 0x0);
+
+       /* Populate CKS Lookup Table */
+       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+               stretch_amount2 = 0;
+       else if (stretch_amount == 3 || stretch_amount == 4)
+               stretch_amount2 = 1;
+       else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ClockStretcher);
+               PP_ASSERT_WITH_CODE(false,
+                               "Stretch Amount in PPTable not supported\n",
+                               return -EINVAL);
+       }
+
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixPWR_CKS_CNTL);
+       value &= 0xFFC2FF87;
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+                       fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+                       fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+                       GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+                       SclkFrequency) / 100);
+       if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+                       clock_freq_u16 &&
+           fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+                       clock_freq_u16) {
+               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+               value |= (fiji_clock_stretch_amount_conversion
+                               [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+                                [stretch_amount]) << 3;
+       }
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+                       CKS_LOOKUPTableEntry[0].minFreq);
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+                       CKS_LOOKUPTableEntry[0].maxFreq);
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+                       fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+                       (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixPWR_CKS_CNTL, value);
+
+       /* Populate DDT Lookup Table */
+       for (i = 0; i < 4; i++) {
+               /* Assign the minimum and maximum VID stored
+                * in the last row of Clock Stretcher Voltage Table.
+                */
+               smu_data->smc_state_table.ClockStretcherDataTable.
+               ClockStretcherDataTableEntry[i].minVID =
+                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+               smu_data->smc_state_table.ClockStretcherDataTable.
+               ClockStretcherDataTableEntry[i].maxVID =
+                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+               /* Loop through each SCLK and check whether its frequency
+                * lies within the frequency range for the clock stretcher.
+                */
+               for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+                       cks_setting = 0;
+                       clock_freq = PP_SMC_TO_HOST_UL(
+                                       smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+                       /* Check the allowed frequency against the sclk level[j].
+                        * Sclk's endianness has already been converted, and it
+                        * is in 10 kHz units, as opposed to the DDT table,
+                        * which is in MHz units.
+                        */
+                       if (clock_freq >=
+                                       (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+                               cks_setting |= 0x2;
+                               if (clock_freq <
+                                               (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+                                       cks_setting |= 0x1;
+                       }
+                       smu_data->smc_state_table.ClockStretcherDataTable.
+                       ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+               }
+               CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+                               ClockStretcherDataTable.
+                               ClockStretcherDataTableEntry[i].setting);
+       }
+
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+       value &= 0xFFFFFFFE;
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+       return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in the DPM table.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint16_t config;
+
+       config = VR_MERGED_WITH_VDDC;
+       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+       /* Set Vddc Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= config;
+       } else {
+               PP_ASSERT_WITH_CODE(false,
+                               "VDDC should be on SVI2 control in merged mode!",
+                               );
+       }
+       /* Set Vddci Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       }
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+               config = VR_SVI2_PLANE_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               config = VR_SMIO_PATTERN_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       }
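+
+       /* Illustrative: assuming SVI2 control for VDDCI and MVDD, VRConfig
+        * packs to (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT) |
+        * VR_SVI2_PLANE_1 | (VR_SVI2_PLANE_2 << VRCONF_VDDCI_SHIFT) |
+        * (VR_SVI2_PLANE_2 << VRCONF_MVDD_SHIFT).
+        */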
+
+       return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
+       uint32_t tmp;
+       int result;
+
+       /* This is a read-modify-write on the first byte of the ARB table.
+        * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+        * is the field 'current'.
+        * This solution is ugly, but we never write the whole table, only
+        * individual fields in it.
+        * In reality this field should not be in that structure
+        * but in a soft register.
+        */
+       result = smu7_read_smc_sram_dword(smumgr,
+                       smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+       if (result)
+               return result;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
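+
+       /* Illustrative: assuming MC_CG_ARB_FREQ_F1 is a small constant such
+        * as 0x0b, a read value of 0x0a123456 becomes 0x0b123456: the low
+        * 24 bits are preserved and only the 'current' byte is replaced.
+        */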
+
+       return smu7_write_smc_sram_dword(smumgr,
+                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       uint8_t i;
+       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+       fiji_initialize_power_tune_defaults(hwmgr);
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+               fiji_populate_smc_voltage_tables(hwmgr, table);
+
+       table->SystemFlags = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+               result = fiji_populate_ulv_state(hwmgr, table);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to initialize ULV state!", return result);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = fiji_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Link Level!", return result);
+
+       result = fiji_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Graphics Level!", return result);
+
+       result = fiji_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Memory Level!", return result);
+
+       result = fiji_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ACPI Level!", return result);
+
+       result = fiji_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize VCE Level!", return result);
+
+       result = fiji_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ACP Level!", return result);
+
+       result = fiji_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize SAMU Level!", return result);
+
+       /* Since only the initial state is completely set up at this point
+        * (the other states are just copies of the boot state) we only
+        * need to populate the ARB settings for the initial state.
+        */
+       result = fiji_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to Write ARB settings for the initial state.", return result);
+
+       result = fiji_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize UVD Level!", return result);
+
+       result = fiji_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot Level!", return result);
+
+       result = fiji_populate_smc_initial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot State!", return result);
+
+       result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate BAPM Parameters!", return result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ClockStretcher)) {
+               result = fiji_populate_clock_stretcher_data_table(hwmgr);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to populate Clock Stretcher Data Table!",
+                               return result);
+       }
+
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+       table->TemperatureLimitHigh =
+                       table_info->cac_dtp_table->usTargetOperatingTemp *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->TemperatureLimitLow  =
+                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->MemoryVoltageChangeEnable = 1;
+       table->MemoryInterval = 1;
+       table->VoltageResponseTime = 0;
+       table->PhaseResponseTime = 0;
+       table->MemoryThermThrottleEnable = 1;
+       table->PCIeBootLinkLevel = 0;      /* 0: Gen1, 1: Gen2, 2: Gen3 */
+       table->PCIeGenInterval = 1;
+       table->VRConfig = 0;
+
+       result = fiji_populate_vr_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate VRConfig setting!", return result);
+
+       table->ThermGpio = 17;
+       table->SclkStepSize = 0x4000;
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                       &gpio_pin)) {
+               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       } else {
+               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       }
+
+       /* Thermal Output GPIO */
+       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+                       &gpio_pin)) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+               /* For polarity, read GPIOPAD_A with the assigned GPIO pin
+                * since the VBIOS will program this register to set the
+                * 'inactive state'; the driver can then determine the
+                * 'active state' from this and program the SMU with the
+                * correct polarity
+                */
+               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+                               (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+               /* if required, combine VRHot/PCC with thermal out GPIO */
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot) &&
+                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_CombinePCCWithThermalSignal))
+                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+       } else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ThermalOutGPIO);
+               table->ThermOutGpio = 17;
+               table->ThermOutPolarity = 1;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+       }
+
+       for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
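+
+       /* The CONVERT_* macros byte-swap the fields in place: the SMC
+        * consumes big-endian data, the same convention made explicit with
+        * cpu_to_be16()/cpu_to_be32() in the fan-table code below. */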
+
+       /* Upload all dpm data to SMC memory (dpm level, dpm level count etc.);
+        * the size excludes the three SMU73_PIDController entries at the end
+        * of the table, which are not uploaded here */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+                       smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+                       (uint8_t *)&(table->SystemFlags),
+                       sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+                       SMC_RAM_END);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload dpm data to SMC memory!", return result);
+
+       result = fiji_init_arb_table_index(hwmgr->smumgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload arb data to SMC memory!", return result);
+
+       result = fiji_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+       return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0; on failure the MicrocodeFanControl platform cap is
+*           cleared rather than an error propagated
+*/
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (smu_data->smu7_data.fan_table_start == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (duty100 == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+                       usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
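+
+       /* e.g. usPWMMin = 3000 (30.00%, in 0.01% units) with duty100 = 255
+        * gives fdo_min = 76 (illustrative numbers) */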
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
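+
+       /* Worked example with illustrative numbers: duty100 = 255,
+        * t_diff1 = 2500 (25.00 degC span), pwm_diff1 = 2000 (20.00%):
+        * slope1 = (50 + (16 * 255 * 2000) / 2500) / 100 = 33; the +50
+        * rounds the final division to nearest. */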
+
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->
+                       thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
+                       reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+                       hwmgr->device, CGS_IND_REG__SMC,
+                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+                       SMC_RAM_END);
+
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ucMinimumPWMLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SetFanMinPwm,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ucMinimumPWMLimit);
+
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SetFanSclkTarget,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
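+       /* a failure anywhere above is non-fatal: clear the cap so the driver
+        * stops relying on SMC microcode fan control */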
+       if (res)
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+
+       return 0;
+}
+
+int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return fiji_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU73_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+       result = fiji_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+       return result;
+}
+
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+       }
+       printk("cant't get the offset of type %x member %x \n", type, member);
+       return 0;
+}
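+
+/* Usage sketch (hypothetical caller): generic smu7 code resolves SMC RAM
+ * addresses by adding the member offset to the table base, e.g.
+ *
+ *   addr = smu_data->smu7_data.dpm_table_start +
+ *          fiji_get_offsetof(SMU_Discrete_DpmTable, LowSclkInterruptThreshold);
+ */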
+
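+/* Maps the chip-agnostic SMU_MAX_* identifiers onto the SMU73 firmware's
+ * limits so shared smu7 code can size its tables. */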
+uint32_t fiji_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU73_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU73_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU73_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU73_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU73_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU73_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU73_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU73_MAX_LEVELS_MVDD;
+       }
+
+       printk("cant't get the mac of %x \n", value);
+       return 0;
+}
+
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.UvdBootLevel = 0;
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+                                               UvdBootLevel);
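+       /* round the byte offset down to its containing dword: the indirect
+        * register interface below reads and writes whole 32-bit words */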
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0x00FFFFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smu_data->smc_state_table.VceBootLevel =
+                       (uint8_t) (table_info->mm_dep_table->count - 1);
+       else
+               smu_data->smc_state_table.VceBootLevel = 0;
+
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                                       offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFF00FFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFFFFFF00;
+       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               fiji_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               fiji_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               fiji_update_samu_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success, -1 if reading any of the tracked header fields failed
+*/
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, DpmTable),
+                       &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, SoftRegisters),
+                       &tmp, SMC_RAM_END);
+
+       if (!result) {
+               data->soft_regs_start = tmp;
+               smu_data->smu7_data.soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, mcRegisterTable),
+                       &tmp, SMC_RAM_END);
+
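+       /* note that, unlike the other header fields, a failed read of the MC
+        * register table offset is not accumulated into 'error' */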
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, FanTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, Version),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (0 != result);
+
+       return error ? -1 : 0;
+}
+
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+       /* Program additional LP registers
+        * that are no longer programmed by VBIOS
+        */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+       return 0;
+}
+
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
new file mode 100644 (file)
index 0000000..d30d150
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef FIJI_SMC_H
+#define FIJI_SMC_H
+
+#include "smumgr.h"
+#include "smu73.h"
+
+struct fiji_pt_defaults {
+       uint8_t   SviLoadLineEn;
+       uint8_t   SviLoadLineVddC;
+       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+       uint8_t   TDC_MAWt;
+       uint8_t   TdcWaterfallCtl;
+       uint8_t   DTEAmbientTempBase;
+};
+
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
+uint32_t fiji_get_mac_definition(uint32_t value);
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
old mode 100644 (file)
new mode 100755 (executable)
index 8e52a2e..02fe1df
@@ -38,6 +38,7 @@
 #include "bif/bif_5_0_sh_mask.h"
 #include "pp_debug.h"
 #include "fiji_pwrvirus.h"
+#include "fiji_smc.h"
 
 #define AVFS_EN_MSB                                        1568
 #define AVFS_EN_LSB                                        1568
@@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
                { 0xf811d047, 0x80380100,   0x01,     0x00,   0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000,   0,      0,   0x0c,   0x01,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 }
 };
 
-static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-       enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-       switch (fw_type) {
-       case UCODE_ID_SMU:
-               result = CGS_UCODE_ID_SMU;
-               break;
-       case UCODE_ID_SDMA0:
-               result = CGS_UCODE_ID_SDMA0;
-               break;
-       case UCODE_ID_SDMA1:
-               result = CGS_UCODE_ID_SDMA1;
-               break;
-       case UCODE_ID_CP_CE:
-               result = CGS_UCODE_ID_CP_CE;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = CGS_UCODE_ID_CP_PFP;
-               break;
-       case UCODE_ID_CP_ME:
-               result = CGS_UCODE_ID_CP_ME;
-               break;
-       case UCODE_ID_CP_MEC:
-               result = CGS_UCODE_ID_CP_MEC;
-               break;
-       case UCODE_ID_CP_MEC_JT1:
-               result = CGS_UCODE_ID_CP_MEC_JT1;
-               break;
-       case UCODE_ID_CP_MEC_JT2:
-               result = CGS_UCODE_ID_CP_MEC_JT2;
-               break;
-       case UCODE_ID_RLC_G:
-               result = CGS_UCODE_ID_RLC_G;
-               break;
-       default:
-               break;
-       }
-
-       return result;
-}
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-*/
-static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
-               uint32_t smc_addr, uint32_t limit)
-{
-       PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
-                       "SMC address must be 4 byte aligned.", return -EINVAL;);
-       PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
-                       "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
-       return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byteCount the number of bytes to copy.
-*/
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-               uint32_t smcStartAddress, const uint8_t *src,
-               uint32_t byteCount, uint32_t limit)
-{
-       int result;
-       uint32_t data, originalData;
-       uint32_t addr, extraShift;
-
-       PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
-                       "SMC address must be 4 byte aligned.", return -EINVAL;);
-       PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
-                       "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
-       addr = smcStartAddress;
-
-       while (byteCount >= 4) {
-               /* Bytes are written into the SMC addres space with the MSB first. */
-               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
-               result = fiji_set_smc_sram_address(smumgr, addr, limit);
-               if (result)
-                       return result;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byteCount -= 4;
-               addr += 4;
-       }
-
-       if (byteCount) {
-               /* Now write the odd bytes left.
-                * Do a read modify write cycle.
-                */
-               data = 0;
-
-               result = fiji_set_smc_sram_address(smumgr, addr, limit);
-               if (result)
-                       return result;
-
-               originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-               extraShift = 8 * (4 - byteCount);
-
-               while (byteCount > 0) {
-                       /* Bytes are written into the SMC addres
-                        * space with the MSB first.
-                        */
-                       data = (0x100 * data) + *src++;
-                       byteCount--;
-               }
-               data <<= extraShift;
-               data |= (originalData & ~((~0UL) << extraShift));
-
-               result = fiji_set_smc_sram_address(smumgr, addr, limit);
-               if (!result)
-                       return result;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-       }
-       return 0;
-}
-
-int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-       static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
-       fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
-
-       return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-       return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
-                       CGS_IND_REG__SMC,
-                       SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-                       && (0x20100 <= cgs_read_ind_register(smumgr->device,
-                                       CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-       if (!fiji_is_smc_ram_running(smumgr))
-               return -1;
-
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-               printk(KERN_ERR "Failed to send Previous Message.");
-               SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       }
-
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       return 0;
-}
-
-/**
- * Send a message to the SMC with parameter
- * @param    smumgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: the parameter to send
- * @return   The response that came from the SMC.
- */
-int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
-               uint16_t msg, uint32_t parameter)
-{
-       if (!fiji_is_smc_ram_running(smumgr))
-               return -1;
-
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-               printk(KERN_ERR "Failed to send Previous Message.");
-               SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       }
-
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       return 0;
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc_with_parameter_without_waiting(
-               struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-               printk(KERN_ERR "Failed to send Previous Message.");
-               SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       }
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-/**
-* Uploads the SMU firmware from .hex file
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @return   0 or -1.
-*/
-
-static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
-       const uint8_t *src;
-       uint32_t byte_count;
-       uint32_t *data;
-       struct cgs_firmware_info info = {0};
-
-       cgs_get_firmware_info(smumgr->device,
-                       fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
-       if (info.image_size & 3) {
-               printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
-               return -EINVAL;
-       }
-
-       if (info.image_size > FIJI_SMC_SIZE) {
-               printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
-               return -EINVAL;
-       }
-
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
-       byte_count = info.image_size;
-       src = (const uint8_t *)info.kptr;
-
-       data = (uint32_t *)src;
-       for (; byte_count >= 4; data++, byte_count -= 4)
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       return 0;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value and output parameter for the data read from the SMC SRAM.
-*/
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-               uint32_t *value, uint32_t limit)
-{
-       int     result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
-       if (result)
-               return result;
-
-       *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-       return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value to write to the SMC SRAM.
-*/
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-               uint32_t value, uint32_t limit)
-{
-       int result;
-
-       result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
-       if (result)
-               return result;
-
-       cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-       return 0;
-}
-
-static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
-{
-       uint32_t result = 0;
-
-       switch (fw_type) {
-       case UCODE_ID_SDMA0:
-               result = UCODE_ID_SDMA0_MASK;
-               break;
-       case UCODE_ID_SDMA1:
-               result = UCODE_ID_SDMA1_MASK;
-               break;
-       case UCODE_ID_CP_CE:
-               result = UCODE_ID_CP_CE_MASK;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = UCODE_ID_CP_PFP_MASK;
-               break;
-       case UCODE_ID_CP_ME:
-               result = UCODE_ID_CP_ME_MASK;
-               break;
-       case UCODE_ID_CP_MEC_JT1:
-               result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
-               break;
-       case UCODE_ID_CP_MEC_JT2:
-               result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
-               break;
-       case UCODE_ID_RLC_G:
-               result = UCODE_ID_RLC_G_MASK;
-               break;
-       default:
-               printk(KERN_ERR "UCode type is out of range!");
-               result = 0;
-       }
-
-       return result;
-}
-
-/* Populate one firmware image to the data structure */
-static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-               uint32_t fw_type, struct SMU_Entry *entry)
-{
-       int result;
-       struct cgs_firmware_info info = {0};
-
-       result = cgs_get_firmware_info(
-                       smumgr->device,
-                       fiji_convert_fw_type_to_cgs(fw_type),
-                       &info);
-
-       if (!result) {
-               entry->version = 0;
-               entry->id = (uint16_t)fw_type;
-               entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-               entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-               entry->meta_data_addr_high = 0;
-               entry->meta_data_addr_low = 0;
-               entry->data_size_byte = info.image_size;
-               entry->num_register_entries = 0;
-
-               if (fw_type == UCODE_ID_RLC_G)
-                       entry->flags = 1;
-               else
-                       entry->flags = 0;
-       }
-
-       return result;
-}
-
-static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
-       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-       uint32_t fw_to_load;
-       struct SMU_DRAMData_TOC *toc;
-
-       if (priv->soft_regs_start)
-               cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
-                               priv->soft_regs_start +
-                               offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
-                               0x0);
-
-       toc = (struct SMU_DRAMData_TOC *)priv->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
-                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
-                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
-                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
-                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
-                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
-                                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
-                                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
-                                       "Failed to Get Firmware Entry.\n" , return -1 );
-       PP_ASSERT_WITH_CODE(
-                       0 == fiji_populate_single_firmware_entry(smumgr,
-                                       UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
-                                       "Failed to Get Firmware Entry.\n" , return -1 );
-
-       fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
-                       priv->header_buffer.mc_addr_high);
-       fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
-                       priv->header_buffer.mc_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK
-                       + UCODE_ID_SDMA0_MASK
-                       + UCODE_ID_SDMA1_MASK
-                       + UCODE_ID_CP_CE_MASK
-                       + UCODE_ID_CP_ME_MASK
-                       + UCODE_ID_CP_PFP_MASK
-                       + UCODE_ID_CP_MEC_MASK
-                       + UCODE_ID_CP_MEC_JT1_MASK
-                       + UCODE_ID_CP_MEC_JT2_MASK;
-
-       if (fiji_send_msg_to_smc_with_parameter(smumgr,
-                       PPSMC_MSG_LoadUcodes, fw_to_load))
-               printk(KERN_ERR "Fail to Request SMU Load uCode");
-
-       return 0;
-}
-
-
-/* Check if the FW has been loaded, SMU will not return
- * if loading has not finished.
- */
-static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
-               uint32_t fw_type)
-{
-       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-       uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
-
-       /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
-       if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
-                       priv->soft_regs_start +
-                       offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
-                       mask, mask)) {
-               printk(KERN_ERR "check firmware loading failed\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-
-static int fiji_reload_firmware(struct pp_smumgr *smumgr)
-{
-       return smumgr->smumgr_funcs->start_smu(smumgr);
-}
-
-static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
-{
-       uint32_t value;
-
-       value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
-       if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
-               /* driver reads on SR-IOV enabled PF: 0x80000000
-                * driver reads on SR-IOV enabled VF: 0x80000001
-                * driver reads on SR-IOV disabled:   0x00000000
-                */
-               return true;
-       }
-       return false;
-}
-
-static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
-       if (fiji_is_hw_virtualization_enabled(smumgr)) {
-               uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
-               if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
-                               PPSMC_MSG_LoadUcodes, masks))
-                       printk(KERN_ERR "Fail to Request SMU Load uCode");
-       }
-       /* For non-virtualization cases,
-        * SMU loads all FWs at once in fiji_request_smu_load_fw.
-        */
-       return 0;
-}
-
 static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
 {
        int result = 0;
@@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                        SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-       result = fiji_upload_smu_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
        if (result)
                return result;
 
@@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
                        SMU_STATUS, SMU_DONE, 0);
 
        /* Check pass/failed indicator */
-       if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
-                       SMU_STATUS, SMU_PASS)) {
+       if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+                       SMU_STATUS, SMU_PASS) != 1) {
                PP_ASSERT_WITH_CODE(false,
                                "SMU Firmware start failed!", return -1);
        }
@@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                        SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-       result = fiji_upload_smu_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
        if (result)
                return result;
 
        /* Set smc instruct start point at 0x0 */
-       fiji_program_jump_on_start(smumgr);
+       smu7_program_jump_on_start(smumgr);
 
        /* Enable clock */
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
 
        priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
        if (priv->avfs.AvfsBtcParam) {
-               if (!fiji_send_msg_to_smc_with_parameter(smumgr,
+               if (!smum_send_msg_to_smc_with_parameter(smumgr,
                                PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
-                       if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
+                       if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
                                priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
                                result = 0;
                        } else {
                                printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
                                                " to Enable AVFS Failed!");
-                               fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
+                               smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
                                result = -1;
                        }
                } else {
@@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
        charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
        inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
 
-       PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+       PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
                        SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
                                        PmFuseTable), &table_start, 0x40000),
                        "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
        inversion_voltage_addr = table_start +
                        offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
 
-       result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr,
+       result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
                        (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
        PP_ASSERT_WITH_CODE(0 == result,
                        "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
                        "be populated.", return -1;);
 
-       result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
+       result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
                        (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
        PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
                        "charz_freq could not be populated.", return -1;);
@@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
        uint32_t level_addr, vr_config_addr;
        uint32_t level_size = sizeof(avfs_graphics_level);
 
-       PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+       PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
                        SMU7_FIRMWARE_HEADER_LOCATION +
                        offsetof(SMU73_Firmware_Header, DpmTable),
                        &table_start, 0x40000),
@@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
        vr_config_addr = table_start +
                        offsetof(SMU73_Discrete_DpmTable, VRConfig);
 
-       PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
                        (uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
                        "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
                        "vr_config value over to SMC",
@@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 
        level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
 
-       PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
                        (uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
                        "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
                        return -1;);
@@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
                break;
        case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
                priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-               PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
-                               PPSMC_MSG_VftTableIsValid),
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
+                               0x666),
                                "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
                                "correctly to VftTableIsValid Msg",
                                return -1;);
                priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-               PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
+               PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
                                PPSMC_MSG_EnableAvfs),
                                "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
                                "correctly to EnableAvfs Message Msg",
@@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
        struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
        /* Only start SMC if SMC RAM is not running */
-       if (!fiji_is_smc_ram_running(smumgr)) {
+       if (!smu7_is_smc_ram_running(smumgr)) {
                fiji_avfs_event_mgr(smumgr, false);
 
                /* Check if SMU is running in protected mode */
@@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
        /* Setup SoftRegsStart here for register lookup in case
         * DummyBackEnd is used and ProcessFirmwareHeader is not executed
         */
-       fiji_read_smc_sram_dword(smumgr,
+       smu7_read_smc_sram_dword(smumgr,
                        SMU7_FIRMWARE_HEADER_LOCATION +
                        offsetof(SMU73_Firmware_Header, SoftRegisters),
-                       &(priv->soft_regs_start), 0x40000);
+                       &(priv->smu7_data.soft_regs_start), 0x40000);
 
-       result = fiji_request_smu_load_fw(smumgr);
+       result = smu7_request_smu_load_fw(smumgr);
 
        return result;
 }
@@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
 static int fiji_smu_init(struct pp_smumgr *smumgr)
 {
        struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-       uint64_t mc_addr;
-
-       priv->header_buffer.data_size =
-                       ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       smu_allocate_memory(smumgr->device,
-                       priv->header_buffer.data_size,
-                       CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-                       PAGE_SIZE,
-                       &mc_addr,
-                       &priv->header_buffer.kaddr,
-                       &priv->header_buffer.handle);
-
-       priv->header = priv->header_buffer.kaddr;
-       priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-       priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-       PP_ASSERT_WITH_CODE((NULL != priv->header),
-                       "Out of memory.",
-                       kfree(smumgr->backend);
-                       cgs_free_gpu_mem(smumgr->device,
-                       (cgs_handle_t)priv->header_buffer.handle);
-                       return -1);
+       int i;
+
+       if (smu7_init(smumgr))
+               return -EINVAL;
 
        priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
        if (fiji_is_hw_avfs_present(smumgr))
@@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
        else
                priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
 
-       priv->acpi_optimization = 1;
+       for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
+               priv->activity_target[i] = 30;
 
        return 0;
 }
 
-static int fiji_smu_fini(struct pp_smumgr *smumgr)
-{
-       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
-       smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
-       if (smumgr->backend) {
-               kfree(smumgr->backend);
-               smumgr->backend = NULL;
-       }
-
-       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-       return 0;
-}
 
 static const struct pp_smumgr_func fiji_smu_funcs = {
        .smu_init = &fiji_smu_init,
-       .smu_fini = &fiji_smu_fini,
+       .smu_fini = &smu7_smu_fini,
        .start_smu = &fiji_start_smu,
-       .check_fw_load_finish = &fiji_check_fw_load_finish,
-       .request_smu_load_fw = &fiji_reload_firmware,
-       .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load,
-       .send_msg_to_smc = &fiji_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter,
+       .check_fw_load_finish = &smu7_check_fw_load_finish,
+       .request_smu_load_fw = &smu7_reload_firmware,
+       .request_smu_load_specific_fw = NULL,
+       .send_msg_to_smc = &smu7_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
+       .update_smc_table = fiji_update_smc_table,
+       .get_offsetof = fiji_get_offsetof,
+       .process_firmware_header = fiji_process_firmware_header,
+       .init_smc_table = fiji_init_smc_table,
+       .update_sclk_threshold = fiji_update_sclk_threshold,
+       .thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+       .populate_all_graphic_levels = fiji_populate_all_graphic_levels,
+       .populate_all_memory_levels = fiji_populate_all_memory_levels,
+       .get_mac_definition = fiji_get_mac_definition,
+       .initialize_mc_reg_table = fiji_initialize_mc_reg_table,
+       .is_dpm_running = fiji_is_dpm_running,
 };
 
 int fiji_smum_init(struct pp_smumgr *smumgr)
index b4eb483215b1783ada3f31b459edbb21fbafb7e4..adcbdfb209be477dcffd12333ac52b81fffe6a1e 100644 (file)
 #ifndef _FIJI_SMUMANAGER_H_
 #define _FIJI_SMUMANAGER_H_
 
+#include "smu73_discrete.h"
+#include <pp_endian.h>
+#include "smu7_smumgr.h"
+
+
 
 struct fiji_smu_avfs {
        enum AVFS_BTC_STATUS AvfsBtcStatus;
        uint32_t           AvfsBtcParam;
 };
 
-struct fiji_buffer_entry {
-       uint32_t data_size;
-       uint32_t mc_addr_low;
-       uint32_t mc_addr_high;
-       void *kaddr;
-       unsigned long  handle;
-};
 
 struct fiji_smumgr {
-       uint8_t        *header;
-       uint8_t        *mec_image;
-       uint32_t        soft_regs_start;
+       struct smu7_smumgr                   smu7_data;
+
        struct fiji_smu_avfs avfs;
-       uint32_t        acpi_optimization;
+       struct SMU73_Discrete_DpmTable       smc_state_table;
+       struct SMU73_Discrete_Ulv            ulv_setting;
+       struct SMU73_Discrete_PmFuses  power_tune_table;
+       const struct fiji_pt_defaults  *power_tune_defaults;
+       uint32_t        activity_target[SMU73_MAX_LEVELS_GRAPHICS];
 
-       struct fiji_buffer_entry header_buffer;
 };
 
-int fiji_smum_init(struct pp_smumgr *smumgr);
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-               uint32_t *value, uint32_t limit);
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-               uint32_t value, uint32_t limit);
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
-               const uint8_t *src,     uint32_t byteCount, uint32_t limit);
+
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
new file mode 100644 (file)
index 0000000..40f1868
--- /dev/null
@@ -0,0 +1,2576 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "iceland_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu71_discrete.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "processpptables.h"
+
+#include "iceland_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+#define DEVICE_ID_VI_ICELAND_M_6900    0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901    0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902    0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903    0x6903
+
+static struct iceland_pt_defaults defaults_iceland = {
+       /*
+        * sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+        * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+       { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static struct iceland_pt_defaults defaults_icelandxt = {
+       /*
+        * sviLoadLineEn, SviLoadLineVddC,
+        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+        * BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+       { 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
+       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static struct iceland_pt_defaults defaults_icelandpro = {
+       /*
+        * sviLoadLineEn, SviLoadLineVddC,
+        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+        * BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+       { 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
+       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       struct cgs_system_info sys_info = {0};
+       uint32_t dev_id;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       dev_id = (uint32_t)sys_info.value;
+
+       switch (dev_id) {
+       case DEVICE_ID_VI_ICELAND_M_6900:
+       case DEVICE_ID_VI_ICELAND_M_6903:
+               smu_data->power_tune_defaults = &defaults_icelandxt;
+               break;
+
+       case DEVICE_ID_VI_ICELAND_M_6901:
+       case DEVICE_ID_VI_ICELAND_M_6902:
+               smu_data->power_tune_defaults = &defaults_icelandpro;
+               break;
+       default:
+               smu_data->power_tune_defaults = &defaults_iceland;
+               pr_warning("Unknown V.I. Device ID.\n");
+               break;
+       }
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
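+       /*
+        * The SMC takes the TDC limit scaled by 256, i.e. an 8.8 fixed-point
+        * style encoding (our reading); e.g. usTDC = 120 is sent as 30720.
+        */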
+       tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->tdc_vddc_throttle_release_limit_perc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+       return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       fuse_table_offset +
+                       offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+                               return -EINVAL);
+       else
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+       return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 8; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint16_t hi_sidd, lo_sidd;
+       struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+       return 0;
+}
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+       uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+                           "The CAC Leakage table does not exist!", return -EINVAL);
+       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+                           "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+                           "CACLeakageTable->count and VddcDependencyOnSCLK->count not equal", return -EINVAL);
+
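+       /*
+        * convert_to_vid() maps a voltage in mV to an SVI2 VID code; the
+        * encoding itself lives in the shared powerplay helpers.
+        */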
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+               for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+                       lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+                       hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+               }
+       } else {
+               PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+       }
+
+       return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint8_t *vid = smu_data->power_tune_table.VddCVid;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+               "There should never be more than 8 entries for VddcVid!!!",
+               return -EINVAL);
+
+       for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+               vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+       }
+
+       return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t pm_fuse_table_offset;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to get pm_fuse_table_offset Failed!",
+                                       return -EINVAL);
+
+               /* DW0 - DW3 */
+               if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate bapm vddc vid Failed!",
+                                       return -EINVAL);
+
+               /* DW4 - DW5 */
+               if (iceland_populate_vddc_vid(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate vddc vid Failed!",
+                                       return -EINVAL);
+
+               /* DW6 */
+               if (iceland_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate SviLoadLine Failed!",
+                                       return -EINVAL);
+               /* DW7 */
+               if (iceland_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
+               /* DW8 */
+               if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TdcWaterfallCtl, "
+                                       "LPMLTemperature Min and Max Failed!",
+                                       return -EINVAL);
+
+               /* DW9-DW12 */
+               if (0 != iceland_populate_temperature_scaler(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate LPMLTemperatureScaler Failed!",
+                                       return -EINVAL);
+
+               /* DW13-DW16 */
+               if (iceland_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Failed!",
+                                       return -EINVAL);
+
+               /* DW17 */
+               if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Min and Max Vid Failed!",
+                                       return -EINVAL);
+
+               /* DW18 */
+               if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+                                       return -EINVAL);
+
+               if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed!",
+                                       return -EINVAL);
+       }
+       return 0;
+}
+
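+/*
+ * Illustrative example: with a dependency table of {(30000, 900), (60000, 1100)}
+ * (clocks in 10 kHz units, voltages in mV), a request of 45000 returns 1100
+ * (the first entry with clk >= request), while a request above 60000 falls off
+ * the end of the table and returns the last entry's voltage.
+ */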
+static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+       struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+       uint32_t clock, uint32_t *vol)
+{
+       uint32_t i = 0;
+
+       /* the clock/voltage dependency table is empty */
+       if (allowed_clock_voltage_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+               /* find the first sclk equal to or greater than the request */
+               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+                       *vol = allowed_clock_voltage_table->entries[i].v;
+                       return 0;
+               }
+       }
+
+       /* the requested sclk exceeds the max sclk in the dependency table */
+       *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+       return 0;
+}
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+               pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+               uint16_t *lo)
+{
+       uint16_t v_index;
+       bool vol_found = false;
+       *hi = tab->value * VOLTAGE_SCALE;
+       *lo = tab->value * VOLTAGE_SCALE;
+
+       /* SCLK/VDDC Dependency Table has to exist. */
+       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+                       "The SCLK/VDDC Dependency Table does not exist.\n",
+                       return -EINVAL);
+
+       if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+               pr_warning("CAC Leakage Table does not exist, using vddc.\n");
+               return 0;
+       }
+
+       /*
+        * Since voltage in the sclk/vddc dependency table is not
+        * necessarily in ascending order because of ELB voltage
+        * patching, loop through entire list to find exact voltage.
+        */
+       for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+               if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+                       vol_found = true;
+                       if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+                       } else {
+                               pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+                       }
+                       break;
+               }
+       }
+
+       /*
+        * If voltage is not found in the first pass, loop again to
+        * find the best match, equal or higher value.
+        */
+       if (!vol_found) {
+               for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+                       if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+                               vol_found = true;
+                               if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+                               } else {
+                                       pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+                               }
+                               break;
+                       }
+               }
+
+               if (!vol_found)
+                       pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+       }
+
+       return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+               pp_atomctrl_voltage_table_entry *tab,
+               SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+       int result;
+
+       result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+                       &smc_voltage_tab->StdVoltageHiSidd,
+                       &smc_voltage_tab->StdVoltageLoSidd);
+       if (0 != result) {
+               smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+               smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+       }
+
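+       /*
+        * The SMC consumes this table in big-endian layout, so the
+        * PP_HOST_TO_SMC_*() and CONVERT_FROM_HOST_TO_SMC_*() helpers from
+        * pp_endian.h byte-swap on little-endian hosts.
+        */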
+       smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+       return 0;
+}
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->VddcLevelCount = data->vddc_voltage_table.count;
+       for (count = 0; count < table->VddcLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->vddc_voltage_table.entries[count]),
+                               &(table->VddcLevel[count]));
+               PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+               /* GPIO voltage control */
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+                       table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+                       table->VddcLevel[count].Smio = 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+       return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+       int result;
+
+       table->VddciLevelCount = data->vddci_voltage_table.count;
+
+       for (count = 0; count < table->VddciLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->vddci_voltage_table.entries[count]),
+                               &(table->VddciLevel[count]));
+               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+               else
+                       table->VddciLevel[count].Smio |= 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+       return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+       int result;
+
+       table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+       for (count = 0; count < table->MvddLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->mvdd_voltage_table.entries[count]),
+                               &table->MvddLevel[count]);
+               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+                       table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+               else
+                       table->MvddLevel[count].Smio |= 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+       return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = iceland_populate_smc_vddc_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+       result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+       result = iceland_populate_smc_mvdd_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+       return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU71_Discrete_Ulv *state)
+{
+       uint32_t voltage_response_time, ulv_voltage;
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+       PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+       if (ulv_voltage == 0) {
+               data->ulv_supported = false;
+               return 0;
+       }
+
+       if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+                       state->VddcOffset = 0;
+               else
+                       /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+                       state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+       } else {
+               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+                       state->VddcOffsetVid = 0;
+               else  /* used in SVI2 Mode */
+                       state->VddcOffsetVid = (uint8_t)(
+                                       (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+                                               * VOLTAGE_VID_OFFSET_SCALE2
+                                               / VOLTAGE_VID_OFFSET_SCALE1);
+       }
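+       /*
+        * In the SVI2 branch above, SCALE2/SCALE1 = 100/625, i.e. one VID
+        * step per 6.25 mV: a 25 mV offset, for example, encodes as
+        * 25 * 100 / 625 = 4.
+        */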
+       state->VddcPhase = 1;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+       return 0;
+}
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+                SMU71_Discrete_Ulv *ulv_level)
+{
+       return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t i;
+
+       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount =
+                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity =
+                       1;
+               table->LinkLevel[i].SPC =
+                       (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold =
+                       PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold =
+                       PP_HOST_TO_SMC_UL(30);
+       }
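+       /*
+        * The 5/30 values above look like link-activity percentages for
+        * down/up switching (assumption), sent in the SMC's big-endian byte
+        * order like the rest of the table.
+        */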
+
+       smu_data->smc_state_table.LinkLevelCount =
+               (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t    reference_clock;
+       uint32_t reference_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+       reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+       reference_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* low 14 bits is fraction and high 12 bits is divider*/
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+       /* SPLL_FUNC_CNTL setup*/
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               pp_atomctrl_internal_ss_info ss_info;
+
+               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+                       /*
+                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                       * ss_info.speed_spectrum_rate -- in unit of khz
+                       */
+                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+               }
+       }
+
+       sclk->SclkFrequency        = engine_clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+                               const struct phm_phase_shedding_limits_table *pl,
+                                       uint32_t sclk, uint32_t *p_shed)
+{
+       unsigned int i;
+
+       /* use the minimum phase shedding */
+       *p_shed = 1;
+
+       for (i = 0; i < pl->count; i++) {
+               if (sclk < pl->entries[i].Sclk) {
+                       *p_shed = i;
+                       break;
+               }
+       }
+       return 0;
+}
+
+/**
+ * Populates a single SMC SCLK graphics level using the provided engine clock
+ *
+ * @param    hwmgr        the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk_activity_level_threshold    the activity threshold for this level
+ * @param    graphic_level    the SMC graphics level structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+                                               uint32_t engine_clock,
+                               uint16_t sclk_activity_level_threshold,
+                               SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+       if (result)
+               return result;
+
+       /* populate graphics levels*/
+       result = iceland_get_dependecy_volt_by_clk(hwmgr,
+               hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+               &graphic_level->MinVddc);
+       PP_ASSERT_WITH_CODE((0 == result),
+               "cannot find VDDC voltage value for VDDC engine clock dependency table",
+               return result);
+
+       /* SCLK frequency in units of 10KHz*/
+       graphic_level->SclkFrequency = engine_clock;
+       graphic_level->MinVddcPhases = 1;
+
+       if (data->vddc_phase_shed_control)
+               iceland_populate_phase_value_based_on_sclk(hwmgr,
+                               hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                               engine_clock,
+                               &graphic_level->MinVddcPhases);
+
+       /* Indicates maximum activity level for this performance level. 50% for now*/
+       graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+       graphic_level->CcPwrDynRm = 0;
+       graphic_level->CcPwrDynRm1 = 0;
+       /* this level can be used if activity is high enough.*/
+       graphic_level->EnabledForActivity = 0;
+       /* this level can be used for throttling.*/
+       graphic_level->EnabledForThrottle = 1;
+       graphic_level->UpHyst = 0;
+       graphic_level->DownHyst = 100;
+       graphic_level->VoltageDownHyst = 0;
+       graphic_level->PowerThrottle = 0;
+
+       data->display_timing.min_clock_in_sr =
+                       hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep))
+               graphic_level->DeepSleepDivId =
+                               smu7_get_sleep_divider_id_from_clock(engine_clock,
+                                               data->display_timing.min_clock_in_sr);
+
+       /* Default to slow; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       if (0 == result) {
+               graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+       }
+
+       return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+       uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+                                               SMU71_MAX_LEVELS_GRAPHICS;
+
+       SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+       uint32_t i;
+       uint8_t highest_pcie_level_enabled = 0;
+       uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+       uint8_t count = 0;
+       int result = 0;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               result = iceland_populate_single_graphic_level(hwmgr,
+                                       dpm_table->sclk_table.dpm_levels[i].value,
+                                       (uint16_t)smu_data->activity_target[i],
+                                       &(smu_data->smc_state_table.GraphicsLevel[i]));
+               if (result != 0)
+                       return result;
+
+               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+               if (i > 1)
+                       smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+       }
+
+       /* Only enable level 0 for now. */
+       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+       /* set highest level watermark to high */
+       if (dpm_table->sclk_table.count > 1)
+               smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+               (uint8_t)dpm_table->sclk_table.count;
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+       while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                               (1 << (highest_pcie_level_enabled + 1))) != 0) {
+               highest_pcie_level_enabled++;
+       }
+
+       while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+               (1 << lowest_pcie_level_enabled)) == 0) {
+               lowest_pcie_level_enabled++;
+       }
+
+       while ((count < highest_pcie_level_enabled) &&
+                       ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+               count++;
+       }
+
+       mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+               (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
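+       /*
+        * Example: with pcie_dpm_enable_mask = 0x6 (levels 1 and 2 enabled),
+        * the three loops above yield lowest = 1, highest = 2 and mid = 2.
+        */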
+
+
+       /* set pcieDpmLevel to highest_pcie_level_enabled*/
+       for (i = 2; i < dpm_table->sclk_table.count; i++) {
+               smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+       }
+
+       /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+       smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+       /* set pcieDpmLevel to mid_pcie_level_enabled*/
+       smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+       /* the level count is sent to the SMC once, at SMC-table init, and never changes */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+                               (uint8_t *)levels, (uint32_t)level_array_size,
+                                                               SMC_RAM_END);
+
+       return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param    hwmgr        the address of the hardware manager
+ * @param    memory_clock the memory clock to use to populate the structure
+ * @param    mclk         the SMC MCLK structure to be populated
+ * @param    strobe_mode  whether strobe mode is used for this level
+ * @param    dllStateOn   the DLL state for this level
+ */
+static int iceland_calculate_mclk_params(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU71_Discrete_MemoryLevel *mclk,
+               bool strobe_mode,
+               bool dllStateOn
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
+       uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+       uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+       uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+       uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+       uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+       uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+       uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
+       uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+       pp_atomctrl_memory_clock_param mpll_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+                               memory_clock, &mpll_param, strobe_mode);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+       /* MPLL_FUNC_CNTL setup*/
+       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+       /* MPLL_FUNC_CNTL_1 setup*/
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+       /* MPLL_AD_FUNC_CNTL setup*/
+       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+                                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+       if (data->is_memory_gddr5) {
+               /* MPLL_DQ_FUNC_CNTL setup*/
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+               /*
+                ************************************
+                Fref = Reference Frequency
+                NF = Feedback divider ratio
+                NR = Reference divider ratio
+                Fnom = Nominal VCO output frequency = Fref * NF / NR
+                Fs = Spreading Rate
+                D = Percentage down-spread / 2
+                Fint = Reference input frequency to PFD = Fref / NR
+                NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+                CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+                NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+                CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+                *************************************
+                */
+               pp_atomctrl_internal_ss_info ss_info;
+               uint32_t freq_nom;
+               uint32_t tmp;
+               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+               /* for GDDR5 for all modes and DDR3 */
+               if (1 == mpll_param.qdr)
+                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+               else
+                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+               tmp = (freq_nom / reference_clock);
+               tmp = tmp * tmp;
+
+               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+                       /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+                       /* ss.Info.speed_spectrum_rate -- in unit of khz */
+                       /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+                       /*     = reference_clock * 5 / speed_spectrum_rate */
+                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+                       /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+                       /*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+                       uint32_t clkv =
+                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+               }
+       }
+
+       /* MCLK_PWRMGT_CNTL setup */
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+       /* Save the result data to the output memory level structure */
+       mclk->MclkFrequency   = memory_clock;
+       mclk->MpllFuncCntl    = mpll_func_cntl;
+       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+       mclk->DllCntl         = dll_cntl;
+       mclk->MpllSs1         = mpll_ss1;
+       mclk->MpllSs2         = mpll_ss2;
+
+       return 0;
+}
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+               bool strobe_mode)
+{
+       uint8_t mc_para_index;
+
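+       /*
+        * Clocks here are in the 10 kHz units used throughout this file; in
+        * strobe mode, e.g., memory_clock = 30000 (300 MHz) maps to
+        * (30000 - 10000) / 2500 = 8.
+        */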
+       if (strobe_mode) {
+               if (memory_clock < 12500) {
+                       mc_para_index = 0x00;
+               } else if (memory_clock > 47500) {
+                       mc_para_index = 0x0f;
+               } else {
+                       mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+               }
+       } else {
+               if (memory_clock < 65000) {
+                       mc_para_index = 0x00;
+               } else if (memory_clock > 135000) {
+                       mc_para_index = 0x0f;
+               } else {
+                       mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+               }
+       }
+
+       return mc_para_index;
+}
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+       uint8_t mc_para_index;
+
+       if (memory_clock < 10000) {
+               mc_para_index = 0;
+       } else if (memory_clock >= 80000) {
+               mc_para_index = 0x0f;
+       } else {
+               mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+       }
+
+       return mc_para_index;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+                                       uint32_t memory_clock, uint32_t *p_shed)
+{
+       unsigned int i;
+
+       *p_shed = 1;
+
+       for (i = 0; i < pl->count; i++) {
+               if (memory_clock < pl->entries[i].Mclk) {
+                       *p_shed = i;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int iceland_populate_single_memory_level(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU71_Discrete_MemoryLevel *memory_level
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+       bool dll_state_on;
+       struct cgs_display_info info = {0};
+       uint32_t mclk_edc_wr_enable_threshold = 40000;
+       uint32_t mclk_edc_enable_threshold = 40000;
+       uint32_t mclk_strobe_mode_threshold = 40000;
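+       /* all three thresholds are 40000, i.e. 400 MHz in this file's 10 kHz clock units */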
+
+       if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+               result = iceland_get_dependecy_volt_by_clk(hwmgr,
+                       hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+       }
+
+       if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+               memory_level->MinVddci = memory_level->MinVddc;
+       } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+               result = iceland_get_dependecy_volt_by_clk(hwmgr,
+                               hwmgr->dyn_state.vddci_dependency_on_mclk,
+                               memory_clock,
+                               &memory_level->MinVddci);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+       }
+
+       memory_level->MinVddcPhases = 1;
+
+       if (data->vddc_phase_shed_control) {
+               iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                               memory_clock, &memory_level->MinVddcPhases);
+       }
+
+       memory_level->EnabledForThrottle = 1;
+       memory_level->EnabledForActivity = 0;
+       memory_level->UpHyst = 0;
+       memory_level->DownHyst = 100;
+       memory_level->VoltageDownHyst = 0;
+
+       /* Indicates maximum activity level for this performance level.*/
+       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       memory_level->StutterEnable = 0;
+       memory_level->StrobeEnable = 0;
+       memory_level->EdcReadEnable = 0;
+       memory_level->EdcWriteEnable = 0;
+       memory_level->RttEnable = 0;
+
+       /* default set to low watermark. Highest level will be set to high later.*/
+       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       data->display_timing.num_existing_displays = info.display_count;
+
+       /* stutter mode is not supported on iceland */
+
+       /* decide strobe mode*/
+       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+               (memory_clock <= mclk_strobe_mode_threshold);
+
+       /* decide EDC mode and memory clock ratio*/
+       if (data->is_memory_gddr5) {
+               memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+                                       memory_level->StrobeEnable);
+
+               if ((mclk_edc_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_enable_threshold)) {
+                       memory_level->EdcReadEnable = 1;
+               }
+
+               if ((mclk_edc_wr_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_wr_enable_threshold)) {
+                       memory_level->EdcWriteEnable = 1;
+               }
+
+               if (memory_level->StrobeEnable) {
+                       if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+                       else
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+               } else
+                       dll_state_on = data->dll_default_on;
+       } else {
+               memory_level->StrobeRatio =
+                       iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+       }
+
+       result = iceland_calculate_mclk_params(hwmgr,
+               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+       if (0 == result) {
+               memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+               memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+               memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+               /* MCLK frequency in units of 10KHz*/
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+               /* Indicates maximum activity level for this performance level.*/
+               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+       }
+
+       return result;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int result;
+
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t level_array_address = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+       uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+       SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                       "can not populate memory level as memory clock is zero", return -EINVAL);
+               result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+                       &(smu_data->smc_state_table.MemoryLevel[i]));
+               if (0 != result) {
+                       return result;
+               }
+       }
+
+       /* Only enable level 0 for now.*/
+       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+       /*
+        * Keep MC activity in stutter mode from pushing DPM up; the UVD
+        * change complements this by putting the MCLK in a higher state by
+        * default, so that we are not affected by the up threshold or MCLK
+        * DPM latency.
+        */
+       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+       /* set highest level watermark to high*/
+       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       /* the level count is sent to the SMC once, at SMC-table init, and never changes */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+               level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+               SMC_RAM_END);
+
+       return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+                                       SMU71_Discrete_VoltageLevel *voltage)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find mvdd value which clock is more than request */
+               for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+                       if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+                               /* Always round to higher voltage. */
+                               voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+
+               PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+                       "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       int result = 0;
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t vddc_phase_shed_control = 0;
+
+       SMU71_Discrete_VoltageLevel voltage_level;
+       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+       /* The ACPI state should not do DPM on DC (or ever).*/
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       if (data->acpi_vddc)
+               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+       else
+               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+       table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+       /* use the reference clock as the ACPI-level SCLK frequency */
+       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+               table->ACPILevel.SclkFrequency,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* divider ID for required SCLK*/
+       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+       table->ACPILevel.DeepSleepDivId = 0;
+
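+       /* Power the SPLL down and hold it in reset, and switch the SCLK mux away from it for the ACPI state */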
+       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+                                                       CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
+       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+                                                       CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
+       spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
+                                                       CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
+
+       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+       /* For various features to be enabled/disabled while this level is active.*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       /* SCLK frequency in units of 10KHz*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+       table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+       else {
+               if (data->acpi_vddci != 0)
+                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+               else
+                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+       }
+
+       if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+               table->MemoryACPILevel.MinMvdd =
+                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+       else
+               table->MemoryACPILevel.MinMvdd = 0;
+
+       /* Force reset on DLL*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+       /* Disable DLL in ACPIState*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+       /* Enable DLL bypass signal*/
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK0_BYPASS, 0);
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK1_BYPASS, 0);
+
+       table->MemoryACPILevel.DllCntl            =
+               PP_HOST_TO_SMC_UL(dll_cntl);
+       table->MemoryACPILevel.MclkPwrmgtCntl     =
+               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+       table->MemoryACPILevel.MpllAdFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+       table->MemoryACPILevel.MpllDqFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl       =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl_1     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+       table->MemoryACPILevel.MpllFuncCntl_2     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+       table->MemoryACPILevel.MpllSs1            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+       table->MemoryACPILevel.MpllSs2            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       /* Indicates maximum activity level for this performance level.*/
+       table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       table->MemoryACPILevel.StutterEnable = 0;
+       table->MemoryACPILevel.StrobeEnable = 0;
+       table->MemoryACPILevel.EdcReadEnable = 0;
+       table->MemoryACPILevel.EdcWriteEnable = 0;
+       table->MemoryACPILevel.RttEnable = 0;
+
+       return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+                                       SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+               struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock,
+               uint32_t memory_clock,
+               struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+               )
+{
+       uint32_t dramTiming;
+       uint32_t dramTiming2;
+       uint32_t burstTime;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                               engine_clock, memory_clock);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+       arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+       return 0;
+}
+
+/**
+ * Set up the parameters for the MC ARB.
+ *
+ * This function is meant to be called from the SetPowerState table.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       int result = 0;
+       SMU71_Discrete_MCArbDramTimingTable  arb_regs;
+       uint32_t i, j;
+
+       memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
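+       /* Build one ARB DRAM timing entry for every SCLK/MCLK combination */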
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = iceland_populate_memory_timing_parameters
+                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+                                data->dpm_table.mclk_table.dpm_levels[j].value,
+                                &arb_regs.entries[i][j]);
+
+                       if (0 != result) {
+                               break;
+                       }
+               }
+       }
+
+       if (0 == result) {
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU71_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END
+                               );
+       }
+
+       return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table*/
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.GraphicsBootLevel = 0;
+               printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
+               result = 0;
+       }
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+               data->vbios_boot_state.mclk_bootup_value,
+               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.MemoryBootLevel = 0;
+               printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value in dependency table. Using Memory DPM level 0!\n");
+               result = 0;
+       }
+
+       table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               table->BootVddci = table->BootVddc;
+       else
+               table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+       return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
+                                SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+       const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+
+       uint32_t i, j;
+
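+       /* Upload addresses only for registers flagged valid, i.e. whose values change across entries */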
+       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+               if (smu_data->mc_reg_table.validflag & 1<<j) {
+                       PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                               "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+                       mc_reg_table->address[i].s0 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
+
+       mc_reg_table->last = (uint8_t)i;
+
+       return 0;
+}
+
+/* convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+       const struct iceland_mc_reg_entry *entry,
+       SMU71_Discrete_MCRegisterSet *data,
+       uint32_t num_entries, uint32_t valid_flag)
+{
+       uint32_t i, j;
+
+       for (i = 0, j = 0; j < num_entries; j++) {
+               if (valid_flag & 1<<j) {
+                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+                       i++;
+               }
+       }
+}
+
+static int iceland_convert_mc_reg_table_entry_to_smc(
+               struct pp_smumgr *smumgr,
+               const uint32_t memory_clock,
+               SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+               )
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+       uint32_t i = 0;
+
+       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+               if (memory_clock <=
+                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+                       break;
+               }
+       }
+
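+       /* If no entry covers the requested clock, clamp to the highest (last) entry */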
+       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, smu_data->mc_reg_table.last,
+                               smu_data->mc_reg_table.validflag);
+
+       return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_MCRegisters *mc_regs)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int res;
+       uint32_t i;
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               res = iceland_convert_mc_reg_table_entry_to_smc(
+                               hwmgr->smumgr,
+                               data->dpm_table.mclk_table.dpm_levels[i].value,
+                               &mc_regs->data[i]
+                               );
+
+               if (0 != res)
+                       result = res;
+       }
+
+       return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t address;
+       int32_t result;
+
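+       /* Only re-upload when an overdrive MCLK change is pending */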
+       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+               return 0;
+
+
+       memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+       if (result != 0)
+               return result;
+
+
+       address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+       return  smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+                                (uint8_t *)&smu_data->mc_regs.data[0],
+                               sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+                               SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+
+       memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+       result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize MCRegTable for driver state!", return result;);
+
+       return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       uint8_t count, level;
+
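+       /* Select the lowest level whose dependency clock meets the VBIOS boot value */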
+       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+       for (level = 0; level < count; level++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+                        >= data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+       for (level = 0; level < count; level++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+                       >= data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+       SMU71_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+       struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+       struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+       uint16_t *def1, *def2;
+       int i, j, k;
+
+
+       /*
+        * TDP number of fraction bits are changed from 8 to 7 for Iceland
+        * as requested by SMC team
+        */
+
+       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+       dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+       dpm_table->DTETjOffset = 0;
+
+       dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+       dpm_table->GpuTjHyst = 8;
+
+       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+       /* The following are for new Iceland Multi-input fan/thermal control */
+       if (NULL != ppm) {
+               dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+               dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+       } else {
+               dpm_table->PPM_PkgPwrLimit = 0;
+               dpm_table->PPM_TemperatureLimit = 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+       dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+       def1 = defaults->bapmti_r;
+       def2 = defaults->bapmti_rc;
+
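+       /* Unpack the linear BAPMTI_R/BAPMTI_RC defaults into the per-iteration/source/sink DTE tables */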
+       for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+               for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+                       for (k = 0; k < SMU71_DTE_SINKS; k++) {
+                               dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+                               dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+                               def1++;
+                               def2++;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+                                           SMU71_Discrete_DpmTable *tab)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+               tab->SVI2Enable |= VDDC_ON_SVI2;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+               tab->SVI2Enable |= VDDCI_ON_SVI2;
+       else
+               tab->MergedVddci = 1;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+               tab->SVI2Enable |= MVDD_ON_SVI2;
+
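+       /* VDDC must be on SVI2, and not all three rails may be SVI2-controlled at once */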
+       PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+               (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+       return 0;
+}
+
+/**
+ * Initializes the SMC table and uploads it.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       SMU71_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+
+
+       iceland_initialize_power_tune_defaults(hwmgr);
+       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+               iceland_populate_smc_voltage_tables(hwmgr, table);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+       if (data->ulv_supported) {
+               result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+               PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ULV state!", return result;);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = iceland_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Link Level!", return result;);
+
+       result = iceland_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Graphics Level!", return result;);
+
+       result = iceland_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Memory Level!", return result;);
+
+       result = iceland_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACPI Level!", return result;);
+
+       result = iceland_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize VCE Level!", return result;);
+
+       result = iceland_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACP Level!", return result;);
+
+       result = iceland_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize SAMU Level!", return result;);
+
+       /*
+        * Since only the initial state is completely set up at this point
+        * (the other states are just copies of the boot state), we only need
+        * to populate the ARB settings for the initial state.
+        */
+       result = iceland_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to Write ARB settings for the initial state.", return result;);
+
+       result = iceland_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize UVD Level!", return result;);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       result = iceland_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Boot Level!", return result;);
+
+       result = iceland_populate_smc_initial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+       result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+
+       table->TemperatureLimitHigh =
+               (data->thermal_temp_setting.temperature_high *
+                SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       table->TemperatureLimitLow =
+               (data->thermal_temp_setting.temperature_low *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       table->MemoryVoltageChangeEnable  = 1;
+       table->MemoryInterval  = 1;
+       table->VoltageResponseTime  = 0;
+       table->PhaseResponseTime  = 0;
+       table->MemoryThermThrottleEnable  = 1;
+       table->PCIeBootLinkLevel = 0;
+       table->PCIeGenInterval = 1;
+
+       result = iceland_populate_smc_svi2_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to populate SVI2 setting!", return result);
+
+       table->ThermGpio  = 17;
+       table->SclkStepSize = 0x4000;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+       table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+       table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
+                                                                               offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+                                                                               (uint8_t *)&(table->SystemFlags),
+                                                                               sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+                                                                               SMC_RAM_END);
+
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to upload dpm data to SMC memory!", return result;);
+
+       /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+                       smu_data->smu7_data.ulv_setting_starts,
+                       (uint8_t *)&(smu_data->ulv_setting),
+                       sizeof(SMU71_Discrete_Ulv),
+                       SMC_RAM_END);
+
+
+       result = iceland_populate_initial_mc_reg_table(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == result),
+               "Failed to populate initialize MC Reg table!", return result);
+
+       result = iceland_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+
+       return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   result of uploading the fan table to the SMC
+*/
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+       SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+               return 0;
+
+       if (0 == smu7_data->fan_table_start) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (0 == duty100) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
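+       /* Slopes for the two fan-curve segments; the +50 rounds the division by 100 (the factor of 16 is the SMC's fixed-point scaling) */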
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       /* fan_table.FanControl_GL_Flag = 1; */
+
+       res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+       return res;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+               return iceland_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU71_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+
+       result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+       result = iceland_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+
+       return result;
+}
+
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+               break;
+       }
+       printk("cant't get the offset of type %x member %x \n", type, member);
+       return 0;
+}
+
+uint32_t iceland_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU71_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU71_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU71_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU71_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU71_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU71_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU71_MAX_LEVELS_MVDD;
+       }
+
+       printk("cant't get the mac of %x \n", value);
+       return 0;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
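+       /* Pull each table's start offset out of the firmware header in SMC SRAM */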
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, DpmTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->dpm_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, SoftRegisters),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               data->soft_regs_start = tmp;
+               smu7_data->soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, mcRegisterTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->mc_reg_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, FanTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->fan_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->arb_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, Version),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               hwmgr->microcode_version_info.SMC = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, UlvSettings),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->ulv_setting_starts = tmp;
+       }
+
+       error |= (0 != result);
+
+       return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+       bool result = true;
+
+       switch (in_reg) {
+       case  mmMC_SEQ_RAS_TIMING:
+               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+               break;
+
+       case  mmMC_SEQ_DLL_STBY:
+               *out_reg = mmMC_SEQ_DLL_STBY_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD0:
+               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD1:
+               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CTRL:
+               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+               break;
+
+       case mmMC_SEQ_CAS_TIMING:
+               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING:
+               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING2:
+               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CMD:
+               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CTL:
+               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D0:
+               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D1:
+               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D0:
+               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D1:
+               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+               break;
+
+       case mmMC_PMG_CMD_EMRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS1:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+
+       case mmMC_SEQ_PMG_TIMING:
+               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS2:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_2:
+               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+               break;
+
+       default:
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+       uint32_t i;
+       uint16_t address;
+
+       for (i = 0; i < table->last; i++) {
+               table->mc_reg_address[i].s0 =
+                       iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+                       ? address : table->mc_reg_address[i].s1;
+       }
+       return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+                                       struct iceland_mc_reg_table *ni_table)
+{
+       uint8_t i, j;
+
+       PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+               "Invalid VramInfo table.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+               "Invalid VramInfo table.", return -EINVAL);
+
+       for (i = 0; i < table->last; i++) {
+               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+       }
+       ni_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               ni_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       ni_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+
+       ni_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+/**
+ * The VBIOS omits some information to reduce size; we need to recover it here.
+ * 1.   When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
+ *      mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to update
+ *      mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2.   When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written
+ *      to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3.   This data needs to be set for each clock range.
+ *
+ * @param    hwmgr the address of the powerplay hardware manager.
+ * @param    table the address of MCRegTable
+ * @return   always 0
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+                                       struct iceland_mc_reg_table *table)
+{
+       uint8_t i, j, k;
+       uint32_t temp_reg;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
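+       /* Recovered registers are appended after the last VBIOS entry; j tracks the append position */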
+       for (i = 0, j = table->last; i < table->last; i++) {
+               PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                       "Invalid VramInfo table.", return -EINVAL);
+
+               switch (table->mc_reg_address[i].s1) {
+
+               case mmMC_SEQ_MISC1:
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+                               if (!data->is_memory_gddr5) {
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                               }
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       if (!data->is_memory_gddr5) {
+                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++) {
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               }
+                               j++;
+                               PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                                       "Invalid VramInfo table.", return -EINVAL);
+                       }
+
+                       break;
+
+               case mmMC_SEQ_RESERVE_M:
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+                       break;
+
+               default:
+                       break;
+               }
+
+       }
+
+       table->last = j;
+
+       return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+       uint8_t i, j;
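+
+       /* Mark a register as needing upload when its value differs between any two adjacent entries */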
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+                               table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->validflag |= (1<<i);
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+       pp_atomctrl_mc_reg_table *table;
+       struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+       uint8_t module_index = iceland_get_memory_module_index(hwmgr);
+
+       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       /* Program additional LP registers that are no longer programmed by VBIOS */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+       if (0 == result)
+               result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+       if (0 == result) {
+               iceland_set_s0_mc_reg_index(ni_table);
+               result = iceland_set_mc_special_registers(hwmgr, ni_table);
+       }
+
+       if (0 == result)
+               iceland_set_valid_flag(ni_table);
+
+       kfree(table);
+
+       return result;
+}
+
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
new file mode 100644 (file)
index 0000000..13c8dbb
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _ICELAND_SMC_H
+#define _ICELAND_SMC_H
+
+#include "smumgr.h"
+
+
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
+uint32_t iceland_get_mac_definition(uint32_t value);
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644 (file)
index 0000000..eeafefc
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+#include "iceland_smc.h"
+
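+/* Upper bound for SMC firmware uploads: the SMC RAM window is 0x20000 (128 KiB). */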
+#define ICELAND_SMC_SIZE               0x20000
+
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+       SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+                                 SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+       return 0;
+}
+
+static void iceland_reset_smc(struct pp_smumgr *smumgr)
+{
+       SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+                                 SMC_SYSCON_RESET_CNTL,
+                                 rst_reg, 1);
+}
+
+
+static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+       SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+                                 SMC_SYSCON_CLOCK_CNTL_0,
+                                 ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+       SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+                                 SMC_SYSCON_CLOCK_CNTL_0,
+                                 ck_disable, 0);
+}
+
+static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+       /* set the SMC instruction start point to 0x0 */
+       smu7_program_jump_on_start(smumgr);
+
+       /* enable smc clock */
+       iceland_start_smc_clock(smumgr);
+               pr_info("SMU not running, uploading firmware again\n");
+       /* de-assert reset */
+       iceland_start_smc(smumgr);
+
+       SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+                                INTERRUPTS_ENABLED, 1);
+
+       return 0;
+}
+
+
+static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+                                       uint32_t length, const uint8_t *src,
+                                       uint32_t limit, uint32_t start_addr)
+{
+       uint32_t byte_count = length;
+       uint32_t data;
+
+       PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
+       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
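+       /* The SMC data port expects big-endian words: pack four source
+        * bytes MSB-first while auto-increment is enabled.
+        */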
+       while (byte_count >= 4) {
+               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+               src += 4;
+               byte_count -= 4;
+       }
+
+       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+       PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
+
+       return 0;
+}
+
+
+static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+{
+       uint32_t val;
+       struct cgs_firmware_info info = {0};
+
+       if (smumgr == NULL || smumgr->device == NULL)
+               return -EINVAL;
+
+       /* load SMC firmware */
+       cgs_get_firmware_info(smumgr->device,
+               smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+
+       if (info.image_size & 3) {
+               pr_err("[ powerplay ] SMC ucode is not 4-byte aligned\n");
+               return -EINVAL;
+       }
+
+       if (info.image_size > ICELAND_SMC_SIZE) {
+               pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
+               return -EINVAL;
+       }
+
+       /* wait for smc boot up */
+       SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+                                        RCU_UC_EVENTS, boot_seq_done, 0);
+
+       /* clear firmware interrupt enable flag */
+       val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+                                   ixSMC_SYSCON_MISC_CNTL);
+       cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+                              ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+       /* stop smc clock */
+       iceland_stop_smc_clock(smumgr);
+
+       /* reset smc */
+       iceland_reset_smc(smumgr);
+       iceland_upload_smc_firmware_data(smumgr, info.image_size,
+                               (uint8_t *)info.kptr, ICELAND_SMC_SIZE,
+                               info.ucode_start_address);
+
+       return 0;
+}
+
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+                                               uint32_t firmwareType)
+{
+       return 0;
+}
+
+static int iceland_start_smu(struct pp_smumgr *smumgr)
+{
+       int result;
+
+       result = iceland_smu_upload_firmware_image(smumgr);
+       if (result)
+               return result;
+       result = iceland_smu_start_smc(smumgr);
+       if (result)
+               return result;
+
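+       /* If the SMC did not come up, retry the upload and start sequence once. */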
+       if (!smu7_is_smc_ram_running(smumgr)) {
+               printk("smu not running, upload firmware again \n");
+               result = iceland_smu_upload_firmware_image(smumgr);
+               if (result)
+                       return result;
+
+               result = iceland_smu_start_smc(smumgr);
+               if (result)
+                       return result;
+       }
+
+       result = smu7_request_smu_load_fw(smumgr);
+
+       return result;
+}
+
+/**
+ * Initialize the Iceland SMU manager backend.
+ *
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @return   0 on success, -EINVAL if common smu7 initialization fails.
+ */
+static int iceland_smu_init(struct pp_smumgr *smumgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+       if (smu7_init(smumgr))
+               return -EINVAL;
+
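+       /* Default sclk activity target for every graphics DPM level. */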
+       for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
+               smu_data->activity_target[i] = 30;
+
+       return 0;
+}
+
+static const struct pp_smumgr_func iceland_smu_funcs = {
+       .smu_init = &iceland_smu_init,
+       .smu_fini = &smu7_smu_fini,
+       .start_smu = &iceland_start_smu,
+       .check_fw_load_finish = &smu7_check_fw_load_finish,
+       .request_smu_load_fw = &smu7_reload_firmware,
+       .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+       .send_msg_to_smc = &smu7_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .download_pptable_settings = NULL,
+       .upload_pptable_settings = NULL,
+       .get_offsetof = iceland_get_offsetof,
+       .process_firmware_header = iceland_process_firmware_header,
+       .init_smc_table = iceland_init_smc_table,
+       .update_sclk_threshold = iceland_update_sclk_threshold,
+       .thermal_setup_fan_table = iceland_thermal_setup_fan_table,
+       .populate_all_graphic_levels = iceland_populate_all_graphic_levels,
+       .populate_all_memory_levels = iceland_populate_all_memory_levels,
+       .get_mac_definition = iceland_get_mac_definition,
+       .initialize_mc_reg_table = iceland_initialize_mc_reg_table,
+       .is_dpm_running = iceland_is_dpm_running,
+};
+
+int iceland_smum_init(struct pp_smumgr *smumgr)
+{
+       struct iceland_smumgr *iceland_smu = NULL;
+
+       iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+       if (iceland_smu == NULL)
+               return -ENOMEM;
+
+       smumgr->backend = iceland_smu;
+       smumgr->smumgr_funcs = &iceland_smu_funcs;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
new file mode 100644 (file)
index 0000000..cfadfee
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_SMUMGR_H_
+#define _ICELAND_SMUMGR_H_
+
+
+#include "smu7_smumgr.h"
+#include "pp_endian.h"
+#include "smu71_discrete.h"
+
+struct iceland_pt_defaults {
+       uint8_t   svi_load_line_en;
+       uint8_t   svi_load_line_vddc;
+       uint8_t   tdc_vddc_throttle_release_limit_perc;
+       uint8_t   tdc_mawt;
+       uint8_t   tdc_waterfall_ctl;
+       uint8_t   dte_ambient_temp_base;
+       uint32_t  display_cac;
+       uint32_t  bamp_temp_gradient;
+       uint16_t  bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+       uint16_t  bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+};
+
+struct iceland_mc_reg_entry {
+       uint32_t mclk_max;
+       uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_mc_reg_table {
+       uint8_t   last;               /* number of registers*/
+       uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
+       uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+       struct iceland_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_smumgr {
+       struct smu7_smumgr smu7_data;
+       struct SMU71_Discrete_DpmTable       smc_state_table;
+       struct SMU71_Discrete_PmFuses  power_tune_table;
+       struct SMU71_Discrete_Ulv            ulv_setting;
+       struct iceland_pt_defaults  *power_tune_defaults;
+       SMU71_Discrete_MCRegisters      mc_regs;
+       struct iceland_mc_reg_table mc_reg_table;
+       uint32_t        activity_target[SMU71_MAX_LEVELS_GRAPHICS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
new file mode 100644 (file)
index 0000000..4ccc0b7
--- /dev/null
@@ -0,0 +1,2287 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "polaris10_smumgr.h"
+#include "pppcielanes.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VDDC_VDDCI_DELTA            200
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+       /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+       { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+       { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
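+/* Fallback FCW ranges, used when the VBIOS does not provide an SCLK range
+ * table: { vco setting, postdiv, fcw_pcc, fcw_trans_upper, fcw_trans_lower }.
+ */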
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+                       {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
+                       {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
+
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+               uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+       uint32_t i;
+       uint16_t vddci;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       *voltage = *mvdd = 0;
+
+       /* the clock-voltage dependency table is empty */
+       if (dep_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < dep_table->count; i++) {
+               /* find the first sclk greater than or equal to the request */
+               if (dep_table->entries[i].clk >= clock) {
+                       *voltage |= (dep_table->entries[i].vddc *
+                                       VOLTAGE_SCALE) << VDDC_SHIFT;
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else if (dep_table->entries[i].vddci)
+                               *voltage |= (dep_table->entries[i].vddci *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else {
+                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               (dep_table->entries[i].vddc -
+                                                               (uint16_t)VDDC_VDDCI_DELTA));
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       }
+
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+                                       VOLTAGE_SCALE;
+                       else if (dep_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+                                       VOLTAGE_SCALE;
+
+                       *voltage |= 1 << PHASES_SHIFT;
+                       return 0;
+               }
+       }
+
+       /* the requested sclk exceeds the highest sclk in the dependency table */
+       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+       else if (dep_table->entries[i-1].vddci) {
+               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                               (dep_table->entries[i-1].vddc -
+                                               (uint16_t)VDDC_VDDCI_DELTA));
+               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+       else if (dep_table->entries[i - 1].mvdd)
+               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+       return 0;
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+       uint32_t tmp;
+       tmp = raw_setting * 4096 / 100;
+       return (uint16_t)tmp;
+}
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+       SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+       struct pp_advance_fan_control_parameters *fan_table =
+                       &hwmgr->thermal_controller.advanceFanControlParameters;
+       int i, j, k;
+       const uint16_t *pdef1;
+       const uint16_t *pdef2;
+
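+       /* Scale to SMC fixed-point units: TDP by 128, temperature limits by 256. */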
+       table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+       table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+                               "Target Operating Temp is out of Range!",
+                               );
+
+       table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTargetOperatingTemp * 256);
+       table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitHotspot * 256);
+       table->FanGainEdge = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainEdge));
+       table->FanGainHotspot = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+       pdef1 = defaults->BAPMTI_R;
+       pdef2 = defaults->BAPMTI_RC;
+
+       for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+               for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+                       for (k = 0; k < SMU74_DTE_SINKS; k++) {
+                               table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+                               table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+                               pdef1++;
+                               pdef2++;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+       return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       fuse_table_offset +
+                       offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.TdcWaterfallCtl from SMC Failed!",
+                               return -EINVAL);
+       else {
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+               smu_data->power_tune_table.LPMLTemperatureMin =
+                               (uint8_t)((temp >> 16) & 0xff);
+               smu_data->power_tune_table.LPMLTemperatureMax =
+                               (uint8_t)((temp >> 8) & 0xff);
+               smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+       }
+       return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+/* TODO: move to hwmgr */
+       if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+               || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+       smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+                               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+       return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t hi_sidd, lo_sidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+       return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t pm_fuse_table_offset;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU74_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to get pm_fuse_table_offset Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate SviLoadLine Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+               if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TdcWaterfallCtl, "
+                                       "LPMLTemperature Min and Max Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_temperature_scaler(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate LPMLTemperatureScaler Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_fuzzy_fan(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Min and Max Vid Failed!",
+                                       return -EINVAL);
+
+               if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+                                       "Sidd Failed!", return -EINVAL);
+
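+               /* Download the populated fuse table to SMC RAM, minus its last 92 bytes. */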
+               if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed!",
+                                       return -EINVAL);
+       }
+       return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU74_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count, level;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               count = data->mvdd_voltage_table.count;
+               if (count > SMU_MAX_SMIO_LEVELS)
+                       count = SMU_MAX_SMIO_LEVELS;
+               for (level = 0; level < count; level++) {
+                       table->SmioTable2.Pattern[level].Voltage =
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+                       table->SmioTable2.Pattern[level].Smio =
+                               (uint8_t) level;
+                       table->Smio[level] |=
+                               data->mvdd_voltage_table.entries[level].smio_low;
+               }
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+               table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+       }
+
+       return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+                                       struct SMU74_Discrete_DpmTable *table)
+{
+       uint32_t count, level;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       count = data->vddci_voltage_table.count;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               if (count > SMU_MAX_SMIO_LEVELS)
+                       count = SMU_MAX_SMIO_LEVELS;
+               for (level = 0; level < count; ++level) {
+                       table->SmioTable1.Pattern[level].Voltage =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+                       table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+                       table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+               }
+       }
+
+       table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+       return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       uint32_t count;
+       uint8_t index;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+                       table_info->vddc_lookup_table;
+       /* The lookup table is already byte-swapped, so to use a value from
+        * it we need to swap it back.
+        * We populate the vddc CAC data into the BapmVddc table
+        * for both split and merged mode.
+        */
+       for (count = 0; count < lookup_table->count; count++) {
+               index = phm_get_voltage_index(lookup_table,
+                               data->vddc_voltage_table.entries[count].value);
+               table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+               table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+               table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+       }
+
+       return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always  0
+*/
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       polaris10_populate_smc_vddci_table(hwmgr, table);
+       polaris10_populate_smc_mvdd_table(hwmgr, table);
+       polaris10_populate_cac_table(hwmgr, table);
+
+       return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_Ulv *state)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+       state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+       return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int i;
+
+       /* Index (dpm_table->pcie_speed_table.count)
+        * is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity = 1;
+               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+                       (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* TODO: move to hwmgr */
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+                                  SMU74_Discrete_DpmTable  *table)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       uint32_t i, ref_clk;
+
+       struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+       ref_clk = smu7_get_xclk(hwmgr);
+
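+       /* Prefer the SCLK range table from the VBIOS; otherwise fall back
+        * to the built-in Range_Table.
+        */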
+       if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+               for (i = 0; i < NUM_SCLK_RANGE; i++) {
+                       table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+                       table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+                       table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+                       table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+                       table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+               }
+               return;
+       }
+
+       for (i = 0; i < NUM_SCLK_RANGE; i++) {
+               smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+               smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+               table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+               table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+               table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+               table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+               table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+       }
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       struct pp_atomctrl_clock_dividers_ai dividers;
+       uint32_t ref_clock;
+       uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+       uint8_t i;
+       int result;
+       uint64_t temp;
+
+       sclk_setting->SclkFrequency = clock;
+       /* get the engine clock dividers for this clock value */
+       result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
+       if (result == 0) {
+               sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+               sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+               sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+               sclk_setting->PllRange = dividers.ucSclkPllRange;
+               sclk_setting->Sclk_slew_rate = 0x400;
+               sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+               sclk_setting->Pcc_down_slew_rate = 0xffff;
+               sclk_setting->SSc_En = dividers.ucSscEnable;
+               sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+               sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+               sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+               return result;
+       }
+
+       ref_clock = smu7_get_xclk(hwmgr);
+
+       for (i = 0; i < NUM_SCLK_RANGE; i++) {
+               if (clock > smu_data->range_table[i].trans_lower_frequency
+               && clock <= smu_data->range_table[i].trans_upper_frequency) {
+                       sclk_setting->PllRange = i;
+                       break;
+               }
+       }
+
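+       /* No VBIOS dividers available: derive the integer FCW and its
+        * 16-bit fractional part from the reference clock.
+        */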
+       sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+       temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+       temp <<= 0x10;
+       do_div(temp, ref_clock);
+       sclk_setting->Fcw_frac = temp & 0xffff;
+
+       pcc_target_percent = 10; /*  Hardcode 10% for now. */
+       pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+       sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+       ss_target_percent = 2; /*  Hardcode 2% for now. */
+       sclk_setting->SSc_En = 0;
+       if (ss_target_percent) {
+               sclk_setting->SSc_En = 1;
+               ss_target_freq = clock - (clock * ss_target_percent / 100);
+               sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+               temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+               temp <<= 0x10;
+               do_div(temp, ref_clock);
+               sclk_setting->Fcw1_frac = temp & 0xffff;
+       }
+
+       return 0;
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    clock the engine clock to use to populate the structure
+* @param    sclk        the SMC SCLK structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, uint16_t sclk_al_threshold,
+               struct SMU74_Discrete_GraphicsLevel *level)
+{
+       int result;
+       uint32_t mvdd;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       SMU_SclkSetting curr_sclk_setting = { 0 };
+
+       result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+       /* populate graphics levels */
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk, clock,
+                       &level->MinVoltage, &mvdd);
+
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "cannot find VDDC voltage value in the "
+                       "VDDC engine clock dependency table",
+                       return result);
+       level->ActivityLevel = sclk_al_threshold;
+
+       level->CcPwrDynRm = 0;
+       level->CcPwrDynRm1 = 0;
+       level->EnabledForActivity = 0;
+       level->EnabledForThrottle = 1;
+       level->UpHyst = 10;
+       level->DownHyst = 0;
+       level->VoltageDownHyst = 0;
+       level->PowerThrottle = 0;
+       data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+               level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+                                                               hwmgr->display_config.min_core_set_clock_in_sr);
+
+       /* Default to slow, highest DPM level will be
+        * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+        */
+       if (data->update_up_hyst)
+               level->UpHyst = (uint8_t)data->up_hyst;
+       if (data->update_down_hyst)
+               level->DownHyst = (uint8_t)data->down_hyst;
+
+       level->SclkSetting = curr_sclk_setting;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+       return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+       uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+       int result = 0;
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+       uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+                       SMU74_MAX_LEVELS_GRAPHICS;
+       struct SMU74_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t i, max_entry;
+       uint8_t highest_pcie_level_enabled = 0,
+               lowest_pcie_level_enabled = 0,
+               mid_pcie_level_enabled = 0,
+               count = 0;
+
+       polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+               result = polaris10_populate_single_graphic_level(hwmgr,
+                               dpm_table->sclk_table.dpm_levels[i].value,
+                               (uint16_t)smu_data->activity_target[i],
+                               &(smu_data->smc_state_table.GraphicsLevel[i]));
+               if (result)
+                       return result;
+
+               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+               if (i > 1)
+                       levels[i].DeepSleepDivId = 0;
+       }
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_SPLLShutdownSupport))
+               smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+                       (uint8_t)dpm_table->sclk_table.count;
+       hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+       if (pcie_table != NULL) {
+               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+                               "There must be 1 or more PCIE levels defined in PPTable.",
+                               return -EINVAL);
+               max_entry = pcie_entry_cnt - 1;
+               for (i = 0; i < dpm_table->sclk_table.count; i++)
+                       levels[i].pcieDpmLevel =
+                                       (uint8_t) ((i < max_entry) ? i : max_entry);
+       } else {
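+               /* No PCIe table: derive the lowest/mid/highest enabled
+                * levels from the PCIe DPM enable mask.
+                */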
+               while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << (highest_pcie_level_enabled + 1))) != 0))
+                       highest_pcie_level_enabled++;
+
+               while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << lowest_pcie_level_enabled)) == 0))
+                       lowest_pcie_level_enabled++;
+
+               while ((count < highest_pcie_level_enabled) &&
+                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+                       count++;
+
+               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+                               highest_pcie_level_enabled ?
+                                               (lowest_pcie_level_enabled + 1 + count) :
+                                               highest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to highest_pcie_level_enabled */
+               for (i = 2; i < dpm_table->sclk_table.count; i++)
+                       levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to lowest_pcie_level_enabled */
+               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to mid_pcie_level_enabled */
+               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+       }
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int result = 0;
+       struct cgs_display_info info = {0, 0, NULL};
+       uint32_t mclk_stutter_mode_threshold = 40000;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       if (table_info->vdd_dep_on_mclk) {
+               result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                               table_info->vdd_dep_on_mclk, clock,
+                               &mem_level->MinVoltage, &mem_level->MinMvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "cannot find MinVddc voltage value in the memory "
+                               "VDDC voltage dependency table", return result);
+       }
+
+       mem_level->MclkFrequency = clock;
+       mem_level->EnabledForThrottle = 1;
+       mem_level->EnabledForActivity = 0;
+       mem_level->UpHyst = 0;
+       mem_level->DownHyst = 100;
+       mem_level->VoltageDownHyst = 0;
+       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       mem_level->StutterEnable = false;
+       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       data->display_timing.num_existing_displays = info.display_count;
+
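+       /* Enable memory stutter only when the clock is at or below the
+        * threshold and the display pipe reports stutter enabled.
+        */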
+       if (mclk_stutter_mode_threshold &&
+               (clock <= mclk_stutter_mode_threshold) &&
+               (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+                               STUTTER_ENABLE) & 0x1))
+               mem_level->StutterEnable = true;
+
+       if (!result) {
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+       }
+       return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+       int result;
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+       uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+                       SMU74_MAX_LEVELS_MEMORY;
+       struct SMU74_Discrete_MemoryLevel *levels =
+                       smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                               "cannot populate memory level as memory clock is zero",
+                               return -EINVAL);
+               result = polaris10_populate_single_memory_level(hwmgr,
+                               dpm_table->mclk_table.dpm_levels[i].value,
+                               &levels[i]);
+               if (i == dpm_table->mclk_table.count - 1) {
+                       levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+                       levels[i].EnabledForActivity = 1;
+               }
+               if (result)
+                       return result;
+       }
+
+       /* To prevent MC activity in stutter mode from pushing DPM up,
+        * the UVD change complements this by putting the MCLK in
+        * a higher state by default, so that we are not affected by
+        * the up threshold or MCLK DPM latency.
+        */
+       levels[0].ActivityLevel = 0x1f;
+       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount =
+                       (uint8_t)dpm_table->mclk_table.count;
+       hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    mclk        the MCLK value used to decide whether MVDD should be high or low.
+* @param    voltage     the SMC VOLTAGE structure to be populated
+*/
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+               uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find the first mvdd entry whose clock is greater than or equal to the request */
+               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+                               "MVDD Voltage is outside the supported range.",
+                               return -EINVAL);
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint32_t sclk_frequency;
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       SMIO_Pattern vol_level;
+       uint32_t mvdd;
+       uint16_t us_mvdd;
+
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       /* Get MinVoltage and Frequency from DPM0,
+        * already converted to SMC_UL */
+       sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk,
+                       sclk_frequency,
+                       &table->ACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDC voltage value "
+                       "in Clock Dependency Table",
+                       );
+
+       result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
+       PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       table->ACPILevel.DeepSleepDivId = 0;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+       table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_mclk,
+                       table->MemoryACPILevel.MclkFrequency,
+                       &table->MemoryACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDCI voltage value "
+                       "in Clock Dependency Table",
+                       );
+
+       us_mvdd = 0;
+       if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+                       (data->mclk_dpm_key_disabled))
+               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+       else {
+               if (!polaris10_populate_mvdd_value(hwmgr,
+                               data->dpm_table.mclk_table.dpm_levels[0].value,
+                               &vol_level))
+                       us_mvdd = vol_level.Voltage;
+       }
+
+       if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+               table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+       else
+               table->MemoryACPILevel.MinMvdd = 0;
+
+       table->MemoryACPILevel.StutterEnable = false;
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       table->MemoryACPILevel.ActivityLevel =
+                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+       return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->VceLevelCount = (uint8_t)(mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage = 0;
+               table->VceLevel[count].MinVoltage |=
+                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+               table->VceLevel[count].MinVoltage |=
+                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value from VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+       }
+       return result;
+}
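
The MinVoltage word built in this loop (and in the SAMU and UVD loops below)
packs three fields using the shift constants from the code. A sketch of the
packing; the field widths are assumptions read off the shifts, not confirmed
against the SMU headers:

    /* pack VDDC, VDDCI and a phase count into one 32-bit word (a sketch) */
    static uint32_t pack_min_voltage(uint16_t vddc, uint16_t vddci,
                    uint8_t phases)
    {
            uint32_t v = 0;

            v |= (uint32_t)(vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
            v |= (uint32_t)(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
            v |= (uint32_t)phases << PHASES_SHIFT;
            return v;
    }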
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].MinVoltage = 0;
+               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value from VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+               int32_t eng_clock, int32_t mem_clock,
+               SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+       uint32_t dram_timing;
+       uint32_t dram_timing2;
+       uint32_t burst_time;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                       eng_clock, mem_clock);
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+       arb_regs->McArbBurstTime   = (uint8_t)burst_time;
+
+       return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+       uint32_t i, j;
+       int result = 0;
+
+       for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+                       result = polaris10_populate_memory_timing_parameters(hwmgr,
+                                       hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+                                       hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+                                       &arb_regs.entries[i][j]);
+                       if (result == 0)
+                               result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+                       if (result != 0)
+                               return result;
+               }
+       }
+
+       result = smu7_copy_bytes_to_smc(
+                       hwmgr->smumgr,
+                       smu_data->smu7_data.arb_table_start,
+                       (uint8_t *)&arb_regs,
+                       sizeof(SMU74_Discrete_MCArbDramTimingTable),
+                       SMC_RAM_END);
+       return result;
+}
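
The nested loop fills one DRAM timing entry per (SCLK level, MCLK level)
pair, so the SMC can look timings up for any engine/memory clock
combination; the whole two-dimensional table is then uploaded in a single
copy. A sketch of the geometry, using the names from the code above:

    /* arb_regs.entries[sclk_level][mclk_level] ->
     *         { McArbDramTiming, McArbDramTiming2, McArbBurstTime }
     */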
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->UvdLevelCount = (uint8_t)(mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].MinVoltage = 0;
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value from VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].VclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Vclk clock", return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Dclk clock", return result);
+
+               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+       }
+
+       return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table */
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(table->GraphicsBootLevel));
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+                       data->vbios_boot_state.mclk_bootup_value,
+                       (uint32_t *)&(table->MemoryBootLevel));
+
+       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+                       VOLTAGE_SCALE;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return 0;
+}
+
+static int polaris10_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint8_t count, level;
+
+       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+                               hw_data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+                               hw_data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+       /* Read the SMU efuse to calculate RO and determine whether
+        * the part is SS or FF. If RO >= 1660MHz, the part is FF.
+        */
+       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (67 * 4));
+       efuse &= 0xFF000000;
+       efuse = efuse >> 24;
+
+       if (hwmgr->chip_id == CHIP_POLARIS10) {
+               min = 1000;
+               max = 2300;
+       } else {
+               min = 1100;
+               max = 2100;
+       }
+
+       ro = efuse * (max - min) / 255 + min;
+
+       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+       for (i = 0; i < sclk_table->count; i++) {
+               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+                               sclk_table->entries[i].cks_enable << i;
+               if (hwmgr->chip_id == CHIP_POLARIS10) {
+                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+               } else {
+                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+               }
+
+               if (volt_without_cks >= volt_with_cks)
+                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+       }
+
+       smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+       /* Populate CKS Lookup Table */
+       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+               stretch_amount2 = 0;
+       else if (stretch_amount == 3 || stretch_amount == 4)
+               stretch_amount2 = 1;
+       else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ClockStretcher);
+               PP_ASSERT_WITH_CODE(false,
+                               "Stretch Amount in PPTable not supported\n",
+                               return -EINVAL);
+       }
+
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+       value &= 0xFFFFFFFE;
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+       return 0;
+}
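
For a concrete feel of the RO interpolation above: the fused byte (0..255)
is mapped linearly onto [min, max]. With the Polaris10 range and a
hypothetical efuse value of 128:

    /* ro = efuse * (max - min) / 255 + min
     *    = 128 * (2300 - 1000) / 255 + 1000
     *    = 166400 / 255 + 1000 = 652 + 1000 = 1652
     */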
+
+/**
+* Populates the SMC VRConfig field in the DPM table.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint16_t config;
+
+       config = VR_MERGED_WITH_VDDC;
+       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+       /* Set Vddc Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= config;
+       } else {
+               PP_ASSERT_WITH_CODE(false,
+                               "VDDC should be on SVI2 control in merged mode!",
+                               );
+       }
+       /* Set Vddci Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       }
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+               config = VR_SVI2_PLANE_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       }
+
+       return 0;
+}
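
Putting the branches above together, VRConfig ends up as a 32-bit word with
one regulator selector per rail. A hedged sketch of the common merged-mode
outcome (SVI2 for VDDC and VDDCI, static MVDD; one case among several):

    uint32_t vrconfig = 0;

    vrconfig |= VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT;
    vrconfig |= VR_SVI2_PLANE_1;                        /* VDDC field  */
    vrconfig |= VR_SVI2_PLANE_2 << VRCONF_VDDCI_SHIFT;  /* VDDCI field */
    vrconfig |= VR_STATIC_VOLTAGE << VRCONF_MVDD_SHIFT; /* MVDD field  */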
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+       SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+       int result = 0;
+       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+       uint32_t tmp, i;
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return result;
+
+       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+       if (0 == result) {
+               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+               }
+
+               result = smu7_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+                               &tmp, SMC_RAM_END);
+
+               smu7_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_meanNsigma,
+                                       sizeof(AVFS_meanNsigma_t),
+                                       SMC_RAM_END);
+
+               result = smu7_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+                               &tmp, SMC_RAM_END);
+               smu7_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_SclkOffset,
+                                       sizeof(AVFS_Sclk_Offset_t),
+                                       SMC_RAM_END);
+
+               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+       }
+       return result;
+}
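
The AVFS upload follows a two-step pattern used throughout this file: read
the table's SMC RAM address out of the firmware header, then copy the
host-side structure to that address. A condensed sketch (error handling
elided; names taken from the code above):

    uint32_t addr;

    /* 1) fetch the table's address from the firmware header */
    smu7_read_smc_sram_dword(smumgr,
                    SMU7_FIRMWARE_HEADER_LOCATION +
                    offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
                    &addr, SMC_RAM_END);

    /* 2) copy the populated host structure to that address */
    smu7_copy_bytes_to_smc(smumgr, addr, (uint8_t *)&AVFS_meanNsigma,
                    sizeof(AVFS_meanNsigma_t), SMC_RAM_END);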
+
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param    smumgr  the address of the SMC manager.
+* @return   always 0
+*/
+static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       uint32_t tmp;
+       int result;
+
+       /* This is a read-modify-write on the first byte of the ARB table.
+        * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+        * is the field 'current'.
+        * This solution is ugly, but we never write the whole table, only
+        * individual fields in it.
+        * In reality this field should not be in that structure
+        * but in a soft register.
+        */
+       result = smu7_read_smc_sram_dword(smumgr,
+                       smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+       if (result)
+               return result;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+       return smu7_write_smc_sram_dword(smumgr,
+                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
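
The mask-and-shift edits only that first byte: in the big-endian dword read
back from SMC RAM, the byte at the lowest address occupies bits 31:24, so
clearing 0xFF000000 and OR-ing in the new arbitration index shifted by 24
rewrites 'current' without disturbing its neighbours. An illustration with
invented values:

    /* dword read back: 0xAABBCCDD, where 0xAA is the 'current' byte
     * tmp &= 0x00FFFFFF;                  -> 0x00BBCCDD
     * tmp |= MC_CG_ARB_FREQ_F1 << 24;     -> 0x01BBCCDD (0x01 invented)
     */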
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       struct  phm_ppt_v1_information *table_info =
+                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (table_info &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID)
+               smu_data->power_tune_defaults =
+                               &polaris10_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+       else
+               smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success; an error code on failure
+*/
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       uint8_t i;
+       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+       pp_atomctrl_clock_dividers_vi dividers;
+
+       polaris10_initialize_power_tune_defaults(hwmgr);
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+               polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+       table->SystemFlags = 0;
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (hw_data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+               result = polaris10_populate_ulv_state(hwmgr, table);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to initialize ULV state!", return result);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+       }
+
+       result = polaris10_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Link Level!", return result);
+
+       result = polaris10_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Graphics Level!", return result);
+
+       result = polaris10_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Memory Level!", return result);
+
+       result = polaris10_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ACPI Level!", return result);
+
+       result = polaris10_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize VCE Level!", return result);
+
+       result = polaris10_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize SAMU Level!", return result);
+
+       /* Since only the initial state is completely set up at this point
+        * (the other states are just copies of the boot state) we only
+        * need to populate the ARB settings for the initial state.
+        */
+       result = polaris10_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to Write ARB settings for the initial state.", return result);
+
+       result = polaris10_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize UVD Level!", return result);
+
+       result = polaris10_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot Level!", return result);
+
+       result = polaris10_populate_smc_initial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot State!", return result);
+
+       result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate BAPM Parameters!", return result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ClockStretcher)) {
+               result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to populate Clock Stretcher Data Table!",
+                               return result);
+       }
+
+       result = polaris10_populate_avfs_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+       table->CurrSclkPllRange = 0xff;
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+       table->TemperatureLimitHigh =
+                       table_info->cac_dtp_table->usTargetOperatingTemp *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->TemperatureLimitLow  =
+                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->MemoryVoltageChangeEnable = 1;
+       table->MemoryInterval = 1;
+       table->VoltageResponseTime = 0;
+       table->PhaseResponseTime = 0;
+       table->MemoryThermThrottleEnable = 1;
+       table->PCIeBootLinkLevel = 0;
+       table->PCIeGenInterval = 1;
+       table->VRConfig = 0;
+
+       result = polaris10_populate_vr_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate VRConfig setting!", return result);
+
+       table->ThermGpio = 17;
+       table->SclkStepSize = 0x4000;
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                       &gpio_pin)) {
+               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       } else {
+               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       }
+
+       /* Thermal Output GPIO */
+       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+                       &gpio_pin)) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+               /* For polarity, read GPIOPAD_A with the assigned GPIO pin,
+                * since the VBIOS programs this register to the 'inactive
+                * state'; the driver can then derive the 'active state' from
+                * it and program the SMU with the correct polarity
+                */
+               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+                                       & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+               /* if required, combine VRHot/PCC with thermal out GPIO */
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+               && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+       } else {
+               table->ThermOutGpio = 17;
+               table->ThermOutPolarity = 1;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+       }
+
+       /* Populate BIF_SCLK levels into SMC DPM table */
+       for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+               PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+               if (i == 0)
+                       table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+               else
+                       table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+       }
+
+       for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+                       smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+                       (uint8_t *)&(table->SystemFlags),
+                       sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+                       SMC_RAM_END);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload dpm data to SMC memory!", return result);
+
+       result = polaris10_init_arb_table_index(hwmgr->smumgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload arb data to SMC memory!", return result);
+
+       result = polaris10_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+       return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return polaris10_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+       int ret;
+       struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return 0;
+
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
+       ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+                       0 : -1;
+
+       if (!ret)
+               /* If this param is not changed, this function could fire unnecessarily */
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+       return ret;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (smu_data->smu7_data.fan_table_start == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (duty100 == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+                       usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->
+                       thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
+                       reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+                       hwmgr->device, CGS_IND_REG__SMC,
+                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+                       SMC_RAM_END);
+
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ucMinimumPWMLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SetFanMinPwm,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ucMinimumPWMLimit);
+
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SetFanSclkTarget,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+       if (res)
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+
+       return 0;
+}
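
The fdo_min computation converts the minimum PWM, which the /10000 divisor
suggests is expressed in hundredths of a percent (an assumption, not
confirmed here), into FDO duty counts. A worked example with invented
values:

    /* usPWMMin = 3000 (30.00%, invented) and duty100 = 255:
     * tmp64   = 3000 * 255 = 765000
     * fdo_min = 765000 / 10000 = 76   (~30% of the 255-count full scale)
     */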
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.UvdBootLevel = 0;
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+                                               UvdBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0x00FFFFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
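
The divide-then-multiply by four is an integer round-down to the containing
dword, since the indirect-register accessors work on 32-bit words; the mask
and shift then edit only the UvdBootLevel byte inside that word (the VCE and
SAMU helpers below use shifts of 16 and 0 for their bytes). A sketch with an
invented offset:

    /* byte_off  = 0x1235 (invented field offset)
     * dword_off = (0x1235 / 4) * 4 = 0x1234   -> containing 32-bit word
     * value     = (value & 0x00FFFFFF) | level << 24;   edits one byte
     */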
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smu_data->smc_state_table.VceBootLevel =
+                       (uint8_t) (table_info->mm_dep_table->count - 1);
+       else
+               smu_data->smc_state_table.VceBootLevel = 0;
+
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                                       offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFF00FFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFFFFFF00;
+       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+       int max_entry, i;
+
+       max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+                                               SMU74_MAX_LEVELS_LINK :
+                                               pcie_table->count;
+       /* Setup BIF_SCLK levels */
+       for (i = 0; i < max_entry; i++)
+               smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+       return 0;
+}
+
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               polaris10_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               polaris10_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               polaris10_update_samu_smc_table(hwmgr);
+               break;
+       case SMU_BIF_TABLE:
+               polaris10_update_bif_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
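
polaris10_update_smc_table() is the single dispatch point the hwmgr layer
can call whenever one engine's state changes. A hypothetical call site:

    /* refresh the UVD boot level after a UVD power-state change
     * (illustrative caller, not part of this patch)
     */
    polaris10_update_smc_table(hwmgr, SMU_UVD_TABLE);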
+
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU74_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to update SCLK threshold!", return result);
+
+       result = polaris10_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+
+       return result;
+}
+
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+       }
+       printk("cant't get the offset of type %x member %x \n", type, member);
+       return 0;
+}
+
+uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU74_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU74_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU74_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU74_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU74_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU74_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU74_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU74_MAX_LEVELS_MVDD;
+       case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+               return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+       }
+
+       printk("cant't get the mac of %x \n", value);
+       return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success, -1 if any header field could not be read
+*/
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, DpmTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, SoftRegisters),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.soft_regs_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, mcRegisterTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, FanTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, Version),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (0 != result);
+
+       return error ? -1 : 0;
+}
+
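Note the error-accumulation style in the function above: each header field is read independently and failures are folded into a single flag, so one bad read does not abort the remaining lookups. A standalone sketch of the pattern, with read_field() as a hypothetical stand-in for smu7_read_smc_sram_dword():

#include <stdbool.h>
#include <stdio.h>

/* read_field() stands in for smu7_read_smc_sram_dword(): several independent
 * reads, with failures folded into one flag instead of aborting early. */
static int read_field(int which, unsigned int *out)
{
	*out = 0x1000 + which;		/* fake payload */
	return which == 2 ? -1 : 0;	/* pretend field 2 is unreadable */
}

int main(void)
{
	bool error = false;
	unsigned int val;
	int i;

	for (i = 0; i < 4; i++) {
		int result = read_field(i, &val);

		if (!result)
			printf("field %d = 0x%x\n", i, val);
		error |= (result != 0);
	}
	return error ? -1 : 0;
}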
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
new file mode 100644 (file)
index 0000000..5ade3ce
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef POLARIS10_SMC_H
+#define POLARIS10_SMC_H
+
+#include "smumgr.h"
+
+
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
+uint32_t polaris10_get_mac_definition(uint32_t value);
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
index 5dba7c50971020fe1ff3a052970fd14cb8860b09..5c3598ab7daeb1f2f73984ec72102f582c961ce1 100644 (file)
 #include "ppatomctrl.h"
 #include "pp_debug.h"
 #include "cgs_common.h"
+#include "polaris10_smc.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
 
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE                 80000
-#define MAX_STRING_SIZE             15
-#define BUFFER_SIZETWO              131072  /* 128 *1024 */
-
-#define SMC_RAM_END 0x40000
+#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
 
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
        /*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
@@ -62,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
        { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
 };
 
-static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
-       {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-*/
-static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
-{
-       PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
-       PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
-
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-       return 0;
-}
-
-/**
-* Copy bytes from SMC RAM space into driver memory.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smc_start_address the start address in the SMC RAM to copy bytes from
-* @param    src the byte array to copy the bytes to.
-* @param    byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
-       uint32_t data;
-       uint32_t addr;
-       uint8_t *dest_byte;
-       uint8_t i, data_byte[4] = {0};
-       uint32_t *pdata = (uint32_t *)&data_byte;
-
-       PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
-       PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
-       addr = smc_start_address;
-
-       while (byte_count >= 4) {
-               polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-
-               *dest = PP_SMC_TO_HOST_UL(data);
-
-               dest += 1;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (byte_count) {
-               polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-               *pdata = PP_SMC_TO_HOST_UL(data);
-       /* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
-               dest_byte = (uint8_t *)dest;
-               for (i = 0; i < byte_count; i++)
-                       dest_byte[i] = data_byte[i];
-       }
-
-       return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    pSmuMgr  the address of the powerplay SMU manager.
-* @param    smc_start_address the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
-                               const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-       int result;
-       uint32_t data = 0;
-       uint32_t original_data;
-       uint32_t addr = 0;
-       uint32_t extra_shift;
-
-       PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
-       PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
-       addr = smc_start_address;
-
-       while (byte_count >= 4) {
-       /* Bytes are written into the SMC addres space with the MSB first. */
-               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
-               result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-
-               src += 4;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (0 != byte_count) {
-
-               data = 0;
-
-               result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-
-               original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-
-               extra_shift = 8 * (4 - byte_count);
-
-               while (byte_count > 0) {
-                       /* Bytes are written into the SMC addres space with the MSB first. */
-                       data = (0x100 * data) + *src++;
-                       byte_count--;
-               }
-
-               data <<= extra_shift;
-
-               data |= (original_data & ~((~0UL) << extra_shift));
-
-               result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-       }
-
-       return 0;
-}
-
-
-static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-       static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
-
-       polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
-
-       return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-       return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-       && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
-{
-       uint32_t efuse;
-
-       efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
-       efuse &= 0x00000001;
-       if (efuse)
-               return true;
-
-       return false;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-       int ret;
-
-       if (!polaris10_is_smc_ram_running(smumgr))
-               return -1;
-
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
-       if (ret != 1)
-               printk("\n failed to send pre message %x ret is %d \n",  msg, ret);
-
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
-       if (ret != 1)
-               printk("\n failed to send message %x ret is %d \n",  msg, ret);
-
-       return 0;
-}
-
-
-/**
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   Always return 0.
-*/
-int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
-{
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-/**
-* Send a message to the SMC with parameter
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-       if (!polaris10_is_smc_ram_running(smumgr)) {
-               return -1;
-       }
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return polaris10_send_msg_to_smc(smumgr, msg);
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Message.\n");
-
-       return 0;
-}
-
-/**
-* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
-{
-       /* If the SMC is not even on it qualifies as inactive. */
-       if (!polaris10_is_smc_ram_running(smumgr))
-               return -1;
-
-       SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
-       return 0;
-}
-
-
-/**
-* Upload the SMC firmware to the SMC microcontroller.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    pFirmware the data structure containing the various sections of the firmware.
-*/
-static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
-{
-       uint32_t byte_count = length;
-
-       PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
+       0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
-
-       for (; byte_count >= 4; byte_count -= 4)
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
-
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-       PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1);
-
-       return 0;
-}
-
-static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-       enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-       switch (fw_type) {
-       case UCODE_ID_SMU:
-               result = CGS_UCODE_ID_SMU;
-               break;
-       case UCODE_ID_SMU_SK:
-               result = CGS_UCODE_ID_SMU_SK;
-               break;
-       case UCODE_ID_SDMA0:
-               result = CGS_UCODE_ID_SDMA0;
-               break;
-       case UCODE_ID_SDMA1:
-               result = CGS_UCODE_ID_SDMA1;
-               break;
-       case UCODE_ID_CP_CE:
-               result = CGS_UCODE_ID_CP_CE;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = CGS_UCODE_ID_CP_PFP;
-               break;
-       case UCODE_ID_CP_ME:
-               result = CGS_UCODE_ID_CP_ME;
-               break;
-       case UCODE_ID_CP_MEC:
-               result = CGS_UCODE_ID_CP_MEC;
-               break;
-       case UCODE_ID_CP_MEC_JT1:
-               result = CGS_UCODE_ID_CP_MEC_JT1;
-               break;
-       case UCODE_ID_CP_MEC_JT2:
-               result = CGS_UCODE_ID_CP_MEC_JT2;
-               break;
-       case UCODE_ID_RLC_G:
-               result = CGS_UCODE_ID_RLC_G;
-               break;
-       default:
-               break;
-       }
-
-       return result;
-}
-
-static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
-       int result = 0;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
-       struct cgs_firmware_info info = {0};
-
-       if (smu_data->security_hard_key == 1)
-               cgs_get_firmware_info(smumgr->device,
-                       polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-       else
-               cgs_get_firmware_info(smumgr->device,
-                       polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
-
-       /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
-       result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
-
-       return result;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-* @param    value and output parameter for the data read from the SMC SRAM.
-*/
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
-{
-       int result;
-
-       result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
-       if (result)
-               return result;
-
-       *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-       return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value to write to the SMC SRAM.
-*/
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
-{
-       int result;
-
-       result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
-       if (result)
-               return result;
-
-       cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
-
-       return 0;
-}
-
-
-int polaris10_smu_fini(struct pp_smumgr *smumgr)
-{
-       if (smumgr->backend) {
-               kfree(smumgr->backend);
-               smumgr->backend = NULL;
-       }
-       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-       return 0;
-}
-
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
-{
-       uint32_t result = 0;
-
-       switch (fw_type) {
-       case UCODE_ID_SDMA0:
-               result = UCODE_ID_SDMA0_MASK;
-               break;
-       case UCODE_ID_SDMA1:
-               result = UCODE_ID_SDMA1_MASK;
-               break;
-       case UCODE_ID_CP_CE:
-               result = UCODE_ID_CP_CE_MASK;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = UCODE_ID_CP_PFP_MASK;
-               break;
-       case UCODE_ID_CP_ME:
-               result = UCODE_ID_CP_ME_MASK;
-               break;
-       case UCODE_ID_CP_MEC_JT1:
-       case UCODE_ID_CP_MEC_JT2:
-               result = UCODE_ID_CP_MEC_MASK;
-               break;
-       case UCODE_ID_RLC_G:
-               result = UCODE_ID_RLC_G_MASK;
-               break;
-       default:
-               printk("UCode type is out of range! \n");
-               result = 0;
-       }
-
-       return result;
-}
-
-/* Populate one firmware image to the data structure */
-
-static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-                                               uint32_t fw_type,
-                                               struct SMU_Entry *entry)
-{
-       int result = 0;
-       struct cgs_firmware_info info = {0};
-
-       result = cgs_get_firmware_info(smumgr->device,
-                               polaris10_convert_fw_type_to_cgs(fw_type),
-                               &info);
-
-       if (!result) {
-               entry->version = info.version;
-               entry->id = (uint16_t)fw_type;
-               entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-               entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-               entry->meta_data_addr_high = 0;
-               entry->meta_data_addr_low = 0;
-               entry->data_size_byte = info.image_size;
-               entry->num_register_entries = 0;
-       }
-
-       if (fw_type == UCODE_ID_RLC_G)
-               entry->flags = 1;
-       else
-               entry->flags = 0;
-
-       return 0;
-}
-
-static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-       uint32_t fw_to_load;
-
-       int result = 0;
-       struct SMU_DRAMData_TOC *toc;
-
-       if (!smumgr->reload_fw) {
-               printk(KERN_INFO "[ powerplay ] skip reloading...\n");
-               return 0;
-       }
-
-       if (smu_data->soft_regs_start)
-               cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
-                                       smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
-                                       0x0);
-
-       polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
-       polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
-
-       toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-       PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-
-       polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
-       polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK
-                  + UCODE_ID_SDMA0_MASK
-                  + UCODE_ID_SDMA1_MASK
-                  + UCODE_ID_CP_CE_MASK
-                  + UCODE_ID_CP_ME_MASK
-                  + UCODE_ID_CP_PFP_MASK
-                  + UCODE_ID_CP_MEC_MASK;
-
-       if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
-               printk(KERN_ERR "Fail to Request SMU Load uCode");
-
-       return result;
-}
-
-/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-       uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
-       uint32_t ret;
-       /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
-       ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
-                                       smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
-                                       fw_mask, fw_mask);
-
-       return ret;
-}
-
-static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
-{
-       return smumgr->smumgr_funcs->start_smu(smumgr);
-}
 
 static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
@@ -669,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
 
        if (0 != smu_data->avfs.avfs_btc_param) {
-               if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+               if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
                        printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
                        result = -1;
                }
@@ -697,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
        graphics_level_size = sizeof(avfs_graphics_level_polaris10);
        u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
 
-       PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+       PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
                                SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
                                &dpm_table_start, 0x40000),
                        "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -708,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 
        vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
 
-       PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
                                (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
                        "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
                        return -1);
 
        graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
 
-       PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
                                (uint8_t *)(&avfs_graphics_level_polaris10),
                                graphics_level_size, 0x40000),
                        "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -723,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 
        graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
 
-       PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
                                (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
                                "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
                        return -1);
@@ -732,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 
        graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
 
-       PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+       PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
                        (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
                        "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
                        return -1);
@@ -793,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                                        SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-       result = polaris10_upload_smu_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
        if (result != 0)
                return result;
 
@@ -812,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
 
 
        /* Call Test SMU message with 0x20000 offset to trigger SMU start */
-       polaris10_send_msg_to_smc_offset(smumgr);
+       smu7_send_msg_to_smc_offset(smumgr);
 
        /* Wait done bit to be set */
        /* Check pass/failed indicator */
@@ -853,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
                                        SMC_SYSCON_RESET_CNTL,
                                        rst_reg, 1);
 
-       result = polaris10_upload_smu_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
        if (result != 0)
                return result;
 
        /* Set smc instruct start point at 0x0 */
-       polaris10_program_jump_on_start(smumgr);
+       smu7_program_jump_on_start(smumgr);
 
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                                        SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@@ -881,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
        bool SMU_VFT_INTACT;
 
        /* Only start SMC if SMC RAM is not running */
-       if (!polaris10_is_smc_ram_running(smumgr)) {
+       if (!smu7_is_smc_ram_running(smumgr)) {
                SMU_VFT_INTACT = false;
                smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
-               smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+               smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
                /* Check if SMU is running in protected mode */
                if (smu_data->protected_mode == 0) {
@@ -894,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
 
                        /* If failed, try with different security Key. */
                        if (result != 0) {
-                               smu_data->security_hard_key ^= 1;
+                               smu_data->smu7_data.security_hard_key ^= 1;
                                result = polaris10_start_smu_in_protection_mode(smumgr);
                        }
                }
@@ -906,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
        } else
                SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
 
-       smu_data->post_initial_boot = true;
        polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
        /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
-       polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
-                                       &(smu_data->soft_regs_start), 0x40000);
+       smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+                                       &(smu_data->smu7_data.soft_regs_start), 0x40000);
 
-       result = polaris10_request_smu_load_fw(smumgr);
+       result = smu7_request_smu_load_fw(smumgr);
 
        return result;
 }
 
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+       uint32_t efuse;
+
+       efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+       efuse &= 0x00000001;
+       if (efuse)
+               return true;
+
+       return false;
+}
+
 static int polaris10_smu_init(struct pp_smumgr *smumgr)
 {
-       struct polaris10_smumgr *smu_data;
-       uint8_t *internal_buf;
-       uint64_t mc_addr = 0;
-       /* Allocate memory for backend private data */
-       smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-       smu_data->header_buffer.data_size =
-               ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       smu_data->smu_buffer.data_size = 200*4096;
-       smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-/* Allocate FW image data structure and header buffer and
- * send the header buffer address to SMU */
-       smu_allocate_memory(smumgr->device,
-               smu_data->header_buffer.data_size,
-               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-               PAGE_SIZE,
-               &mc_addr,
-               &smu_data->header_buffer.kaddr,
-               &smu_data->header_buffer.handle);
-
-       smu_data->header = smu_data->header_buffer.kaddr;
-       smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-       smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-       PP_ASSERT_WITH_CODE((NULL != smu_data->header),
-               "Out of memory.",
-               kfree(smumgr->backend);
-               cgs_free_gpu_mem(smumgr->device,
-               (cgs_handle_t)smu_data->header_buffer.handle);
-               return -1);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       int i;
 
-/* Allocate buffer for SMU internal buffer and send the address to SMU.
- * Iceland SMU does not need internal buffer.*/
-       smu_allocate_memory(smumgr->device,
-               smu_data->smu_buffer.data_size,
-               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-               PAGE_SIZE,
-               &mc_addr,
-               &smu_data->smu_buffer.kaddr,
-               &smu_data->smu_buffer.handle);
-
-       internal_buf = smu_data->smu_buffer.kaddr;
-       smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-       smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-       PP_ASSERT_WITH_CODE((NULL != internal_buf),
-               "Out of memory.",
-               kfree(smumgr->backend);
-               cgs_free_gpu_mem(smumgr->device,
-               (cgs_handle_t)smu_data->smu_buffer.handle);
-               return -1;);
+       if (smu7_init(smumgr))
+               return -EINVAL;
 
        if (polaris10_is_hw_avfs_present(smumgr))
                smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
        else
                smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
 
+       for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
+               smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
        return 0;
 }
 
-static const struct pp_smumgr_func ellsemere_smu_funcs = {
+static const struct pp_smumgr_func polaris10_smu_funcs = {
        .smu_init = polaris10_smu_init,
-       .smu_fini = polaris10_smu_fini,
+       .smu_fini = smu7_smu_fini,
        .start_smu = polaris10_start_smu,
-       .check_fw_load_finish = polaris10_check_fw_load_finish,
-       .request_smu_load_fw = polaris10_reload_firmware,
+       .check_fw_load_finish = smu7_check_fw_load_finish,
+       .request_smu_load_fw = smu7_reload_firmware,
        .request_smu_load_specific_fw = NULL,
-       .send_msg_to_smc = polaris10_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+       .send_msg_to_smc = smu7_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
+       .update_smc_table = polaris10_update_smc_table,
+       .get_offsetof = polaris10_get_offsetof,
+       .process_firmware_header = polaris10_process_firmware_header,
+       .init_smc_table = polaris10_init_smc_table,
+       .update_sclk_threshold = polaris10_update_sclk_threshold,
+       .thermal_avfs_enable = polaris10_thermal_avfs_enable,
+       .thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
+       .populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
+       .populate_all_memory_levels = polaris10_populate_all_memory_levels,
+       .get_mac_definition = polaris10_get_mac_definition,
+       .is_dpm_running = polaris10_is_dpm_running,
 };
 
 int polaris10_smum_init(struct pp_smumgr *smumgr)
@@ -998,10 +410,10 @@ int polaris10_smum_init(struct pp_smumgr *smumgr)
        polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
 
        if (polaris10_smu == NULL)
-               return -1;
+               return -EINVAL;
 
        smumgr->backend = polaris10_smu;
-       smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+       smumgr->smumgr_funcs = &polaris10_smu_funcs;
 
        return 0;
 }
index e5377aec057f146b72647b65f92ce992f7b3b6e4..49ebf1d5a53c9a0194eead44ccfd605414a385a8 100644 (file)
 #ifndef _POLARIS10_SMUMANAGER_H
 #define _POLARIS10_SMUMANAGER_H
 
-#include <polaris10_ppsmc.h>
+
 #include <pp_endian.h>
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
 
 struct polaris10_avfs {
        enum AVFS_BTC_STATUS avfs_btc_status;
        uint32_t           avfs_btc_param;
 };
 
-struct polaris10_buffer_entry {
-       uint32_t data_size;
-       uint32_t mc_addr_low;
-       uint32_t mc_addr_high;
-       void *kaddr;
-       unsigned long  handle;
+struct polaris10_pt_defaults {
+       uint8_t   SviLoadLineEn;
+       uint8_t   SviLoadLineVddC;
+       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+       uint8_t   TDC_MAWt;
+       uint8_t   TdcWaterfallCtl;
+       uint8_t   DTEAmbientTempBase;
+
+       uint32_t  DisplayCac;
+       uint32_t  BAPM_TEMP_GRADIENT;
+       uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+       uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+struct polaris10_range_table {
+       uint32_t trans_lower_frequency; /* in 10 kHz */
+       uint32_t trans_upper_frequency;
 };
 
 struct polaris10_smumgr {
-       uint8_t *header;
-       uint8_t *mec_image;
-       struct polaris10_buffer_entry smu_buffer;
-       struct polaris10_buffer_entry header_buffer;
-       uint32_t soft_regs_start;
-       uint8_t *read_rrm_straps;
-       uint32_t read_drm_straps_mc_address_high;
-       uint32_t read_drm_straps_mc_address_low;
-       uint32_t acpi_optimization;
-       bool post_initial_boot;
+       struct smu7_smumgr smu7_data;
        uint8_t protected_mode;
-       uint8_t security_hard_key;
        struct polaris10_avfs  avfs;
+       SMU74_Discrete_DpmTable              smc_state_table;
+       struct SMU74_Discrete_Ulv            ulv_setting;
+       struct SMU74_Discrete_PmFuses  power_tune_table;
+       struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
+       const struct polaris10_pt_defaults       *power_tune_defaults;
+       uint32_t                   activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+       uint32_t                   bif_sclk_table[SMU74_MAX_LEVELS_LINK];
 };
 
 
-int polaris10_smum_init(struct pp_smumgr *smumgr);
-
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
-                               const uint8_t *src, uint32_t byte_count, uint32_t limit);
-
 #endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
new file mode 100644 (file)
index 0000000..6af744f
--- /dev/null
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define SMU7_SMC_SIZE 0x20000
+
+static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+       PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on CI, SMC_IND_ACCESS_CNTL is different */
+       return 0;
+}
+
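The two checks in smu7_set_smc_sram_address() are a bit-trick alignment test ((3 & addr) is nonzero when either of the low two bits is set, i.e. the address is not a multiple of 4) and a bounds test that the whole dword fits below the limit. A tiny standalone sketch of both:

#include <stdint.h>
#include <stdio.h>

/* Same tests as smu7_set_smc_sram_address(): (3 & addr) is nonzero when the
 * low two bits are set, i.e. addr is not a multiple of 4; the second test
 * rejects a dword whose last byte (addr + 3) would fall outside SMC RAM. */
static int check_addr(uint32_t addr, uint32_t limit)
{
	if (3 & addr)
		return -1;	/* not 4-byte aligned */
	if (limit <= addr + 3)
		return -1;	/* dword would run past the end of SMC RAM */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_addr(0x100, 0x40000),	/* aligned, in range: 0 */
	       check_addr(0x102, 0x40000),	/* misaligned: -1 */
	       check_addr(0x40000, 0x40000));	/* past the limit: -1 */
	return 0;
}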
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+       uint32_t data;
+       uint32_t addr;
+       uint8_t *dest_byte;
+       uint8_t i, data_byte[4] = {0};
+       uint32_t *pdata = (uint32_t *)&data_byte;
+
+       PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+       addr = smc_start_address;
+
+       while (byte_count >= 4) {
+               smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+               *dest = PP_SMC_TO_HOST_UL(data);
+
+               dest += 1;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       if (byte_count) {
+               smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+               *pdata = PP_SMC_TO_HOST_UL(data);
+               /* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
+               dest_byte = (uint8_t *)dest;
+               for (i = 0; i < byte_count; i++)
+                       dest_byte[i] = data_byte[i];
+       }
+
+       return 0;
+}
+
+
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+                               const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+       int result;
+       uint32_t data = 0;
+       uint32_t original_data;
+       uint32_t addr = 0;
+       uint32_t extra_shift;
+
+       PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+       addr = smc_start_address;
+
+       while (byte_count >= 4) {
+               /* Bytes are written into the SMC address space with the MSB first. */
+               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+               result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       if (0 != byte_count) {
+
+               data = 0;
+
+               result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+
+               original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       /* Bytes are written into the SMC address space with the MSB first. */
+                       data = (0x100 * data) + *src++;
+                       byte_count--;
+               }
+
+               data <<= extra_shift;
+
+               data |= (original_data & ~((~0UL) << extra_shift));
+
+               result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+       }
+
+       return 0;
+}
+
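The tail handling above is the subtle part of smu7_copy_bytes_to_smc(): the last one to three bytes are packed MSB-first, shifted to the top of the dword, and merged with the existing SMC contents so the untouched low bytes survive the read-modify-write. A standalone sketch of that merge on plain values (merge_tail is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the tail handling in smu7_copy_bytes_to_smc(): pack the last
 * 1-3 source bytes MSB-first, shift them into the high end of the dword,
 * and keep the original low bytes via the mask. */
static uint32_t merge_tail(uint32_t original, const uint8_t *src,
			   uint32_t byte_count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - byte_count);

	while (byte_count--)
		data = (data << 8) | *src++;	/* same as (0x100 * data) + *src++ */

	data <<= extra_shift;
	return data | (original & ~((~0u) << extra_shift));
}

int main(void)
{
	const uint8_t tail[2] = { 0xAA, 0xBB };

	/* Two new high bytes, two preserved low bytes: prints 0xaabb3344. */
	printf("0x%08x\n", merge_tail(0x11223344u, tail, 2));
	return 0;
}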
+
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+       static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+       smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+       return 0;
+}
+
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+       return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+       && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+       int ret;
+
+       if (!smu7_is_smc_ram_running(smumgr))
+               return -EINVAL;
+
+
+       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               printk("\n failed to send pre message %x ret is %d \n",  msg, ret);
+
+       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               printk("\n failed to send message %x ret is %d \n",  msg, ret);
+
+       return 0;
+}
+
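The message protocol above is a poll/write/poll handshake: wait until SMC_RESP is nonzero (the previous message has retired), write the message register, wait for a fresh response, and treat a response of 1 as success. A toy single-threaded model of the sequence (all names below are stand-ins; the real driver goes through cgs_write_register() and the SMUM wait macros against MMIO):

#include <stdint.h>
#include <stdio.h>

/* Toy single-threaded model of the poll/write/poll handshake.  The
 * "registers" here are plain variables, not hardware. */
static uint32_t smc_resp = 1;	/* nonzero: SMC idle, last message retired */
static uint32_t smc_message;

static void fake_smc_process(void)
{
	(void)smc_message;	/* a real SMC would act on the message here */
	smc_resp = 1;		/* pretend firmware: every message succeeds */
}

static int send_msg_to_smc(uint16_t msg)
{
	while (smc_resp == 0)	/* wait for the previous message */
		;
	smc_message = msg;	/* write mmSMC_MESSAGE_0 */
	smc_resp = 0;		/* hardware clears the response field */
	fake_smc_process();
	while (smc_resp == 0)	/* wait for this message to retire */
		;
	return smc_resp == 1 ? 0 : -1;
}

int main(void)
{
	printf("send 0x145 -> %d\n", send_msg_to_smc(0x145));
	return 0;
}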
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+       return 0;
+}
+
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+       if (!smu7_is_smc_ram_running(smumgr))
+               return -EINVAL;
+
+       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+       return smu7_send_msg_to_smc(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+       return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+               printk("Failed to send Message.\n");
+
+       return 0;
+}
+
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+       if (!smu7_is_smc_ram_running(smumgr))
+               return -EINVAL;
+
+       SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+       return 0;
+}
+
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+       enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+       switch (fw_type) {
+       case UCODE_ID_SMU:
+               result = CGS_UCODE_ID_SMU;
+               break;
+       case UCODE_ID_SMU_SK:
+               result = CGS_UCODE_ID_SMU_SK;
+               break;
+       case UCODE_ID_SDMA0:
+               result = CGS_UCODE_ID_SDMA0;
+               break;
+       case UCODE_ID_SDMA1:
+               result = CGS_UCODE_ID_SDMA1;
+               break;
+       case UCODE_ID_CP_CE:
+               result = CGS_UCODE_ID_CP_CE;
+               break;
+       case UCODE_ID_CP_PFP:
+               result = CGS_UCODE_ID_CP_PFP;
+               break;
+       case UCODE_ID_CP_ME:
+               result = CGS_UCODE_ID_CP_ME;
+               break;
+       case UCODE_ID_CP_MEC:
+               result = CGS_UCODE_ID_CP_MEC;
+               break;
+       case UCODE_ID_CP_MEC_JT1:
+               result = CGS_UCODE_ID_CP_MEC_JT1;
+               break;
+       case UCODE_ID_CP_MEC_JT2:
+               result = CGS_UCODE_ID_CP_MEC_JT2;
+               break;
+       case UCODE_ID_RLC_G:
+               result = CGS_UCODE_ID_RLC_G;
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
+
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+       int result;
+
+       result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+       if (result)
+               return result;
+
+       *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+       return 0;
+}
+
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+       int result;
+
+       result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+       if (result)
+               return result;
+
+       cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+       return 0;
+}
+
+/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
+
+static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
+{
+       uint32_t result = 0;
+
+       switch (fw_type) {
+       case UCODE_ID_SDMA0:
+               result = UCODE_ID_SDMA0_MASK;
+               break;
+       case UCODE_ID_SDMA1:
+               result = UCODE_ID_SDMA1_MASK;
+               break;
+       case UCODE_ID_CP_CE:
+               result = UCODE_ID_CP_CE_MASK;
+               break;
+       case UCODE_ID_CP_PFP:
+               result = UCODE_ID_CP_PFP_MASK;
+               break;
+       case UCODE_ID_CP_ME:
+               result = UCODE_ID_CP_ME_MASK;
+               break;
+       case UCODE_ID_CP_MEC:
+       case UCODE_ID_CP_MEC_JT1:
+       case UCODE_ID_CP_MEC_JT2:
+               result = UCODE_ID_CP_MEC_MASK;
+               break;
+       case UCODE_ID_RLC_G:
+               result = UCODE_ID_RLC_G_MASK;
+               break;
+       default:
+               printk("UCode type is out of range! \n");
+               result = 0;
+       }
+
+       return result;
+}
+
+static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+                                               uint32_t fw_type,
+                                               struct SMU_Entry *entry)
+{
+       int result = 0;
+       struct cgs_firmware_info info = {0};
+
+       result = cgs_get_firmware_info(smumgr->device,
+                               smu7_convert_fw_type_to_cgs(fw_type),
+                               &info);
+
+       if (!result) {
+               entry->version = info.version;
+               entry->id = (uint16_t)fw_type;
+               entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+               entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+               entry->meta_data_addr_high = 0;
+               entry->meta_data_addr_low = 0;
+               entry->data_size_byte = info.image_size;
+               entry->num_register_entries = 0;
+       }
+
+       if (fw_type == UCODE_ID_RLC_G)
+               entry->flags = 1;
+       else
+               entry->flags = 0;
+
+       return 0;
+}
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+       uint32_t fw_to_load;
+       int result = 0;
+       struct SMU_DRAMData_TOC *toc;
+
+       if (!smumgr->reload_fw) {
+               printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+               return 0;
+       }
+
+       if (smu_data->soft_regs_start)
+               cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+                                       smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+                                       SMU_SoftRegisters, UcodeLoadStatus),
+                                       0x0);
+
+       if (smumgr->chip_id > CHIP_TOPAZ) { /* Topaz is handled in the else branch */
+               smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+               smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+               fw_to_load = UCODE_ID_RLC_G_MASK
+                          + UCODE_ID_SDMA0_MASK
+                          + UCODE_ID_SDMA1_MASK
+                          + UCODE_ID_CP_CE_MASK
+                          + UCODE_ID_CP_ME_MASK
+                          + UCODE_ID_CP_PFP_MASK
+                          + UCODE_ID_CP_MEC_MASK;
+       } else {
+               fw_to_load = UCODE_ID_RLC_G_MASK
+                          + UCODE_ID_SDMA0_MASK
+                          + UCODE_ID_SDMA1_MASK
+                          + UCODE_ID_CP_CE_MASK
+                          + UCODE_ID_CP_ME_MASK
+                          + UCODE_ID_CP_PFP_MASK
+                          + UCODE_ID_CP_MEC_MASK
+                          + UCODE_ID_CP_MEC_JT1_MASK
+                          + UCODE_ID_CP_MEC_JT2_MASK;
+       }
+
+       toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+       toc->num_entries = 0;
+       toc->structure_version = 1;
+
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+                               UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", return -EINVAL);
+
+       smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+       smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+       if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+               printk(KERN_ERR "Fail to Request SMU Load uCode");
+
+       return result;
+}
+
+/* Check if the FW has been loaded; this call does not return until the SMU reports that loading has finished. */
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+       uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
+       uint32_t ret;
+
+       ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+                                       smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+                                       SMU_SoftRegisters, UcodeLoadStatus),
+                                       fw_mask, fw_mask);
+
+       return ret;
+}
+
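+/* A minimal usage sketch (illustrative only, not part of this patch):
+ * a caller would first request the load and then poll per ucode type
+ * before touching the corresponding engine:
+ *
+ *     smu7_request_smu_load_fw(smumgr);
+ *     if (smu7_check_fw_load_finish(smumgr, UCODE_ID_SDMA0))
+ *             pr_err("SDMA0 firmware did not finish loading\n");
+ */
+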
+int smu7_reload_firmware(struct pp_smumgr *smumgr)
+{
+       return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+       uint32_t byte_count = length;
+
+       PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC firmware image is larger than the SMC RAM area.", return -EINVAL);
+
+       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
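+       /* With auto-increment enabled, each write to SMC_IND_DATA_11 stores
+        * one dword and advances the SMC-side address, so the whole image is
+        * streamed without rewriting SMC_IND_INDEX_11 (treating 0x20000 as
+        * the SMC-side load address is an assumption, not stated here).
+        */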
+       for (; byte_count >= 4; byte_count -= 4)
+               cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+       PP_ASSERT_WITH_CODE((0 == byte_count), "SMC firmware size must be divisible by 4.", return -EINVAL);
+
+       return 0;
+}
+
+
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+       int result = 0;
+       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+       struct cgs_firmware_info info = {0};
+
+       if (smu_data->security_hard_key == 1)
+               cgs_get_firmware_info(smumgr->device,
+                       smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+       else
+               cgs_get_firmware_info(smumgr->device,
+                       smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+       result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+
+       return result;
+}
+
+
+int smu7_init(struct pp_smumgr *smumgr)
+{
+       struct smu7_smumgr *smu_data;
+       uint8_t *internal_buf;
+       uint64_t mc_addr = 0;
+
+       /* Retrieve the backend private data. */
+       smu_data = (struct smu7_smumgr *)(smumgr->backend);
+       smu_data->header_buffer.data_size =
+                       ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+       smu_data->smu_buffer.data_size = 200*4096;
+
+       /* Allocate the FW image data structure and header buffer, and
+        * send the header buffer address to the SMU.
+        */
+       smu_allocate_memory(smumgr->device,
+               smu_data->header_buffer.data_size,
+               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+               PAGE_SIZE,
+               &mc_addr,
+               &smu_data->header_buffer.kaddr,
+               &smu_data->header_buffer.handle);
+
+       smu_data->header = smu_data->header_buffer.kaddr;
+       smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+       smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+       PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+               "Out of memory.",
+               kfree(smumgr->backend);
+               cgs_free_gpu_mem(smumgr->device,
+               (cgs_handle_t)smu_data->header_buffer.handle);
+               return -EINVAL);
+
+       smu_allocate_memory(smumgr->device,
+               smu_data->smu_buffer.data_size,
+               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+               PAGE_SIZE,
+               &mc_addr,
+               &smu_data->smu_buffer.kaddr,
+               &smu_data->smu_buffer.handle);
+
+       internal_buf = smu_data->smu_buffer.kaddr;
+       smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+       smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+       PP_ASSERT_WITH_CODE((NULL != internal_buf),
+               "Out of memory.",
+               kfree(smumgr->backend);
+               cgs_free_gpu_mem(smumgr->device,
+               (cgs_handle_t)smu_data->smu_buffer.handle);
+               return -EINVAL);
+
+       return 0;
+}
+
+
+int smu7_smu_fini(struct pp_smumgr *smumgr)
+{
+       if (smumgr->backend) {
+               kfree(smumgr->backend);
+               smumgr->backend = NULL;
+       }
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
new file mode 100644 (file)
index 0000000..76352f2
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_SMUMANAGER_H
+#define _SMU7_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+
+#define SMC_RAM_END 0x40000
+#define mmSMC_IND_INDEX_11                              0x01AC
+#define mmSMC_IND_DATA_11                               0x01AD
+
+struct smu7_buffer_entry {
+       uint32_t data_size;
+       uint32_t mc_addr_low;
+       uint32_t mc_addr_high;
+       void *kaddr;
+       unsigned long  handle;
+};
+
+struct smu7_smumgr {
+       uint8_t *header;
+       uint8_t *mec_image;
+       struct smu7_buffer_entry smu_buffer;
+       struct smu7_buffer_entry header_buffer;
+
+       uint32_t                             soft_regs_start;
+       uint32_t                             dpm_table_start;
+       uint32_t                             mc_reg_table_start;
+       uint32_t                             fan_table_start;
+       uint32_t                             arb_table_start;
+       uint32_t                             ulv_setting_starts;
+       uint8_t                              security_hard_key;
+       uint32_t acpi_optimization;
+};
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+                               uint32_t *dest, uint32_t byte_count, uint32_t limit);
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+                       const uint8_t *src, uint32_t byte_count, uint32_t limit);
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+                                               uint32_t parameter);
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+                                               uint16_t msg, uint32_t parameter);
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+                                               uint32_t *value, uint32_t limit);
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+                                               uint32_t value, uint32_t limit);
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_smumgr *smumgr);
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
+int smu7_init(struct pp_smumgr *smumgr);
+int smu7_smu_fini(struct pp_smumgr *smumgr);
+
+#endif
\ No newline at end of file
index 7723473e51a073cea3365d0c9b32b4a211bd51bc..e5812aa456f3b1440e525f0592d299672db14759 100644 (file)
 #include "smumgr.h"
 #include "cgs_common.h"
 #include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -47,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        smumgr->device = pp_init->device;
        smumgr->chip_family = pp_init->chip_family;
        smumgr->chip_id = pp_init->chip_id;
-       smumgr->hw_revision = pp_init->rev_id;
        smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        smumgr->reload_fw = 1;
        handle->smu_mgr = smumgr;
@@ -58,6 +54,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                break;
        case AMDGPU_FAMILY_VI:
                switch (smumgr->chip_id) {
+               case CHIP_TOPAZ:
+                       iceland_smum_init(smumgr);
+                       break;
                case CHIP_TONGA:
                        tonga_smum_init(smumgr);
                        break;
@@ -87,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
        return 0;
 }
 
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
+               return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+
+       return 0;
+}
+
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+               void *input, void *output, void *storage, int result)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
+               return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+
+       return 0;
+}
+
+int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+
+       if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
+               return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+
+       return 0;
+}
+
+int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+
+       if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
+               return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+
+       return 0;
+}
+
+uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+{
+       if (NULL != smumgr->smumgr_funcs->get_offsetof)
+               return smumgr->smumgr_funcs->get_offsetof(type, member);
+
+       return 0;
+}
+
+int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
+               return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+       return 0;
+}
+
 int smum_get_argument(struct pp_smumgr *smumgr)
 {
        if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -95,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
        return 0;
 }
 
+uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+{
+       if (NULL != smumgr->smumgr_funcs->get_mac_definition)
+               return smumgr->smumgr_funcs->get_mac_definition(value);
+
+       return 0;
+}
+
 int smum_download_powerplay_table(struct pp_smumgr *smumgr,
                                                                void **table)
 {
        if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
                return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
                                                                        table);
-
        return 0;
 }
 
@@ -268,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
 
        return 0;
 }
+
+int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
+               return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+
+       return 0;
+}
+
+int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
+               return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+
+       return 0;
+}
+
+int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
+               return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+
+       return 0;
+}
+
+/* This interface is needed by the CI/VI islands */
+int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
+               return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+
+       return 0;
+}
+
+bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
+               return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+
+       return true;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
new file mode 100644 (file)
index 0000000..4dfd3f6
--- /dev/null
@@ -0,0 +1,3207 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "tonga_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "tonga_smumgr.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu72_discrete.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+
+static struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,  TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,        BAPM_TEMP_GRADIENT
+ */
+       {1,               0xF,             0xFD,                0x19,
+        5,               45,                 0,              0xB0000,
+        {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+               0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+        {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+               0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+       },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+       {600, 1050, 3, 0},
+       {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min, VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+       {0, 1, 3, 2, 4, 5},
+       {0, 2, 4, 5, 6, 5}
+};
+
+/* PPGen generates the gain setting in units of x * 100.
+ * The SMC firmware expects units of x * 4096 (0x1000), so the value
+ * must be converted before being handed to the SMC.
+ */
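+
+/* Illustrative sketch of that conversion (hypothetical helper, not part
+ * of this patch): a gain of 150 (1.50 in x * 100 units) becomes
+ * 150 * 4096 / 100 = 6144 (0x1800) in x * 4096 units:
+ *
+ *     static inline uint32_t pp_gain_to_smc_units(uint32_t gain_x100)
+ *     {
+ *             return gain_x100 * 4096 / 100;
+ *     }
+ */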
+
+
+static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+       phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+       uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+       uint32_t i = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                          (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       /* the clock-voltage dependency table is empty */
+       if (allowed_clock_voltage_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+                       voltage->VddGfx = phm_get_voltage_index(
+                                       pptable_info->vddgfx_lookup_table,
+                               allowed_clock_voltage_table->entries[i].vddgfx);
+                       voltage->Vddc = phm_get_voltage_index(
+                                               pptable_info->vddc_lookup_table,
+                                 allowed_clock_voltage_table->entries[i].vddc);
+
+                       if (allowed_clock_voltage_table->entries[i].vddci)
+                               voltage->Vddci =
+                                       phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+                       else
+                               voltage->Vddci =
+                                       phm_get_voltage_id(&data->vddci_voltage_table,
+                                               allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+                       if (allowed_clock_voltage_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+                       voltage->Phases = 1;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than the max sclk in the dependency table */
+       voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+               allowed_clock_voltage_table->entries[i-1].vddgfx);
+       voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+               allowed_clock_voltage_table->entries[i-1].vddc);
+
+       if (allowed_clock_voltage_table->entries[i-1].vddci)
+               voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+                       allowed_clock_voltage_table->entries[i-1].vddci);
+
+       if (allowed_clock_voltage_table->entries[i-1].mvdd)
+               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+       return 0;
+}
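+
+/* Illustrative example of the lookup above (made-up numbers): with
+ * dependency entries at clk = 300, 600 and 900, a request of 700 selects
+ * the 900 entry (first entry with clk >= request); a request above 900
+ * falls through to the last entry after the loop.
+ */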
+
+
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               table->VddcLevelCount = data->vddc_voltage_table.count;
+               for (count = 0; count < table->VddcLevelCount; count++) {
+                       table->VddcTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+       }
+       return 0;
+}
+
+/**
+ * VddGfx table preparation for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+               table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+               for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+                       table->VddGfxTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+       }
+       return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+
+       table->VddciLevelCount = data->vddci_voltage_table.count;
+       for (count = 0; count < table->VddciLevelCount; count++) {
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+                       table->VddciTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+                       table->SmioTable1.Pattern[count].Voltage =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+                       table->SmioTable1.Pattern[count].Smio =
+                               (uint8_t) count;
+                       table->Smio[count] |=
+                               data->vddci_voltage_table.entries[count].smio_low;
+                       table->VddciTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+       }
+
+       table->SmioMask1 = data->vddci_voltage_table.mask_low;
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+       return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               table->MvddLevelCount = data->mvdd_voltage_table.count;
+               for (count = 0; count < table->MvddLevelCount; count++) {
+                       table->SmioTable2.Pattern[count].Voltage =
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+                       table->SmioTable2.Pattern[count].Smio =
+                               (uint8_t) count;
+                       table->Smio[count] |=
+                               data->mvdd_voltage_table.entries[count].smio_low;
+               }
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+       }
+
+       return 0;
+}
+
+/**
+ * Preparation of vddc and vddgfx CAC tables for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       uint32_t count;
+       uint8_t index = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+                                          pptable_info->vddgfx_lookup_table;
+       struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+                                               pptable_info->vddc_lookup_table;
+
+       /* table is already swapped, so in order to use the value from it
+        * we need to swap it back.
+        */
+       uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+       uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+       for (count = 0; count < vddc_level_count; count++) {
+               /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+               index = phm_get_voltage_index(vddc_lookup_table,
+                       data->vddc_voltage_table.entries[count].value);
+               table->BapmVddcVidLoSidd[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+               table->BapmVddcVidHiSidd[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+               table->BapmVddcVidHiSidd2[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+       }
+
+       if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+               for (count = 0; count < vddgfx_level_count; count++) {
+                       index = phm_get_voltage_index(vddgfx_lookup_table,
+                               data->vddgfx_voltage_table.entries[count].value);
+                       table->BapmVddGfxVidHiSidd2[count] =
+                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+               }
+       } else {
+               for (count = 0; count < vddc_level_count; count++) {
+                       index = phm_get_voltage_index(vddc_lookup_table,
+                               data->vddc_voltage_table.entries[count].value);
+                       table->BapmVddGfxVidLoSidd[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+                       table->BapmVddGfxVidHiSidd[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+                       table->BapmVddGfxVidHiSidd2[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+       SMU72_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = tonga_populate_smc_vddc_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDC voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDCI voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDGFX voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_mvdd_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate MVDD voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_cac_tables(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate CAC voltage tables to SMC",
+                       return -EINVAL);
+
+       return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU72_Discrete_Ulv *state)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+       state->VddcPhase = 1;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+       return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+               struct SMU72_Discrete_DpmTable *table)
+{
+       return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t i;
+
+       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount =
+                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity =
+                       1;
+               table->LinkLevel[i].SPC =
+                       (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold =
+                       PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold =
+                       PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+               (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t    reference_clock;
+       uint32_t reference_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+       reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+       reference_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* the low 14 bits are the fraction and the high 12 bits are the divider */
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
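+
+       /* Worked example (illustrative numbers): with Fref = 25 MHz,
+        * ref_div = 1 and post_div = 2, an 800 MHz engine clock needs a
+        * 1600 MHz VCO, so FBDIV = (1600 / 25) * 16384 = 64 * 16384
+        * = 0x100000 in 12.14 fixed point (integer part 64, zero fraction).
+        */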
+
+       /* SPLL_FUNC_CNTL setup*/
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               pp_atomctrl_internal_ss_info ss_info;
+
+               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+                       /*
+                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                       * ss_info.speed_spectrum_rate -- in unit of khz
+                       */
+                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+               }
+       }
+
+       sclk->SclkFrequency        = engine_clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+/**
+ * Populates a single SMC graphics level structure using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk_activity_level_threshold the activity threshold for this level
+ * @param    graphic_level the SMC graphics level structure to be populated
+ */
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+                                               uint32_t engine_clock,
+                               uint16_t sclk_activity_level_threshold,
+                               SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+       int result;
+       uint32_t mvdd;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                           (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+       /* populate graphics levels*/
+       result = tonga_get_dependecy_volt_by_clk(hwmgr,
+               pptable_info->vdd_dep_on_sclk, engine_clock,
+               &graphic_level->MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((!result),
+               "can not find VDDC voltage value for VDDC "
+               "engine clock dependency table", return result);
+
+       /* SCLK frequency in units of 10KHz*/
+       graphic_level->SclkFrequency = engine_clock;
+       /* Indicates maximum activity level for this performance level. 50% for now*/
+       graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+       graphic_level->CcPwrDynRm = 0;
+       graphic_level->CcPwrDynRm1 = 0;
+       /* this level can be used if activity is high enough.*/
+       graphic_level->EnabledForActivity = 0;
+       /* this level can be used for throttling.*/
+       graphic_level->EnabledForThrottle = 1;
+       graphic_level->UpHyst = 0;
+       graphic_level->DownHyst = 0;
+       graphic_level->VoltageDownHyst = 0;
+       graphic_level->PowerThrottle = 0;
+
+       data->display_timing.min_clock_in_sr =
+                       hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep))
+               graphic_level->DeepSleepDivId =
+                               smu7_get_sleep_divider_id_from_clock(engine_clock,
+                                               data->display_timing.min_clock_in_sr);
+
+       /* Default to low; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       if (!result) {
+               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+       }
+
+       return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+       uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+       uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+       uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+                                               SMU72_MAX_LEVELS_GRAPHICS;
+
+       SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+       uint32_t i, max_entry;
+       uint8_t highest_pcie_level_enabled = 0;
+       uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+       uint8_t count = 0;
+       int result = 0;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               result = tonga_populate_single_graphic_level(hwmgr,
+                                       dpm_table->sclk_table.dpm_levels[i].value,
+                                       (uint16_t)smu_data->activity_target[i],
+                                       &(smu_data->smc_state_table.GraphicsLevel[i]));
+               if (result != 0)
+                       return result;
+
+               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+               if (i > 1)
+                       smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+       }
+
+       /* Only enable level 0 for now. */
+       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+       /* set highest level watermark to high */
+       if (dpm_table->sclk_table.count > 1)
+               smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+               (uint8_t)dpm_table->sclk_table.count;
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+       if (pcie_table != NULL) {
+               PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+                       "There must be 1 or more PCIE levels defined in PPTable.",
+                       return -EINVAL);
+               max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+               for (i = 0; i < dpm_table->sclk_table.count; i++) {
+                       smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+                               (uint8_t) ((i < max_entry) ? i : max_entry);
+               }
+       } else {
+               if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+                       printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");
+
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<(highest_pcie_level_enabled+1))) != 0)) {
+                       highest_pcie_level_enabled++;
+               }
+
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<lowest_pcie_level_enabled)) == 0)) {
+                       lowest_pcie_level_enabled++;
+               }
+
+               while ((count < highest_pcie_level_enabled) &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+                       count++;
+               }
+               mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+                       (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
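+
+               /* Illustrative example: with pcie_dpm_enable_mask = 0b001110,
+                * the walks above give lowest = 1, highest = 3, mid = 2.
+                */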
+
+
+               /* set pcieDpmLevel to highest_pcie_level_enabled*/
+               for (i = 2; i < dpm_table->sclk_table.count; i++)
+                       smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+               smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to mid_pcie_level_enabled*/
+               smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+       }
+       /* the level count is sent to the SMC once, at SMC table init, and never changes */
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+                               (uint8_t *)levels, (uint32_t)level_array_size,
+                                                               SMC_RAM_END);
+
+       return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    memory_clock the memory clock to use to populate the structure
+ * @param    mclk        the SMC memory level structure to be populated
+ * @param    strobe_mode whether strobe mode is in use at this clock
+ * @param    dllStateOn  whether the DLL is on at this memory level
+ */
+static int tonga_calculate_mclk_params(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU72_Discrete_MemoryLevel *mclk,
+               bool strobe_mode,
+               bool dllStateOn
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+       uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+       uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+       uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+       uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+       uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+       uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+       uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+       uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+       pp_atomctrl_memory_clock_param mpll_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+                               memory_clock, &mpll_param, strobe_mode);
+       PP_ASSERT_WITH_CODE(
+                       !result,
+                       "Error retrieving Memory Clock Parameters from VBIOS.",
+                       return result);
+
+       /* MPLL_FUNC_CNTL setup*/
+       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+                                       mpll_param.bw_ctrl);
+
+       /* MPLL_FUNC_CNTL_1 setup*/
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                       MPLL_FUNC_CNTL_1, CLKF,
+                                       mpll_param.mpll_fb_divider.cl_kf);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                       MPLL_FUNC_CNTL_1, CLKFRAC,
+                                       mpll_param.mpll_fb_divider.clk_frac);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                               MPLL_FUNC_CNTL_1, VCO_MODE,
+                                               mpll_param.vco_mode);
+
+       /* MPLL_AD_FUNC_CNTL setup*/
+       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+                                       mpll_param.mpll_post_divider);
+
+       if (data->is_memory_gddr5) {
+               /* MPLL_DQ_FUNC_CNTL setup*/
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+                                               mpll_param.yclk_sel);
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+                                               mpll_param.mpll_post_divider);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+               /*
+                ************************************
+                Fref = Reference Frequency
+                NF = Feedback divider ratio
+                NR = Reference divider ratio
+                Fnom = Nominal VCO output frequency = Fref * NF / NR
+                Fs = Spreading Rate
+                D = Percentage down-spread / 2
+                Fint = Reference input frequency to PFD = Fref / NR
+                NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+                CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+                NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+                CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+                *************************************
+                */
+               pp_atomctrl_internal_ss_info ss_info;
+               uint32_t freq_nom;
+               uint32_t tmp;
+               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+               /* covers GDDR5 (all modes) as well as DDR3 */
+               if (1 == mpll_param.qdr)
+                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+               else
+                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+               tmp = (freq_nom / reference_clock);
+               tmp = tmp * tmp;
+
+               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+                       /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+                       /* ss.Info.speed_spectrum_rate -- in unit of khz */
+                       /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+                       /*     = reference_clock * 5 / speed_spectrum_rate */
+                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+                       /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+                       /*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+                       uint32_t clkv =
+                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+               }
+       }
+
+       /* MCLK_PWRMGT_CNTL setup */
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+       /* Save the result data to the output memory level structure */
+       mclk->MclkFrequency   = memory_clock;
+       mclk->MpllFuncCntl    = mpll_func_cntl;
+       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+       mclk->DllCntl         = dll_cntl;
+       mclk->MpllSs1         = mpll_ss1;
+       mclk->MpllSs2         = mpll_ss2;
+
+       return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+               bool strobe_mode)
+{
+       uint8_t mc_para_index;
+
+       if (strobe_mode) {
+               if (memory_clock < 12500)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 47500)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+       } else {
+               if (memory_clock < 65000)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 135000)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+       }
+
+       return mc_para_index;
+}
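+
+/* Illustrative mapping: 30000 (300 MHz in 10 kHz units) in strobe mode
+ * gives (30000 - 10000) / 2500 = 8; 80000 outside strobe mode gives
+ * (80000 - 60000) / 5000 = 4.
+ */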
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+       uint8_t mc_para_index;
+
+       if (memory_clock < 10000)
+               mc_para_index = 0;
+       else if (memory_clock >= 80000)
+               mc_para_index = 0x0f;
+       else
+               mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+       return mc_para_index;
+}
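+/* Example (illustrative only): a DDR3 memory clock of 40000 maps to
+ * (40000 - 10000) / 5000 + 1 = 7; clocks below 10000 return 0 and clocks
+ * at or above 80000 clamp to 0x0f.
+ */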
+
+
+static int tonga_populate_single_memory_level(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU72_Discrete_MemoryLevel *memory_level
+               )
+{
+       uint32_t mvdd = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int result = 0;
+       bool dll_state_on;
+       struct cgs_display_info info = {0};
+       uint32_t mclk_edc_wr_enable_threshold = 40000;
+       uint32_t mclk_stutter_mode_threshold = 30000;
+       uint32_t mclk_edc_enable_threshold = 40000;
+       uint32_t mclk_strobe_mode_threshold = 40000;
+
+       if (NULL != pptable_info->vdd_dep_on_mclk) {
+               result = tonga_get_dependecy_volt_by_clk(hwmgr,
+                               pptable_info->vdd_dep_on_mclk,
+                               memory_clock,
+                               &memory_level->MinVoltage, &mvdd);
+               PP_ASSERT_WITH_CODE(
+                       !result,
+                       "can not find MinVddc voltage value from memory VDDC "
+                       "voltage dependency table",
+                       return result);
+       }
+
+       if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+               memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+       else
+               memory_level->MinMvdd = mvdd;
+
+       memory_level->EnabledForThrottle = 1;
+       memory_level->EnabledForActivity = 0;
+       memory_level->UpHyst = 0;
+       memory_level->DownHyst = 100;
+       memory_level->VoltageDownHyst = 0;
+
+       /* Indicates maximum activity level for this performance level.*/
+       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       memory_level->StutterEnable = 0;
+       memory_level->StrobeEnable = 0;
+       memory_level->EdcReadEnable = 0;
+       memory_level->EdcWriteEnable = 0;
+       memory_level->RttEnable = 0;
+
+       /* Default to the low watermark. The highest level will be set to high later. */
+       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       data->display_timing.num_existing_displays = info.display_count;
+
+       if ((mclk_stutter_mode_threshold != 0) &&
+           (memory_clock <= mclk_stutter_mode_threshold) &&
+           (!data->is_uvd_enabled)
+           && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+           && (data->display_timing.num_existing_displays <= 2)
+           && (data->display_timing.num_existing_displays != 0))
+               memory_level->StutterEnable = 1;
+
+       /* decide strobe mode*/
+       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+               (memory_clock <= mclk_strobe_mode_threshold);
+
+       /* decide EDC mode and memory clock ratio*/
+       if (data->is_memory_gddr5) {
+               memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+                                       memory_level->StrobeEnable);
+
+               if ((mclk_edc_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_enable_threshold)) {
+                       memory_level->EdcReadEnable = 1;
+               }
+
+               if ((mclk_edc_wr_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_wr_enable_threshold)) {
+                       memory_level->EdcWriteEnable = 1;
+               }
+
+               if (memory_level->StrobeEnable) {
+                       if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+                       } else {
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+                       }
+
+               } else {
+                       dll_state_on = data->dll_default_on;
+               }
+       } else {
+               memory_level->StrobeRatio =
+                       tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+       }
+
+       result = tonga_calculate_mclk_params(hwmgr,
+               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+       if (!result) {
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+               /* MCLK frequency in units of 10KHz*/
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+               /* Indicates maximum activity level for this performance level.*/
+               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+       }
+
+       return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int result;
+
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t level_array_address =
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+       uint32_t level_array_size =
+                               sizeof(SMU72_Discrete_MemoryLevel) *
+                               SMU72_MAX_LEVELS_MEMORY;
+       SMU72_Discrete_MemoryLevel *levels =
+                               smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                       "can not populate memory level as memory clock is zero",
+                       return -EINVAL);
+               result = tonga_populate_single_memory_level(
+                               hwmgr,
+                               dpm_table->mclk_table.dpm_levels[i].value,
+                               &(smu_data->smc_state_table.MemoryLevel[i]));
+               if (result)
+                       return result;
+       }
+
+       /* Only enable level 0 for now.*/
+       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+       /*
+       * In order to prevent MC activity in stutter mode from pushing DPM up,
+       * the UVD change complements this by putting the MCLK in a higher state
+       * by default, such that we are not affected by the up threshold or by
+       * MCLK DPM latency.
+       */
+       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+       /* set highest level watermark to high*/
+       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       /* level count will send to smc once at init smc table and never change*/
+       result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+               level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+               SMC_RAM_END);
+
+       return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+                               uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find the first MVDD entry whose clock is at least the requested clock */
+               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+                               /* Always round to higher voltage. */
+                               smio_pattern->Voltage =
+                                     data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+
+               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+                       "MVDD Voltage is outside the supported range.",
+                       return -EINVAL);
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+       SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+
+       SMIO_Pattern voltage_level;
+       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+       /* The ACPI state should not do DPM on DC (or ever).*/
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       table->ACPILevel.MinVoltage =
+                       smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+       /* use the reference clock as the ACPI state's SCLK */
+       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+               table->ACPILevel.SclkFrequency,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.",
+               return result);
+
+       /* divider ID for required SCLK*/
+       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+       table->ACPILevel.DeepSleepDivId = 0;
+
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                                       SPLL_PWRON, 0);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                                               SPLL_RESET, 1);
+       spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+                                               SCLK_MUX_SEL, 4);
+
+       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+       /* For various features to be enabled/disabled while this level is active.*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       /* SCLK frequency in units of 10KHz*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+       table->MemoryACPILevel.MinVoltage =
+                           smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+       /*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+       if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+               table->MemoryACPILevel.MinMvdd =
+                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+       else
+               table->MemoryACPILevel.MinMvdd = 0;
+
+       /* Force reset on DLL*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+       /* Disable DLL in ACPIState*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+       /* Enable DLL bypass signal*/
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK0_BYPASS, 0);
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK1_BYPASS, 0);
+
+       table->MemoryACPILevel.DllCntl            =
+               PP_HOST_TO_SMC_UL(dll_cntl);
+       table->MemoryACPILevel.MclkPwrmgtCntl     =
+               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+       table->MemoryACPILevel.MpllAdFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+       table->MemoryACPILevel.MpllDqFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl       =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl_1     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+       table->MemoryACPILevel.MpllFuncCntl_2     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+       table->MemoryACPILevel.MpllSs1            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+       table->MemoryACPILevel.MpllSs2            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       /* Indicates maximum activity level for this performance level.*/
+       table->MemoryACPILevel.ActivityLevel =
+                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       table->MemoryACPILevel.StutterEnable = 0;
+       table->MemoryACPILevel.StrobeEnable = 0;
+       table->MemoryACPILevel.EdcReadEnable = 0;
+       table->MemoryACPILevel.EdcWriteEnable = 0;
+       table->MemoryACPILevel.RttEnable = 0;
+
+       return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+                                       SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                               pptable_info->mm_dep_table;
+
+       table->UvdLevelCount = (uint8_t) (mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                                               mm_table->entries[count].vddc);
+               table->UvdLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                                               mm_table->entries[count].vddgfx) : 0;
+               table->UvdLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                                            mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->UvdLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(
+                                       hwmgr,
+                                       table->UvdLevel[count].VclkFrequency,
+                                       &dividers);
+
+               PP_ASSERT_WITH_CODE((!result),
+                                   "cannot find divider id for Vclk clock",
+                                       return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                                         table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                                   "cannot find divider id for Dclk clock",
+                                       return result);
+
+               table->UvdLevel[count].DclkDivider =
+                                       (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+       }
+
+       return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                             (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                    pptable_info->mm_dep_table;
+
+       table->VceLevelCount = (uint8_t) (mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency =
+                       mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                               mm_table->entries[count].vddc);
+               table->VceLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->VceLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->VceLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                       table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                               "cannot find divider id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                   pptable_info->mm_dep_table;
+
+       table->AcpLevelCount = (uint8_t) (mm_table->count);
+       table->AcpBootLevel = 0;
+
+       for (count = 0; count < table->AcpLevelCount; count++) {
+               table->AcpLevel[count].Frequency =
+                       pptable_info->mm_dep_table->entries[count].aclk;
+               table->AcpLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                       mm_table->entries[count].vddc);
+               table->AcpLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->AcpLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->AcpLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                       table->AcpLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                       "can not find divide id for engine clock", return result);
+
+               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                   pptable_info->mm_dep_table;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].Frequency =
+                       pptable_info->mm_dep_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                               mm_table->entries[count].vddc);
+               table->SamuLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->SamuLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->SamuLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                       table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                       "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+               struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock,
+               uint32_t memory_clock,
+               struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+               )
+{
+       uint32_t dramTiming;
+       uint32_t dramTiming2;
+       uint32_t burstTime;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                               engine_clock, memory_clock);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+       arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+       return 0;
+}
+
+/**
+ * Setup parameters for the MC ARB.
+ *
+ * This function is to be called from the SetPowerState table.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       int result = 0;
+       SMU72_Discrete_MCArbDramTimingTable  arb_regs;
+       uint32_t i, j;
+
+       memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = tonga_populate_memory_timing_parameters
+                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+                                data->dpm_table.mclk_table.dpm_levels[j].value,
+                                &arb_regs.entries[i][j]);
+
+                       if (result)
+                               break;
+               }
+       }
+
+       if (!result) {
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU72_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END
+                               );
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table*/
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+       data->vbios_boot_state.sclk_bootup_value,
+       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+       if (result != 0) {
+               smu_data->smc_state_table.GraphicsBootLevel = 0;
+               printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
+                               "clock value in dependency table. "
+                               "Using Graphics DPM level 0 !");
+               result = 0;
+       }
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+               data->vbios_boot_state.mclk_bootup_value,
+               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+       if (result != 0) {
+               smu_data->smc_state_table.MemoryBootLevel = 0;
+               printk(KERN_ERR "[powerplay] VBIOS did not find boot "
+                               "engine clock value in dependency table."
+                               "Using Memory DPM level 0 !");
+               result = 0;
+       }
+
+       table->BootVoltage.Vddc =
+               phm_get_voltage_id(&(data->vddc_voltage_table),
+                       data->vbios_boot_state.vddc_bootup_value);
+       table->BootVoltage.VddGfx =
+               phm_get_voltage_id(&(data->vddgfx_voltage_table),
+                       data->vbios_boot_state.vddgfx_bootup_value);
+       table->BootVoltage.Vddci =
+               phm_get_voltage_id(&(data->vddci_voltage_table),
+                       data->vbios_boot_state.vddci_bootup_value);
+       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+                       volt_with_cks, value;
+       uint16_t clock_freq_u16;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+                       volt_offset = 0;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+       uint32_t hw_revision, dev_id;
+       struct cgs_system_info sys_info = {0};
+
+       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       hw_revision = (uint32_t)sys_info.value;
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       dev_id = (uint32_t)sys_info.value;
+
+       /* Read the SMU efuse to calculate RO and determine
+        * whether the part is SS or FF. If RO >= 1660MHz, the part is FF.
+        */
+       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (146 * 4));
+       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (148 * 4));
+       efuse &= 0xFF000000;
+       efuse = efuse >> 24;
+       efuse2 &= 0xF;
+
+       if (efuse2 == 1)
+               ro = (2300 - 1350) * efuse / 255 + 1350;
+       else
+               ro = (2500 - 1000) * efuse / 255 + 1000;
+
+       if (ro >= 1660)
+               type = 0;
+       else
+               type = 1;
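+       /* Worked example (hypothetical fuse values): with efuse2 == 1 and
+        * efuse == 128, ro = (2300 - 1350) * 128 / 255 + 1350 = 1826, which
+        * is >= 1660, so the part would be classified as FF (type = 0).
+        */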
+
+       /* Populate Stretch amount */
+       smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+       for (i = 0; i < sclk_table->count; i++) {
+               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+                               sclk_table->entries[i].cks_enable << i;
+               if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+                       volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+                               (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+                               (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+                       volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+                               (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+                               (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+               } else {
+                       volt_without_cks = (uint32_t)((14041 *
+                               (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+                               (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+                       volt_with_cks = (uint32_t)((13946 *
+                               (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+                               (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+               }
+               if (volt_without_cks >= volt_with_cks)
+                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+       }
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       STRETCH_ENABLE, 0x0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       masterReset, 0x1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       staticEnable, 0x1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+                       masterReset, 0x0);
+
+       /* Populate CKS Lookup Table */
+       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+               stretch_amount2 = 0;
+       else if (stretch_amount == 3 || stretch_amount == 4)
+               stretch_amount2 = 1;
+       else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ClockStretcher);
+               PP_ASSERT_WITH_CODE(false,
+                               "Stretch Amount in PPTable not supported\n",
+                               return -EINVAL);
+       }
+
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixPWR_CKS_CNTL);
+       value &= 0xFFC2FF87;
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+                       tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+                       tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+                       GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+                       SclkFrequency) / 100);
+       if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+                       clock_freq_u16 &&
+           tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+                       clock_freq_u16) {
+               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+               value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+               value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+               value |= (tonga_clock_stretch_amount_conversion
+                               [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+                                [stretch_amount]) << 3;
+       }
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+                       CKS_LOOKUPTableEntry[0].minFreq);
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+                       CKS_LOOKUPTableEntry[0].maxFreq);
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+                       tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+                       (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixPWR_CKS_CNTL, value);
+
+       /* Populate DDT Lookup Table */
+       for (i = 0; i < 4; i++) {
+               /* Assign the minimum and maximum VID stored
+                * in the last row of Clock Stretcher Voltage Table.
+                */
+               smu_data->smc_state_table.ClockStretcherDataTable.
+               ClockStretcherDataTableEntry[i].minVID =
+                               (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+               smu_data->smc_state_table.ClockStretcherDataTable.
+               ClockStretcherDataTableEntry[i].maxVID =
+                               (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+               /* Loop through each SCLK and check the frequency
+                * to see if it lies within the frequency for clock stretcher.
+                */
+               for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+                       cks_setting = 0;
+                       clock_freq = PP_SMC_TO_HOST_UL(
+                                       smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+                       /* Check the allowed frequency against the sclk level[j].
+                        *  Sclk's endianness has already been converted,
+                        *  and it's in 10Khz unit,
+                        *  as opposed to Data table, which is in Mhz unit.
+                        */
+                       if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+                               cks_setting |= 0x2;
+                               if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+                                       cks_setting |= 0x1;
+                       }
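+                       /* Worked example (hypothetical table values): with a
+                        * DDT window of 800..1000 MHz and clock_freq = 90000
+                        * (900 MHz in 10Khz units), 90000 >= 800 * 100 sets
+                        * bit 1 and 90000 < 1000 * 100 sets bit 0, giving
+                        * cks_setting = 0x3 for this level.
+                        */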
+                       smu_data->smc_state_table.ClockStretcherDataTable.
+                       ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+               }
+               CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+                               ClockStretcherDataTable.
+                               ClockStretcherDataTableEntry[i].setting);
+       }
+
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       ixPWR_CKS_CNTL);
+       value &= 0xFFFFFFFE;
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       ixPWR_CKS_CNTL, value);
+
+       return 0;
+}
+
+/**
+ * Populates the SMC VRConfig field in DPM table.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint16_t config;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+               /* Split mode */
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+                       config = VR_SVI2_PLANE_2;
+                       table->VRConfig |= config;
+               } else {
+                       printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
+                               "be both on SVI2 control in splitted mode !\n");
+               }
+       } else {
+               /* Merged mode  */
+               config = VR_MERGED_WITH_VDDC;
+               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+               /* Set Vddc Voltage Controller  */
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+                       config = VR_SVI2_PLANE_1;
+                       table->VRConfig |= config;
+               } else {
+                       printk(KERN_ERR "[ powerplay ] VDDC should be on "
+                                       "SVI2 control in merged mode !\n");
+               }
+       }
+
+       /* Set Vddci Voltage Controller  */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+       }
+
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               config = VR_SMIO_PATTERN_2;
+               table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+       }
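+       /* Illustrative packing (hypothetical control modes): in split mode
+        * with VDDC on SVI2 and VDDCI/MVDD on GPIO, VRConfig ends up as
+        * (VR_SVI2_PLANE_1 << VRCONF_VDDGFX_SHIFT) | VR_SVI2_PLANE_2 |
+        * (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT) |
+        * (VR_SMIO_PATTERN_2 << VRCONF_MVDD_SHIFT), one selector per rail.
+        */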
+
+       return 0;
+}
+
+
+/**
+ * Initialize the ARB DRAM timing table's index field.
+ *
+ * @param    smumgr  the address of the SMC manager.
+ * @return   0 on success, otherwise the SMC read/write error code
+ */
+static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+       uint32_t tmp;
+       int result;
+
+       /*
+       * This is a read-modify-write on the first byte of the ARB table.
+       * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+       * is the field 'current'.
+       * This solution is ugly, but we never write the whole table, only
+       * individual fields in it.
+       * In reality this field should not be in that structure
+       * but in a soft register.
+       */
+       result = smu7_read_smc_sram_dword(smumgr,
+                               smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+       if (result != 0)
+               return result;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
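+       /* Worked example (hypothetical values): if the dword read back is
+        * 0x12345678 and MC_CG_ARB_FREQ_F1 is 0x0B, the masked value is
+        * 0x00345678 and the dword written back becomes 0x0B345678, so only
+        * the index byte changes.
+        */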
+
+       return smu7_write_smc_sram_dword(smumgr,
+                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+       SMU72_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+       int  i, j, k;
+       uint16_t *pdef1;
+       uint16_t *pdef2;
+
+       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usTDP * 256));
+       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
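+       /* The TDP fields are 8.8 fixed point: e.g. (illustrative) a 180 W
+        * usTDP becomes 180 * 256 = 46080 (0xB400) before the endianness
+        * conversion.
+        */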
+
+       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+                       "Target Operating Temp is out of Range !",
+                       );
+
+       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+       dpm_table->GpuTjHyst = 8;
+
+       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+       dpm_table->BAPM_TEMP_GRADIENT =
+                               PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+       pdef1 = defaults->bapmti_r;
+       pdef2 = defaults->bapmti_rc;
+
+       for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+               for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+                       for (k = 0; k < SMU72_DTE_SINKS; k++) {
+                               dpm_table->BAPMTI_R[i][j][k] =
+                                               PP_HOST_TO_SMC_US(*pdef1);
+                               dpm_table->BAPMTI_RC[i][j][k] =
+                                               PP_HOST_TO_SMC_US(*pdef2);
+                               pdef1++;
+                               pdef2++;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       /* The number of TDC fraction bits was changed from 8 to 7
+        * for Fiji, as requested by the SMC team.
+        */
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->tdc_vddc_throttle_release_limit_perc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+       return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                       fuse_table_offset +
+                       offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW6 "
+                               "(SviLoadLineEn) from SMC Failed !",
+                               return -EINVAL);
+       else
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+       return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+       if ((hwmgr->thermal_controller.advanceFanControlParameters.
+                       usFanOutputSensitivity & (1 << 15)) ||
+               (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+               hwmgr->thermal_controller.advanceFanControlParameters.
+               usFanOutputSensitivity = hwmgr->thermal_controller.
+                       advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+                                       advanceFanControlParameters.usFanOutputSensitivity);
+       return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+       uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
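+       /* Worked example (hypothetical leakage values): usHighCACLeakage =
+        * 1600 gives 1600 / 100 * 256 = 4096, i.e. 16 in 8.8 fixed point,
+        * because the divide by 100 happens first in integer math.
+        */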
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+       return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t pm_fuse_table_offset;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to get pm_fuse_table_offset Failed !",
+                               return -EINVAL);
+
+               /* DW6 */
+               if (tonga_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate SviLoadLine Failed !",
+                               return -EINVAL);
+               /* DW7 */
+               if (tonga_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed !",
+                                       return -EINVAL);
+               /* DW8 */
+               if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate TdcWaterfallCtl Failed !",
+                               return -EINVAL);
+
+               /* DW9-DW12 */
+               if (tonga_populate_temperature_scaler(hwmgr) != 0)
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate LPMLTemperatureScaler Failed !",
+                               return -EINVAL);
+
+               /* DW13-DW14 */
+               if (tonga_populate_fuzzy_fan(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate Fuzzy Fan "
+                               "Control parameters Failed !",
+                               return -EINVAL);
+
+               /* DW15-DW18 */
+               if (tonga_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate GnbLPML Failed !",
+                               return -EINVAL);
+
+               /* DW19 */
+               if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                               "Attempt to populate GnbLPML "
+                               "Min and Max Vid Failed !",
+                               return -EINVAL);
+
+               /* DW20 */
+               if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(
+                               false,
+                               "Attempt to populate BapmVddCBaseLeakage "
+                               "Hi and Lo Sidd Failed !",
+                               return -EINVAL);
+
+               if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed !",
+                                       return -EINVAL);
+       }
+       return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
+                                SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+       const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
+
+       uint32_t i, j;
+
+       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+               if (smu_data->mc_reg_table.validflag & 1<<j) {
+                       PP_ASSERT_WITH_CODE(
+                               i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                               "Index of mc_reg_table->address[] array "
+                               "out of boundary",
+                               return -EINVAL);
+                       mc_reg_table->address[i].s0 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
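+       /* Example (illustrative): with last = 4 and validflag = 0b1011,
+        * address pairs 0, 1 and 3 are copied to indices 0..2 and
+        * mc_reg_table->last ends up as 3.
+        */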
+
+       mc_reg_table->last = (uint8_t)i;
+
+       return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void tonga_convert_mc_registers(
+       const struct tonga_mc_reg_entry *entry,
+       SMU72_Discrete_MCRegisterSet *data,
+       uint32_t num_entries, uint32_t valid_flag)
+{
+       uint32_t i, j;
+
+       for (i = 0, j = 0; j < num_entries; j++) {
+               if (valid_flag & 1<<j) {
+                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+                       i++;
+               }
+       }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+               struct pp_smumgr *smumgr,
+               const uint32_t memory_clock,
+               SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+               )
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+       uint32_t i = 0;
+
+       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+               if (memory_clock <=
+                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+                       break;
+               }
+       }
+
+       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, smu_data->mc_reg_table.last,
+                               smu_data->mc_reg_table.validflag);
+
+       return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_MCRegisters *mc_regs)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int res;
+       uint32_t i;
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               res = tonga_convert_mc_reg_table_entry_to_smc(
+                               hwmgr->smumgr,
+                               data->dpm_table.mclk_table.dpm_levels[i].value,
+                               &mc_regs->data[i]
+                               );
+
+               if (0 != res)
+                       result = res;
+       }
+
+       return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t address;
+       int32_t result;
+
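+       /* re-upload only if an overdrive MCLK change invalidated the table */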
+       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+               return 0;
+
+       memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+       if (result != 0)
+               return result;
+
+       address = smu_data->smu7_data.mc_reg_table_start +
+                       (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+       return  smu7_copy_bytes_to_smc(
+                       hwmgr->smumgr, address,
+                       (uint8_t *)&smu_data->mc_regs.data[0],
+                       sizeof(SMU72_Discrete_MCRegisterSet) *
+                       data->dpm_table.mclk_table.count,
+                       SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+       memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+       result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize MCRegTable for the MC register addresses !",
+               return result;);
+
+       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize MCRegTable for driver state !",
+               return result;);
+
+       return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       struct  phm_ppt_v1_information *table_info =
+                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (table_info &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID)
+               smu_data->power_tune_defaults =
+                               &tonga_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+       else
+               smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+/**
+ * Initializes the SMC table and uploads it.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   0 on success; otherwise a negative error code.
+ */
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       uint8_t i;
+       pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+       tonga_initialize_power_tune_defaults(hwmgr);
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+               tonga_populate_smc_voltage_tables(hwmgr, table);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
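+       /*
+        * One or two memory channels; 0x40 is assumed to be
+        * PPSMC_SYSTEMFLAG_12CHANNEL (a 1-2 channel configuration).
+        */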
+       if (i == 1 || i == 0)
+               table->SystemFlags |= 0x40;
+
+       if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+               result = tonga_populate_ulv_state(hwmgr, table);
+               PP_ASSERT_WITH_CODE(!result,
+                       "Failed to initialize ULV state !",
+                       return result;);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = tonga_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Link Level !", return result);
+
+       result = tonga_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Graphics Level !", return result);
+
+       result = tonga_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Memory Level !", return result);
+
+       result = tonga_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize ACPI Level !", return result);
+
+       result = tonga_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize VCE Level !", return result);
+
+       result = tonga_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize ACP Level !", return result);
+
+       result = tonga_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize SAMU Level !", return result);
+
+       /*
+        * Since only the initial state is completely set up at this point
+        * (the other states are just copies of the boot state) we only
+        * need to populate the ARB settings for the initial state.
+        */
+       result = tonga_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to Write ARB settings for the initial state.",
+               return result;);
+
+       result = tonga_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize UVD Level !", return result);
+
+       result = tonga_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Boot Level !", return result);
+
+       result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate BAPM Parameters !", return result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ClockStretcher)) {
+               result = tonga_populate_clock_stretcher_data_table(hwmgr);
+               PP_ASSERT_WITH_CODE(!result,
+                       "Failed to populate Clock Stretcher Data Table !",
+                       return result;);
+       }
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+       table->TemperatureLimitHigh =
+               table_info->cac_dtp_table->usTargetOperatingTemp *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->TemperatureLimitLow =
+               (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->MemoryVoltageChangeEnable  = 1;
+       table->MemoryInterval  = 1;
+       table->VoltageResponseTime  = 0;
+       table->PhaseResponseTime  = 0;
+       table->MemoryThermThrottleEnable  = 1;
+
+       /*
+        * CAIL reads the current link status and reports it as the cap (we
+        * cannot change this due to some previous issues we had).
+        * The SMC drops the link status to the lowest level after PowerPlay
+        * enables DPM. After PnP or toggling CrossFire the driver is reloaded,
+        * but this time CAIL reads the link status that the SMC set low and
+        * reports it as the cap to powerplay.
+        * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
+        */
+       PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+                       "There must be 1 or more PCIE levels defined in PPTable.",
+                       return -EINVAL);
+
+       table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+       table->PCIeGenInterval  = 1;
+
+       result = tonga_populate_vr_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate VRConfig setting !", return result);
+
+       table->ThermGpio  = 17;
+       table->SclkStepSize = 0x4000;
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+                                               &gpio_pin_assignment)) {
+               table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot);
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                                               &gpio_pin_assignment)) {
+               table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+       } else {
+               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+       }
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_Falcon_QuickTransition);
+
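+       /* disabled branch (if (0)): presumably kept around for a future
+        * Falcon quick-transition configuration */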
+       if (0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_Falcon_QuickTransition);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr,
+                       THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+               table->ThermOutPolarity =
+                       (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+                       (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+               /* if required, combine VRHot/PCC with thermal out GPIO*/
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot) &&
+                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+               }
+       } else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = 17;
+               table->ThermOutPolarity = 1;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+       }
+
+       for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(
+                       hwmgr->smumgr,
+                       smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+                       (uint8_t *)&(table->SystemFlags),
+                       sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+                       SMC_RAM_END);
+
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to upload dpm data to SMC memory !", return result;);
+
+       result = tonga_init_arb_table_index(hwmgr->smumgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "Failed to upload arb data to SMC memory !", return result);
+
+       result = tonga_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate PM fuses !", return result);
+
+       result = tonga_populate_initial_mc_reg_table(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate initial MC Reg table !", return result);
+
+       return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success, or the error code from uploading the fan table.
+*/
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_MicrocodeFanControl))
+               return 0;
+
+       if (0 == smu_data->smu7_data.fan_table_start) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+                                               CGS_IND_REG__SMC,
+                                               CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (0 == duty100) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+                  hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+                 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
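+       /*
+        * Fan-curve slopes; the +50 followed by /100 rounds to the nearest
+        * integer, and the 16x factor is assumed to match the SMC's
+        * fixed-point slope encoding.
+        */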
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
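+       /* thresholds appear to be in 0.01 degC; +50 rounds to whole degrees */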
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       fan_table.FanControl_GL_Flag = 1;
+
+       res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+                                       smu_data->smu7_data.fan_table_start,
+                                       (uint8_t *)&fan_table,
+                                       (uint32_t)sizeof(fan_table),
+                                       SMC_RAM_END);
+
+       return res;
+}
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return tonga_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr->smumgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+
+       result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((!result),
+                               "Failed to upload MC reg table !",
+                               return result);
+
+       result = tonga_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters !",
+                       );
+
+       return result;
+}
+
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+               break;
+       }
+       printk(KERN_WARNING "can't get the offset of type %x member %x\n",
+                       type, member);
+       return 0;
+}
+
+uint32_t tonga_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU72_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU72_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU72_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU72_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU72_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU72_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU72_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU72_MAX_LEVELS_MVDD;
+       }
+       printk(KERN_WARNING "can't get the mac value %x\n", value);
+
+       return 0;
+}
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.UvdBootLevel = 0;
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
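+       /* align the offset down to its containing dword; the SMC indirect
+        * accesses below operate on whole 32-bit words */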
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
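+       /* UvdBootLevel occupies the top byte of this dword, hence the mask */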
+       mm_boot_level_value &= 0x00FFFFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+       cgs_write_ind_register(hwmgr->device,
+                               CGS_IND_REG__SMC,
+                               mm_boot_level_offset, mm_boot_level_value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.VceBootLevel =
+               (uint8_t) (table_info->mm_dep_table->count - 1);
+
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                                       offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFF00FFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFFFFFF00;
+       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               tonga_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               tonga_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               tonga_update_samu_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   0 if all required table offsets were read, 1 otherwise.
+ */
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, DpmTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, SoftRegisters),
+                               &tmp, SMC_RAM_END);
+
+       if (!result) {
+               data->soft_regs_start = tmp;
+               smu_data->smu7_data.soft_regs_start = tmp;
+       }
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, mcRegisterTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
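+       /* note: unlike the other reads, a failure here is not folded into 'error' */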
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, FanTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, Version),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (result != 0);
+
+       return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+       bool result = true;
+
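+       /* map each MC sequencer register to its low-power (_LP) shadow copy */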
+       switch (in_reg) {
+       case  mmMC_SEQ_RAS_TIMING:
+               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+               break;
+
+       case  mmMC_SEQ_DLL_STBY:
+               *out_reg = mmMC_SEQ_DLL_STBY_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD0:
+               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD1:
+               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CTRL:
+               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+               break;
+
+       case mmMC_SEQ_CAS_TIMING:
+               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING:
+               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING2:
+               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CMD:
+               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CTL:
+               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D0:
+               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D1:
+               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D0:
+               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D1:
+               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+               break;
+
+       case mmMC_PMG_CMD_EMRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS1:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+
+       case mmMC_SEQ_PMG_TIMING:
+               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS2:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_2:
+               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+               break;
+
+       default:
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+       uint32_t i;
+       uint16_t address;
+
+       for (i = 0; i < table->last; i++) {
+               table->mc_reg_address[i].s0 =
+                       tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+                                                       &address) ?
+                                                       address :
+                                                table->mc_reg_address[i].s1;
+       }
+       return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+                                       struct tonga_mc_reg_table *ni_table)
+{
+       uint8_t i, j;
+
+       PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+               "Invalid VramInfo table.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+               "Invalid VramInfo table.", return -EINVAL);
+
+       for (i = 0; i < table->last; i++)
+               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+       ni_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               ni_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       ni_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+
+       ni_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+/**
+ * The VBIOS omits some information to reduce size; we need to recover it here.
+ * 1.   When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
+ *      mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be written
+ *      to mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2.   When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written
+ *      to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3.   These data need to be set for each clock range.
+ * @param    hwmgr the address of the powerplay hardware manager.
+ * @param    table the address of MCRegTable
+ * @return   0 on success, -EINVAL if the table overflows.
+ */
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+                                       struct tonga_mc_reg_table *table)
+{
+       uint8_t i, j, k;
+       uint32_t temp_reg;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
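+       /*
+        * Walk the VBIOS-provided registers and append derived EMRS/MRS
+        * entries at the end; j tracks the next free slot.
+        */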
+       for (i = 0, j = table->last; i < table->last; i++) {
+               PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                       "Invalid VramInfo table.", return -EINVAL);
+
+               switch (table->mc_reg_address[i].s1) {
+
+               case mmMC_SEQ_MISC1:
+                       temp_reg = cgs_read_register(hwmgr->device,
+                                                       mmMC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+                               if (!data->is_memory_gddr5)
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       if (!data->is_memory_gddr5) {
+                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++)
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               j++;
+                               PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                                       "Invalid VramInfo table.", return -EINVAL);
+                       }
+
+                       break;
+
+               case mmMC_SEQ_RESERVE_M:
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+                       break;
+
+               default:
+                       break;
+               }
+
+       }
+
+       table->last = j;
+
+       return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+       uint8_t i, j;
+
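+       /*
+        * Mark a register column as valid (needing per-level programming)
+        * only if its value differs between two adjacent clock entries.
+        */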
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+                               table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->validflag |= (1 << i);
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+       pp_atomctrl_mc_reg_table *table;
+       struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+       uint8_t module_index = tonga_get_memory_module_index(hwmgr);
+
+       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+       if (table == NULL)
+               return -ENOMEM;
+
+       /* Program additional LP registers that are no longer programmed by VBIOS */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+       if (!result)
+               result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+       if (!result) {
+               tonga_set_s0_mc_reg_index(ni_table);
+               result = tonga_set_mc_special_registers(hwmgr, ni_table);
+       }
+
+       if (!result)
+               tonga_set_valid_flag(ni_table);
+
+       kfree(table);
+
+       return result;
+}
+
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
new file mode 100644 (file)
index 0000000..8ae169f
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _TONGA_SMC_H
+#define _TONGA_SMC_H
+
+#include "smumgr.h"
+#include "smu72.h"
+
+
+#define ASICID_IS_TONGA_P(wDID, bRID)   \
+       (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+       || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
+
+
+struct tonga_pt_defaults {
+       uint8_t   svi_load_line_en;
+       uint8_t   svi_load_line_vddC;
+       uint8_t   tdc_vddc_throttle_release_limit_perc;
+       uint8_t   tdc_mawt;
+       uint8_t   tdc_waterfall_ctl;
+       uint8_t   dte_ambient_temp_base;
+       uint32_t  display_cac;
+       uint32_t  bamp_temp_gradient;
+       uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+       uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+};
+
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
+uint32_t tonga_get_mac_definition(uint32_t value);
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
+#endif
+
index f42c536b3af10a58bb12b95fd6ecdd52a9e0b0b0..5f9124046b9b79d8fa7b763b3a3e41877571e859 100644 (file)
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
 #include "cgs_common.h"
+#include "tonga_smc.h"
+#include "smu7_smumgr.h"
 
-#define TONGA_SMC_SIZE                 0x20000
-#define BUFFER_SIZE                    80000
-#define MAX_STRING_SIZE                        15
-#define BUFFER_SIZETWO              131072 /*128 *1024*/
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-*/
-static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
-                               uint32_t smcAddress, uint32_t limit)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-       PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
-               "SMC address must be 4 byte aligned.",
-               return -1;);
-
-       PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
-               "SMC address is beyond the SMC RAM area.",
-               return -1;);
-
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-       return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byteCount the number of bytes to copy.
-*/
-int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-               uint32_t smcStartAddress, const uint8_t *src,
-               uint32_t byteCount, uint32_t limit)
-{
-       uint32_t addr;
-       uint32_t data, orig_data;
-       int result = 0;
-       uint32_t extra_shift;
-
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-       PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
-               "SMC address must be 4 byte aligned.",
-               return 0;);
-
-       PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
-               "SMC address is beyond the SMC RAM area.",
-               return 0;);
-
-       addr = smcStartAddress;
-
-       while (byteCount >= 4) {
-               /*
-                * Bytes are written into the
-                * SMC address space with the MSB first
-                */
-               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-               result = tonga_set_smc_sram_address(smumgr, addr, limit);
-
-               if (result)
-                       goto out;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byteCount -= 4;
-               addr += 4;
-       }
-
-       if (0 != byteCount) {
-               /* Now write odd bytes left, do a read modify write cycle */
-               data = 0;
-
-               result = tonga_set_smc_sram_address(smumgr, addr, limit);
-               if (result)
-                       goto out;
-
-               orig_data = cgs_read_register(smumgr->device,
-                                                       mmSMC_IND_DATA_0);
-               extra_shift = 8 * (4 - byteCount);
-
-               while (byteCount > 0) {
-                       data = (data << 8) + *src++;
-                       byteCount--;
-               }
-
-               data <<= extra_shift;
-               data |= (orig_data & ~((~0UL) << extra_shift));
-
-               result = tonga_set_smc_sram_address(smumgr, addr, limit);
-               if (result)
-                       goto out;
-
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-       }
-
-out:
-       return result;
-}
-
-
-int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-       static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
-
-       tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
-
-       return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-       return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
-                                       SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-                       && (0x20100 <= cgs_read_ind_register(smumgr->device,
-                                       CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       return 0;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       if (!tonga_is_smc_ram_running(smumgr))
-               return -1;
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       PP_ASSERT_WITH_CODE(
-               1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-               "Failed to send Previous Message.",
-               );
-
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       PP_ASSERT_WITH_CODE(
-               1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-               "Failed to send Message.",
-               );
-
-       return 0;
-}
-
-/*
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_without_waiting
-                               (struct pp_smumgr *smumgr, uint16_t msg)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       PP_ASSERT_WITH_CODE(
-               1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-               "Failed to send Previous Message.",
-               );
-       cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
-/*
-* Send a message to the SMC with parameter
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
-                               uint16_t msg, uint32_t parameter)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       if (!tonga_is_smc_ram_running(smumgr))
-               return PPSMC_Result_Failed;
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return tonga_send_msg_to_smc(smumgr, msg);
-}
-
-/*
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
-                       struct pp_smumgr *smumgr,
-                       uint16_t msg, uint32_t parameter)
-{
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-       cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-/*
- * Read a 32-bit value from the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value an output parameter for the data read from the SMC SRAM.
- * @param    limit the upper bound of the valid SMC address range.
- */
-int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
-                                       uint32_t smcAddress, uint32_t *value,
-                                       uint32_t limit)
-{
-       int result;
-
-       result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
-       if (0 != result)
-               return result;
-
-       *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-
-       return 0;
-}
-
-/*
- * Write a 32-bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value the value to write to the SMC SRAM.
- * @param    limit the upper bound of the valid SMC address range.
- */
-int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
-                                       uint32_t smcAddress, uint32_t value,
-                                       uint32_t limit)
-{
-       int result;
-
-       result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
-       if (0 != result)
-               return result;
-
-       cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-
-       return 0;
-}
-
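
Both SRAM accessors above are instances of index/data (indirect) register access: tonga_set_smc_sram_address() validates the address against 'limit' and programs it into mmSMC_IND_INDEX_0, after which mmSMC_IND_DATA_0 reads or writes the selected dword. A generic sketch of the pattern (mmio helpers and register names are hypothetical; the exact bounds check is the driver's):

    extern uint32_t mmio_read(int reg);            /* hypothetical */
    extern void mmio_write(int reg, uint32_t val); /* hypothetical */
    enum { IND_INDEX, IND_DATA };                  /* placeholder offsets */

    static int sram_dword_access(uint32_t addr, uint32_t *val, int is_write,
                                 uint32_t limit)
    {
            if ((addr & 3) || addr > limit)  /* dword-aligned and in range */
                    return -EINVAL;

            mmio_write(IND_INDEX, addr);     /* select the SRAM address */
            if (is_write)
                    mmio_write(IND_DATA, *val);
            else
                    *val = mmio_read(IND_DATA);
            return 0;
    }
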
-static int tonga_smu_fini(struct pp_smumgr *smumgr)
-{
-       struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
-
-       if (priv != NULL) {
-               smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
-               smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-               kfree(priv);
-               smumgr->backend = NULL;
-       }
-
-       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-       return 0;
-}
-
-static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-       enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-       switch (fw_type) {
-       case UCODE_ID_SMU:
-               result = CGS_UCODE_ID_SMU;
-               break;
-       case UCODE_ID_SDMA0:
-               result = CGS_UCODE_ID_SDMA0;
-               break;
-       case UCODE_ID_SDMA1:
-               result = CGS_UCODE_ID_SDMA1;
-               break;
-       case UCODE_ID_CP_CE:
-               result = CGS_UCODE_ID_CP_CE;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = CGS_UCODE_ID_CP_PFP;
-               break;
-       case UCODE_ID_CP_ME:
-               result = CGS_UCODE_ID_CP_ME;
-               break;
-       case UCODE_ID_CP_MEC:
-               result = CGS_UCODE_ID_CP_MEC;
-               break;
-       case UCODE_ID_CP_MEC_JT1:
-               result = CGS_UCODE_ID_CP_MEC_JT1;
-               break;
-       case UCODE_ID_CP_MEC_JT2:
-               result = CGS_UCODE_ID_CP_MEC_JT2;
-               break;
-       case UCODE_ID_RLC_G:
-               result = CGS_UCODE_ID_RLC_G;
-               break;
-       default:
-               break;
-       }
-
-       return result;
-}
-
-/**
- * Convert the PPIRI firmware type to an SMU type mask.
- * For MEC we need to check all MEC-related types.
-*/
-static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
-{
-       uint16_t result = 0;
-
-       switch (firmwareType) {
-       case UCODE_ID_SDMA0:
-               result = UCODE_ID_SDMA0_MASK;
-               break;
-       case UCODE_ID_SDMA1:
-               result = UCODE_ID_SDMA1_MASK;
-               break;
-       case UCODE_ID_CP_CE:
-               result = UCODE_ID_CP_CE_MASK;
-               break;
-       case UCODE_ID_CP_PFP:
-               result = UCODE_ID_CP_PFP_MASK;
-               break;
-       case UCODE_ID_CP_ME:
-               result = UCODE_ID_CP_ME_MASK;
-               break;
-       case UCODE_ID_CP_MEC:
-       case UCODE_ID_CP_MEC_JT1:
-       case UCODE_ID_CP_MEC_JT2:
-               result = UCODE_ID_CP_MEC_MASK;
-               break;
-       case UCODE_ID_RLC_G:
-               result = UCODE_ID_RLC_G_MASK;
-               break;
-       default:
-               break;
-       }
-
-       return result;
-}
-
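
Both converters above are pure lookup tables written as switches; note that UCODE_ID_CP_MEC and both JT entries deliberately share UCODE_ID_CP_MEC_MASK, since the MEC firmware images load as a unit. The same mapping can be expressed as data, in the spirit of the smu7 consolidation this series performs; a sketch:

    static const struct { uint16_t id; uint16_t mask; } fw_mask_map[] = {
            { UCODE_ID_SDMA0,      UCODE_ID_SDMA0_MASK },
            { UCODE_ID_SDMA1,      UCODE_ID_SDMA1_MASK },
            { UCODE_ID_CP_CE,      UCODE_ID_CP_CE_MASK },
            { UCODE_ID_CP_PFP,     UCODE_ID_CP_PFP_MASK },
            { UCODE_ID_CP_ME,      UCODE_ID_CP_ME_MASK },
            { UCODE_ID_CP_MEC,     UCODE_ID_CP_MEC_MASK },
            { UCODE_ID_CP_MEC_JT1, UCODE_ID_CP_MEC_MASK },
            { UCODE_ID_CP_MEC_JT2, UCODE_ID_CP_MEC_MASK },
            { UCODE_ID_RLC_G,      UCODE_ID_RLC_G_MASK },
    };

    static uint16_t fw_mask(uint16_t fw_type)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(fw_mask_map); i++)
                    if (fw_mask_map[i].id == fw_type)
                            return fw_mask_map[i].mask;
            return 0;  /* unknown types contribute nothing */
    }
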
-/**
- * Check whether the FW has been loaded; the SMU does not
- * report completion until loading has finished.
-*/
-static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
-{
-       uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);
-
-       if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
-                               SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
-               printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Populate one firmware image to the data structure */
-static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-                               uint16_t firmware_type,
-                               struct SMU_Entry *pentry)
-{
-       int result;
-       struct cgs_firmware_info info = {0};
-
-       result = cgs_get_firmware_info(
-                               smumgr->device,
-                               tonga_convert_fw_type_to_cgs(firmware_type),
-                               &info);
-
-       if (result == 0) {
-               pentry->version = 0;
-               pentry->id = (uint16_t)firmware_type;
-               pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-               pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-               pentry->meta_data_addr_high = 0;
-               pentry->meta_data_addr_low = 0;
-               pentry->data_size_byte = info.image_size;
-               pentry->num_register_entries = 0;
-
-               if (firmware_type == UCODE_ID_RLC_G)
-                       pentry->flags = 1;
-               else
-                       pentry->flags = 0;
-       } else {
-               return result;
-       }
-
-       return result;
-}
-
-static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
-{
-       struct tonga_smumgr *tonga_smu =
-               (struct tonga_smumgr *)(smumgr->backend);
-       uint16_t fw_to_load;
-       struct SMU_DRAMData_TOC *toc;
-       /**
-        * The first time this is called, during SmuMgr init, the SMU
-        * header file has not been processed yet, so the Soft Registers
-        * start offset is unknown. In that case UcodeLoadStatus is
-        * already 0, so the write can simply be skipped when the Soft
-        * Registers start offset is 0.
-        */
-       cgs_write_ind_register(smumgr->device,
-               CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);
-
-       tonga_send_msg_to_smc_with_parameter(smumgr,
-               PPSMC_MSG_SMU_DRAM_ADDR_HI,
-               tonga_smu->smu_buffer.mc_addr_high);
-       tonga_send_msg_to_smc_with_parameter(smumgr,
-               PPSMC_MSG_SMU_DRAM_ADDR_LO,
-               tonga_smu->smu_buffer.mc_addr_low);
-
-       toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
-
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry(smumgr,
-               UCODE_ID_RLC_G,
-               &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n",
-               return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry(smumgr,
-               UCODE_ID_CP_CE,
-               &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n",
-               return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_populate_single_firmware_entry
-               (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
-               "Failed to Get Firmware Entry.\n", return -1);
-
-       tonga_send_msg_to_smc_with_parameter(smumgr,
-               PPSMC_MSG_DRV_DRAM_ADDR_HI,
-               tonga_smu->header_buffer.mc_addr_high);
-       tonga_send_msg_to_smc_with_parameter(smumgr,
-               PPSMC_MSG_DRV_DRAM_ADDR_LO,
-               tonga_smu->header_buffer.mc_addr_low);
-
-       fw_to_load = UCODE_ID_RLC_G_MASK
-                       | UCODE_ID_SDMA0_MASK
-                       | UCODE_ID_SDMA1_MASK
-                       | UCODE_ID_CP_CE_MASK
-                       | UCODE_ID_CP_ME_MASK
-                       | UCODE_ID_CP_PFP_MASK
-                       | UCODE_ID_CP_MEC_MASK;
-
-       PP_ASSERT_WITH_CODE(
-               0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
-               smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
-               "Failed to request SMU to load uCode", return 0);
-
-       return 0;
-}
-
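
The nine PP_ASSERT_WITH_CODE blocks above differ only in the firmware ID, so the TOC population collapses naturally into a loop over an ID array, roughly the shape the shared smu7 code takes. A sketch, using the same locals as the function above:

    static const uint16_t toc_fw_list[] = {
            UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP, UCODE_ID_CP_ME,
            UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1, UCODE_ID_CP_MEC_JT2,
            UCODE_ID_SDMA0, UCODE_ID_SDMA1,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(toc_fw_list); i++) {
            if (tonga_populate_single_firmware_entry(smumgr, toc_fw_list[i],
                                &toc->entry[toc->num_entries++])) {
                    printk(KERN_ERR "[ powerplay ] failed to get firmware entry\n");
                    return -EINVAL;
            }
    }
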
-static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
-                               uint32_t firmwareType)
-{
-       return 0;
-}
-
-/**
- * Upload the SMC firmware to the SMC microcontroller.
- *
- * @param    smumgr  the address of the powerplay hardware manager.
- */
-static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
-{
-       const uint8_t *src;
-       uint32_t byte_count;
-       uint32_t *data;
-       struct cgs_firmware_info info = {0};
-
-       if (smumgr == NULL || smumgr->device == NULL)
-               return -EINVAL;
-
-       cgs_get_firmware_info(smumgr->device,
-               tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
-       if (info.image_size & 3) {
-               printk(KERN_ERR "[ powerplay ] SMC ucode is not 4-byte aligned\n");
-               return -EINVAL;
-       }
-
-       if (info.image_size > TONGA_SMC_SIZE) {
-               printk(KERN_ERR "[ powerplay ] SMC firmware image is larger than the SMC RAM area\n");
-               return -EINVAL;
-       }
-
-       cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
-       byte_count = info.image_size;
-       src = (const uint8_t *)info.kptr;
-
-       data = (uint32_t *)src;
-       for (; byte_count >= 4; data++, byte_count -= 4)
-               cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
-       SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
-       return 0;
-}
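
The upload path above is the index/data pattern again, with one twist: AUTO_INCREMENT_IND_0 makes every write to mmSMC_IND_DATA_0 advance the index automatically, so a single index write (0x20000, where the SMC expects its firmware) covers the whole image. The core loop in sketch form (mmio helpers hypothetical, as before):

    static void upload_image(const uint8_t *image, uint32_t size)
    {
            const uint32_t *dw = (const uint32_t *)image;

            mmio_write(IND_INDEX, 0x20000);  /* SMC firmware load offset      */
            set_auto_increment(1);           /* hypothetical: data writes now
                                                bump the index                */
            for (; size >= 4; size -= 4)
                    mmio_write(IND_DATA, *dw++);
            set_auto_increment(0);           /* back to plain indirect access */
    }
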
 
 static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
 {
@@ -623,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-       result = tonga_smu_upload_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
        if (result)
                return result;
 
@@ -653,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
        /**
         * Call Test SMU message with 0x20000 offset to trigger SMU start
         */
-       tonga_send_msg_to_smc_offset(smumgr);
+       smu7_send_msg_to_smc_offset(smumgr);
 
        /* Wait for done bit to be set */
        SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
@@ -690,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-       result = tonga_smu_upload_firmware_image(smumgr);
+       result = smu7_upload_smu_firmware_image(smumgr);
 
        if (result != 0)
                return result;
 
        /* Set smc instruct start point at 0x0 */
-       tonga_program_jump_on_start(smumgr);
+       smu7_program_jump_on_start(smumgr);
 
 
        SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -718,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
        int result;
 
        /* Only start SMC if SMC RAM is not running */
-       if (!tonga_is_smc_ram_running(smumgr)) {
+       if (!smu7_is_smc_ram_running(smumgr)) {
                /* Check if SMU is running in protected mode */
                if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
                                        SMU_FIRMWARE, SMU_MODE)) {
@@ -732,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
                }
        }
 
-       result = tonga_request_smu_reload_fw(smumgr);
+       result = smu7_request_smu_load_fw(smumgr);
 
        return result;
 }
@@ -746,67 +168,41 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
  */
 static int tonga_smu_init(struct pp_smumgr *smumgr)
 {
-       struct tonga_smumgr *tonga_smu;
-       uint8_t *internal_buf;
-       uint64_t mc_addr = 0;
-       /* Allocate memory for backend private data */
-       tonga_smu = (struct tonga_smumgr *)(smumgr->backend);
-       tonga_smu->header_buffer.data_size =
-               ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-       tonga_smu->smu_buffer.data_size = 200*4096;
-
-       smu_allocate_memory(smumgr->device,
-               tonga_smu->header_buffer.data_size,
-               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-               PAGE_SIZE,
-               &mc_addr,
-               &tonga_smu->header_buffer.kaddr,
-               &tonga_smu->header_buffer.handle);
-
-       tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
-       tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-       tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-       PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
-               "Out of memory.",
-               kfree(smumgr->backend);
-               cgs_free_gpu_mem(smumgr->device,
-               (cgs_handle_t)tonga_smu->header_buffer.handle);
-               return -1);
-
-       smu_allocate_memory(smumgr->device,
-               tonga_smu->smu_buffer.data_size,
-               CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-               PAGE_SIZE,
-               &mc_addr,
-               &tonga_smu->smu_buffer.kaddr,
-               &tonga_smu->smu_buffer.handle);
-
-       internal_buf = tonga_smu->smu_buffer.kaddr;
-       tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-       tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-       PP_ASSERT_WITH_CODE((NULL != internal_buf),
-               "Out of memory.",
-               kfree(smumgr->backend);
-               cgs_free_gpu_mem(smumgr->device,
-               (cgs_handle_t)tonga_smu->smu_buffer.handle);
-               return -1;);
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+       int i;
+
+       if (smu7_init(smumgr))
+               return -EINVAL;
+
+       for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
+               smu_data->activity_target[i] = 30;
 
        return 0;
 }
 
 static const struct pp_smumgr_func tonga_smu_funcs = {
        .smu_init = &tonga_smu_init,
-       .smu_fini = &tonga_smu_fini,
+       .smu_fini = &smu7_smu_fini,
        .start_smu = &tonga_start_smu,
-       .check_fw_load_finish = &tonga_check_fw_load_finish,
-       .request_smu_load_fw = &tonga_request_smu_reload_fw,
-       .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw,
-       .send_msg_to_smc = &tonga_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter,
+       .check_fw_load_finish = &smu7_check_fw_load_finish,
+       .request_smu_load_fw = &smu7_request_smu_load_fw,
+       .request_smu_load_specific_fw = NULL,
+       .send_msg_to_smc = &smu7_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
+       .update_smc_table = tonga_update_smc_table,
+       .get_offsetof = tonga_get_offsetof,
+       .process_firmware_header = tonga_process_firmware_header,
+       .init_smc_table = tonga_init_smc_table,
+       .update_sclk_threshold = tonga_update_sclk_threshold,
+       .thermal_setup_fan_table = tonga_thermal_setup_fan_table,
+       .populate_all_graphic_levels = tonga_populate_all_graphic_levels,
+       .populate_all_memory_levels = tonga_populate_all_memory_levels,
+       .get_mac_definition = tonga_get_mac_definition,
+       .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
+       .is_dpm_running = tonga_is_dpm_running,
 };
 
 int tonga_smum_init(struct pp_smumgr *smumgr)
index 33c788d7f05cc7e990bb6ebfe90770d2a24ccd3b..edb5f203f7f55ab2117251c0ad977d44563cbef7 100644 (file)
 #ifndef _TONGA_SMUMGR_H_
 #define _TONGA_SMUMGR_H_
 
-struct tonga_buffer_entry {
-       uint32_t data_size;
-       uint32_t mc_addr_low;
-       uint32_t mc_addr_high;
-       void *kaddr;
-       unsigned long  handle;
+#include "smu72_discrete.h"
+
+#include "smu7_smumgr.h"
+
+struct tonga_mc_reg_entry {
+       uint32_t mclk_max;
+       uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct tonga_mc_reg_table {
+       uint8_t   last;               /* number of registers */
+       uint8_t   num_entries;        /* number of used entries in mc_reg_table_entry */
+       uint16_t  validflag;          /* indicates whether each register is valid: 1 = valid, 0 = invalid; bit0->address[0], bit1->address[1], etc. */
+       struct tonga_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
 };
 
+
 struct tonga_smumgr {
-       uint8_t *pHeader;
-       uint8_t *pMecImage;
-       uint32_t ulSoftRegsStart;
 
-       struct tonga_buffer_entry header_buffer;
-       struct tonga_buffer_entry smu_buffer;
-};
+       struct smu7_smumgr                   smu7_data;
+       struct SMU72_Discrete_DpmTable       smc_state_table;
+       struct SMU72_Discrete_Ulv            ulv_setting;
+       struct SMU72_Discrete_PmFuses  power_tune_table;
+       struct tonga_pt_defaults  *power_tune_defaults;
+       SMU72_Discrete_MCRegisters      mc_regs;
+       struct tonga_mc_reg_table mc_reg_table;
 
-extern int tonga_smum_init(struct pp_smumgr *smumgr);
-extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-               uint32_t smcStartAddress, const uint8_t *src,
-               uint32_t byteCount, uint32_t limit);
-extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-               uint32_t *value, uint32_t limit);
-extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-               uint32_t value, uint32_t limit);
+       uint32_t        activity_target[SMU72_MAX_LEVELS_GRAPHICS];
+
+};
 
 #endif
index ef312bb75fda094b1f51bac52ce391d459341ca0..963a24d46a93d336e2d52bf1f4c7d045c6c2f57e 100644 (file)
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
-       if (s_job)
+       if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
index ee0a61c2861b5a33a56c09627db566eb7b1c6a8a..7130b044b004a87cab8af76c0a798c3bcb4e0c0e 100644 (file)
@@ -183,8 +183,6 @@ static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
-       .prepare_fb = NULL,
-       .cleanup_fb = NULL,
        .atomic_update = arc_pgu_plane_atomic_update,
 };
 
index 6d4ff34737cb73746ce37dd7b4c00293d74c1283..28e6471257d0d799d020f7e1dac47007760fcee9 100644 (file)
@@ -198,8 +198,8 @@ static int arcpgu_probe(struct platform_device *pdev)
        int ret;
 
        drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        ret = arcpgu_load(drm);
        if (ret)
index d83b46a3032765c85fc8b00534ac550720002c8f..fb6a418ce6be611176e26bf5fa699503178e8364 100644 (file)
@@ -326,8 +326,8 @@ static int hdlcd_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm = drm_dev_alloc(&hdlcd_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drm->dev_private = hdlcd;
        dev_set_drvdata(dev, drm);
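
This hdlcd hunk, the arcpgu one above, and the malidp and atmel_hlcdc ones below all track the same API change: drm_dev_alloc() now returns an ERR_PTR-encoded errno instead of NULL on failure, so callers test with IS_ERR() and propagate PTR_ERR(). The two halves of the convention in a generic sketch (struct foo and foo_alloc() are made-up names):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int dummy; };

    /* Producer: encode the errno in the returned pointer. */
    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            return f ? f : ERR_PTR(-ENOMEM);
    }

    /* Consumer, mirroring the hunks above and below: */
    static int foo_probe(void)
    {
            struct foo *f = foo_alloc();

            if (IS_ERR(f))
                    return PTR_ERR(f);
            kfree(f);
            return 0;
    }
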
index 82171d223f2d4f469ebfeac54aeba04cab4b015a..9280358b8f15219da56f1572c53e53272d27dc59 100644 (file)
@@ -91,7 +91,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
        drm_atomic_helper_commit_modeset_enables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, true);
+       drm_atomic_helper_commit_planes(drm, state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        malidp_atomic_commit_hw_done(state);
 
@@ -310,8 +311,8 @@ static int malidp_bind(struct device *dev)
                return ret;
 
        drm = drm_dev_alloc(&malidp_driver, dev);
-       if (!drm) {
-               ret = -ENOMEM;
+       if (IS_ERR(drm)) {
+               ret = PTR_ERR(drm);
                goto alloc_fail;
        }
 
index 95558fde214bf089b51cbaf4fd74a74e5c5b5536..271d2fb9711cf8b467a459c690be77280107aa37 100644 (file)
@@ -49,6 +49,6 @@ void malidp_de_planes_destroy(struct drm_device *drm);
 int malidp_crtc_init(struct drm_device *drm);
 
 /* often used combination of rotational bits */
-#define MALIDP_ROTATED_MASK    (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+#define MALIDP_ROTATED_MASK    (DRM_ROTATE_90 | DRM_ROTATE_270)
 
 #endif  /* __MALIDP_DRV_H__ */
index 725098d6179af73277500abeb2da6bcbaba6b21d..82c193e5e0d61654f5e28fcc961d0b74a31863a2 100644 (file)
@@ -108,7 +108,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
                return -EINVAL;
 
        /* packed RGB888 / BGR888 can't be rotated or flipped */
-       if (state->rotation != BIT(DRM_ROTATE_0) &&
+       if (state->rotation != DRM_ROTATE_0 &&
            (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
             state->fb->pixel_format == DRM_FORMAT_BGR888))
                return -EINVAL;
@@ -188,9 +188,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        /* setup the rotation and axis flip bits */
        if (plane->state->rotation & DRM_ROTATE_MASK)
                val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
-       if (plane->state->rotation & BIT(DRM_REFLECT_X))
+       if (plane->state->rotation & DRM_REFLECT_X)
                val |= LAYER_V_FLIP;
-       if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+       if (plane->state->rotation & DRM_REFLECT_Y)
                val |= LAYER_H_FLIP;
 
        /* set the 'enable layer' bit */
@@ -255,12 +255,12 @@ int malidp_de_planes_init(struct drm_device *drm)
                        goto cleanup;
 
                if (!drm->mode_config.rotation_property) {
-                       unsigned long flags = BIT(DRM_ROTATE_0) |
-                                             BIT(DRM_ROTATE_90) |
-                                             BIT(DRM_ROTATE_180) |
-                                             BIT(DRM_ROTATE_270) |
-                                             BIT(DRM_REFLECT_X) |
-                                             BIT(DRM_REFLECT_Y);
+                       unsigned long flags = DRM_ROTATE_0 |
+                                             DRM_ROTATE_90 |
+                                             DRM_ROTATE_180 |
+                                             DRM_ROTATE_270 |
+                                             DRM_REFLECT_X |
+                                             DRM_REFLECT_Y;
                        drm->mode_config.rotation_property =
                                drm_mode_create_rotation_property(drm, flags);
                }
@@ -268,7 +268,7 @@ int malidp_de_planes_init(struct drm_device *drm)
                if (drm->mode_config.rotation_property && (id != DE_SMART))
                        drm_object_attach_property(&plane->base.base,
                                                   drm->mode_config.rotation_property,
-                                                  BIT(DRM_ROTATE_0));
+                                                  DRM_ROTATE_0);
 
                drm_plane_helper_add(&plane->base,
                                     &malidp_de_plane_helper_funcs);
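
These malidp hunks, together with the armada and atmel_hlcdc rotation hunks below, follow a single API change: DRM_ROTATE_* and DRM_REFLECT_* used to be bit indices that callers wrapped in BIT(), and are now the mask values themselves. The mechanical translation at each call site (val and LAYER_ROT90 stand in for driver-specific bits):

    /* Before: rotation constants were bit positions. */
    if (state->rotation & BIT(DRM_ROTATE_90))
            val |= LAYER_ROT90;
    supported = BIT(DRM_ROTATE_0) | BIT(DRM_REFLECT_X);

    /* After: rotation constants are the masks themselves. */
    if (state->rotation & DRM_ROTATE_90)
            val |= LAYER_ROT90;
    supported = DRM_ROTATE_0 | DRM_REFLECT_X;
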
index f5ebdd68144579c12dea0c8342ce8dd24e89e2e3..1e0e68f608e460ace90ad6a80077e2b5154b03e2 100644 (file)
@@ -211,7 +211,7 @@ static struct drm_driver armada_drm_driver = {
        .desc                   = "Armada SoC DRM",
        .date                   = "20120730",
        .driver_features        = DRIVER_GEM | DRIVER_MODESET |
-                                 DRIVER_HAVE_IRQ | DRIVER_PRIME,
+                                 DRIVER_PRIME,
        .ioctls                 = armada_ioctls,
        .fops                   = &armada_drm_fops,
 };
index 7d03c51abcb9d6e3dfc9a277be5ad4141c21606b..ca73ad8614fe6ee30f2b86e0bb32c80c816e5c95 100644 (file)
@@ -7,7 +7,6 @@
  * published by the Free Software Foundation.
  */
 #include <linux/errno.h>
-#include <linux/fb.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
index 1ee707ef6b8d0d0b51033903dbe3a189de700f80..152b4e716269c4a2482d697fddb1398c0ff811f6 100644 (file)
@@ -121,7 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        int ret;
 
        ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
-                                           BIT(DRM_ROTATE_0),
+                                           DRM_ROTATE_0,
                                            0, INT_MAX, true, false, &visible);
        if (ret)
                return ret;
index c017a9330a18991cbd05eaa9be7cfeef557de98e..7a86e24e268750aa82e75f3f784594af5170485d 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 
 
index b29a41218fc92d8c6c6cfe7157b061e69f29fb24..608df4c90520278e59bfe75d3c3348d66af51e6c 100644 (file)
@@ -150,7 +150,8 @@ static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct ast_bo *astbo = ast_bo(bo);
 
-       return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&astbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index a978381ef95bfb3e6562ba34bccf26f0b2208ff4..9b17a66cf0e16245a50360d22d80a44e8602a94f 100644 (file)
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
        atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
        struct atmel_hlcdc_crtc_state *state;
 
index d4a3d61b7b06edaa869b03a17e1da51a76580277..5f484310bee99c4ba5bbd986f47223893756db77 100644 (file)
@@ -457,7 +457,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
 
        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state, false);
+       drm_atomic_helper_commit_planes(dev, old_state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -797,8 +797,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
        int ret;
 
        ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        ret = atmel_hlcdc_dc_load(ddev);
        if (ret)
index 016c191221f35db2246a3e014b8a4f7294d22649..9d4c030672f0ff8ae8a830e63afe26a7948707c0 100644 (file)
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
                        u32 *coeff_tab = heo_upscaling_ycoef;
                        u32 max_memsize;
 
-                       if (state->crtc_w < state->src_w)
+                       if (state->crtc_h < state->src_h)
                                coeff_tab = heo_downscaling_ycoef;
                        for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
                                atmel_hlcdc_layer_update_cfg(&plane->layer,
                                                             33 + i,
                                                             0xffffffff,
                                                             coeff_tab[i]);
-                       factor = ((8 * 256 * state->src_w) - (256 * 4)) /
-                                state->crtc_w;
+                       factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+                                state->crtc_h;
                        factor++;
-                       max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+                       max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
                                      2048;
-                       if (max_memsize > state->src_w)
+                       if (max_memsize > state->src_h)
                                factor--;
                        factor_reg |= (factor << 16) | 0x80000000;
                }
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
 
        if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
             state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
-           (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+           (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
                cfg |= ATMEL_HLCDC_YUV422ROT;
 
        atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
        /*
         * Swap width and height in case of 90 or 270 degree rotation
         */
-       if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+       if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
                tmp = state->crtc_w;
                state->crtc_w = state->crtc_h;
                state->crtc_h = tmp;
@@ -677,7 +677,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                        return -EINVAL;
 
                switch (state->base.rotation & DRM_ROTATE_MASK) {
-               case BIT(DRM_ROTATE_90):
+               case DRM_ROTATE_90:
                        offset = ((y_offset + state->src_y + patched_src_w - 1) /
                                  ydiv) * fb->pitches[i];
                        offset += ((x_offset + state->src_x) / xdiv) *
@@ -686,7 +686,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                                          fb->pitches[i];
                        state->pstride[i] = -fb->pitches[i] - state->bpp[i];
                        break;
-               case BIT(DRM_ROTATE_180):
+               case DRM_ROTATE_180:
                        offset = ((y_offset + state->src_y + patched_src_h - 1) /
                                  ydiv) * fb->pitches[i];
                        offset += ((x_offset + state->src_x + patched_src_w - 1) /
@@ -695,7 +695,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                                           state->bpp[i]) - fb->pitches[i];
                        state->pstride[i] = -2 * state->bpp[i];
                        break;
-               case BIT(DRM_ROTATE_270):
+               case DRM_ROTATE_270:
                        offset = ((y_offset + state->src_y) / ydiv) *
                                 fb->pitches[i];
                        offset += ((x_offset + state->src_x + patched_src_h - 1) /
@@ -705,7 +705,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                                          (2 * state->bpp[i]);
                        state->pstride[i] = fb->pitches[i] - state->bpp[i];
                        break;
-               case BIT(DRM_ROTATE_0):
+               case DRM_ROTATE_0:
                default:
                        offset = ((y_offset + state->src_y) / ydiv) *
                                 fb->pitches[i];
@@ -755,7 +755,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 }
 
 static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
-                                       const struct drm_plane_state *new_state)
+                                       struct drm_plane_state *new_state)
 {
        /*
         * FIXME: we should avoid this const -> non-const cast but it's
@@ -780,7 +780,7 @@ static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
 }
 
 static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
-                               const struct drm_plane_state *old_state)
+                                        struct drm_plane_state *old_state)
 {
        /*
         * FIXME: we should avoid this const -> non-const cast but it's
@@ -905,7 +905,7 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
        if (desc->layout.xstride && desc->layout.pstride)
                drm_object_attach_property(&plane->base.base,
                                plane->base.dev->mode_config.rotation_property,
-                               BIT(DRM_ROTATE_0));
+                               DRM_ROTATE_0);
 
        if (desc->layout.csc) {
                /*
@@ -1056,10 +1056,10 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
 
        dev->mode_config.rotation_property =
                        drm_mode_create_rotation_property(dev,
-                                                         BIT(DRM_ROTATE_0) |
-                                                         BIT(DRM_ROTATE_90) |
-                                                         BIT(DRM_ROTATE_180) |
-                                                         BIT(DRM_ROTATE_270));
+                                                         DRM_ROTATE_0 |
+                                                         DRM_ROTATE_90 |
+                                                         DRM_ROTATE_180 |
+                                                         DRM_ROTATE_270);
        if (!dev->mode_config.rotation_property)
                return ERR_PTR(-ENOMEM);
 
index 19b5adaebe24753d4fe8453eb57f866f788132bb..32dfe418cc983d6d595321932f838473eb2c6066 100644 (file)
@@ -1,5 +1,4 @@
 #include <linux/io.h>
-#include <linux/fb.h>
 #include <linux/console.h>
 
 #include <drm/drmP.h>
index abace82de6eac2de22b747ff54a5612443f69647..534227df23f355722e3ed6d1479bd60e12f75732 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <drm/drm_fb_helper.h>
 
 #include "bochs.h"
 
@@ -153,7 +154,7 @@ static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
 
        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);
-       remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
        kfree(ap);
 
        return 0;
@@ -162,8 +163,15 @@ static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
 static int bochs_pci_probe(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
 {
+       unsigned long fbsize;
        int ret;
 
+       fbsize = pci_resource_len(pdev, 0);
+       if (fbsize < 4 * 1024 * 1024) {
+               DRM_ERROR("less than 4 MB video memory, ignoring device\n");
+               return -ENOMEM;
+       }
+
        ret = bochs_kick_out_firmware_fb(pdev);
        if (ret)
                return ret;
index 207a2cbcc1133c829dd1b2c28138bab82fbf216d..0b4e5d11704335888c0d6818a44d486675ee97ba 100644 (file)
@@ -178,7 +178,7 @@ static void bochs_encoder_init(struct drm_device *dev)
 }
 
 
-int bochs_connector_get_modes(struct drm_connector *connector)
+static int bochs_connector_get_modes(struct drm_connector *connector)
 {
        int count;
 
index 5c5638a777a12ad12c1e0fdc8d9627d7d78ea50b..269cfca9ca06834dad2fad7359c3df13f4d97c09 100644 (file)
@@ -128,7 +128,8 @@ static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
 {
        struct bochs_bo *bochsbo = bochs_bo(bo);
 
-       return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index ec8fb2ed3275b229920da1fe417e4a7c4d54e9d1..8ed3906dd411ecedc49b9010eeaa8ac9fa9914fc 100644 (file)
@@ -922,15 +922,13 @@ static int adv7511_parse_dt(struct device_node *np,
        return 0;
 }
 
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
 static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
        struct adv7511_link_config link_config;
        struct adv7511 *adv7511;
        struct device *dev = &i2c->dev;
+       unsigned int main_i2c_addr = i2c->addr << 1;
+       unsigned int edid_i2c_addr = main_i2c_addr + 4;
        unsigned int val;
        int ret;
 
@@ -991,8 +989,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 
        regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
        regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-                    packet_i2c_addr);
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+                    main_i2c_addr - 0xa);
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+                    main_i2c_addr - 2);
+
        adv7511_packet_disable(adv7511, 0xffff);
 
        adv7511->i2c_main = i2c;
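
The replaced constants hard-coded one address layout; the new code derives it from the client address, in the 8-bit (left-shifted) form the ADV7511 map registers expect: EDID at main + 4, packet memory at main - 0xa, CEC at main - 2. For the default 7-bit address 0x3d this reproduces the old constants exactly; the adv7533 hunk below does the same for its CEC dummy client in 7-bit form (addr - 1 == 0x78 >> 1). Worked out:

    unsigned int main_addr   = 0x3d << 1;       /* 0x7a: 8-bit form of 0x3d  */
    unsigned int edid_addr   = main_addr + 4;   /* 0x7e, old edid_i2c_addr   */
    unsigned int packet_addr = main_addr - 0xa; /* 0x70, old packet_i2c_addr */
    unsigned int cec_addr    = main_addr - 2;   /* 0x78, old cec_i2c_addr    */
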
index 5eebd15899b112657cf4dd820fb78519dad0130d..d7f7b7ce8ebe271c81eb5b35491f764fe3119b88 100644 (file)
@@ -149,13 +149,12 @@ void adv7533_uninit_cec(struct adv7511 *adv)
        i2c_unregister_device(adv->i2c_cec);
 }
 
-static const int cec_i2c_addr = 0x78;
-
 int adv7533_init_cec(struct adv7511 *adv)
 {
        int ret;
 
-       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+                                    adv->i2c_main->addr - 1);
        if (!adv->i2c_cec)
                return -ENOMEM;
 
index 32715daf73cb70b491c808ef480be679d73db76b..0f2e423106942456655e171f6356b0094ad0634c 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
 
 #define to_dp(nm)      container_of(nm, struct analogix_dp_device, nm)
 
@@ -97,133 +98,81 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
        return 0;
 }
 
-static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
+int analogix_dp_enable_psr(struct device *dev)
 {
-       int i;
-       unsigned char sum = 0;
+       struct analogix_dp_device *dp = dev_get_drvdata(dev);
+       struct edp_vsc_psr psr_vsc;
+
+       if (!dp->psr_support)
+               return -EINVAL;
 
-       for (i = 0; i < EDID_BLOCK_LENGTH; i++)
-               sum = sum + edid_data[i];
+       /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+       memset(&psr_vsc, 0, sizeof(psr_vsc));
+       psr_vsc.sdp_header.HB0 = 0;
+       psr_vsc.sdp_header.HB1 = 0x7;
+       psr_vsc.sdp_header.HB2 = 0x2;
+       psr_vsc.sdp_header.HB3 = 0x8;
 
-       return sum;
+       psr_vsc.DB0 = 0;
+       psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+
+       analogix_dp_send_psr_spd(dp, &psr_vsc);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
 
-static int analogix_dp_read_edid(struct analogix_dp_device *dp)
+int analogix_dp_disable_psr(struct device *dev)
 {
-       unsigned char *edid = dp->edid;
-       unsigned int extend_block = 0;
-       unsigned char sum;
-       unsigned char test_vector;
-       int retval;
+       struct analogix_dp_device *dp = dev_get_drvdata(dev);
+       struct edp_vsc_psr psr_vsc;
 
-       /*
-        * EDID device address is 0x50.
-        * However, if necessary, the upper EDID block must first be
-        * selected via the E-EDID segment pointer at I2C address 0x30.
-        */
+       if (!dp->psr_support)
+               return -EINVAL;
 
-       /* Read Extension Flag, Number of 128-byte EDID extension blocks */
-       retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
-                                               EDID_EXTENSION_FLAG,
-                                               &extend_block);
-       if (retval)
-               return retval;
+       /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+       memset(&psr_vsc, 0, sizeof(psr_vsc));
+       psr_vsc.sdp_header.HB0 = 0;
+       psr_vsc.sdp_header.HB1 = 0x7;
+       psr_vsc.sdp_header.HB2 = 0x2;
+       psr_vsc.sdp_header.HB3 = 0x8;
 
-       if (extend_block > 0) {
-               dev_dbg(dp->dev, "EDID data includes a single extension!\n");
-
-               /* Read EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                                               I2C_EDID_DEVICE_ADDR,
-                                               EDID_HEADER_PATTERN,
-                                               EDID_BLOCK_LENGTH,
-                                               &edid[EDID_HEADER_PATTERN]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(edid);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
+       psr_vsc.DB0 = 0;
+       psr_vsc.DB1 = 0;
 
-               /* Read additional EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                               I2C_EDID_DEVICE_ADDR,
-                               EDID_BLOCK_LENGTH,
-                               EDID_BLOCK_LENGTH,
-                               &edid[EDID_BLOCK_LENGTH]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
+       analogix_dp_send_psr_spd(dp, &psr_vsc);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
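
Enable and disable build identical SDP headers and differ only in DB1, so the packet setup could be factored into one helper; per the in-code comments the header bytes follow eDP 1.4 spec Table 6-9 (HB1 = 0x7 marks a VSC packet, HB2 = 0x2 the revision, HB3 = 0x8 valid payload bytes). A sketch of such a helper (hypothetical, not part of this patch):

    static void analogix_dp_fill_psr_vsc(struct edp_vsc_psr *vsc, bool enable)
    {
            memset(vsc, 0, sizeof(*vsc));
            vsc->sdp_header.HB0 = 0;
            vsc->sdp_header.HB1 = 0x7;  /* VSC packet type  */
            vsc->sdp_header.HB2 = 0x2;  /* revision number  */
            vsc->sdp_header.HB3 = 0x8;  /* valid data bytes */
            vsc->DB0 = 0;
            vsc->DB1 = enable ? (EDP_VSC_PSR_STATE_ACTIVE |
                                 EDP_VSC_PSR_CRC_VALUES_VALID) : 0;
    }
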
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-                                               &test_vector);
-               if (test_vector & DP_TEST_LINK_EDID_READ) {
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_EDID_CHECKSUM,
-                               edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_RESPONSE,
-                               DP_TEST_EDID_CHECKSUM_WRITE);
-               }
-       } else {
-               dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
-               /* Read EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                               I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
-                               EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(edid);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+{
+       unsigned char psr_version;
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-                                               &test_vector);
-               if (test_vector & DP_TEST_LINK_EDID_READ) {
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
-               }
-       }
+       drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
+       dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
 
-       dev_dbg(dp->dev, "EDID Read success!\n");
-       return 0;
+       return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
 }
 
-static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
+static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
 {
-       u8 buf[12];
-       int i;
-       int retval;
+       unsigned char psr_en;
 
-       /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
-       retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
-       if (retval)
-               return retval;
+       /* Disable psr function */
+       drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+       psr_en &= ~DP_PSR_ENABLE;
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
 
-       /* Read EDID */
-       for (i = 0; i < 3; i++) {
-               retval = analogix_dp_read_edid(dp);
-               if (!retval)
-                       break;
-       }
+       /* Main-Link transmitter remains active during PSR active states */
+       psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
 
-       return retval;
+       /* Enable psr function */
+       psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
+                DP_PSR_CRC_VERIFICATION;
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+       analogix_dp_enable_psr_crc(dp);
 }
 
 static void
@@ -232,15 +181,15 @@ analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
 {
        u8 data;
 
-       analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
 
        if (enable)
-               analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-                                              DP_LANE_COUNT_ENHANCED_FRAME_EN |
-                                              DPCD_LANE_COUNT_SET(data));
+               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                  DP_LANE_COUNT_ENHANCED_FRAME_EN |
+                                       DPCD_LANE_COUNT_SET(data));
        else
-               analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-                                              DPCD_LANE_COUNT_SET(data));
+               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                  DPCD_LANE_COUNT_SET(data));
 }
 
 static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
@@ -248,7 +197,7 @@ static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
        u8 data;
        int retval;
 
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
        retval = DPCD_ENHANCED_FRAME_CAP(data);
 
        return retval;
@@ -267,8 +216,8 @@ static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
 {
        analogix_dp_set_training_pattern(dp, DP_NONE);
 
-       analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                      DP_TRAINING_PATTERN_DISABLE);
+       drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                          DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -313,8 +262,8 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
        /* Setup RX configuration */
        buf[0] = dp->link_train.link_rate;
        buf[1] = dp->link_train.lane_count;
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
-       if (retval)
+       retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
+       if (retval < 0)
                return retval;
 
        /* Set TX pre-emphasis to minimum */
@@ -338,20 +287,22 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
        analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
 
        /* Set RX training pattern */
-       retval = analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
-       if (retval)
+       retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                   DP_LINK_SCRAMBLING_DISABLE |
+                                       DP_TRAINING_PATTERN_1);
+       if (retval < 0)
                return retval;
 
        for (lane = 0; lane < lane_count; lane++)
                buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
                            DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-                                                lane_count, buf);
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf,
+                                  lane_count);
+       if (retval < 0)
+               return retval;
 
-       return retval;
+       return 0;
 }
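
A detail that recurs through all of these conversions: the driver-private DPCD accessors returned 0 on success, while drm_dp_dpcd_read()/drm_dp_dpcd_write() (and their readb/writeb wrappers) return the number of bytes transferred on success and a negative errno on failure. That is why every error check flips from 'if (retval)' to 'if (retval < 0)' and the success paths now return an explicit 0. Typical usage:

    u8 link_status[2];
    ssize_t ret;

    ret = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
    if (ret < 0)        /* negative errno: the AUX transfer itself failed */
            return ret;
    /* on success ret is the byte count (2 here), not 0 */
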
 
 static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
@@ -503,25 +454,23 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
 
        lane_count = dp->link_train.lane_count;
 
-       retval =  analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_LANE0_1_STATUS, 2, link_status);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+       if (retval < 0)
                return retval;
 
-       retval =  analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+                                 adjust_request, 2);
+       if (retval < 0)
                return retval;
 
        if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
                /* set training pattern 2 for EQ */
                analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-               retval = analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TRAINING_PATTERN_SET,
-                               DP_LINK_SCRAMBLING_DISABLE |
-                               DP_TRAINING_PATTERN_2);
-               if (retval)
+               retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                           DP_LINK_SCRAMBLING_DISABLE |
+                                               DP_TRAINING_PATTERN_2);
+               if (retval < 0)
                        return retval;
 
                dev_info(dp->dev, "Link Training Clock Recovery success\n");
@@ -559,13 +508,12 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
                analogix_dp_set_lane_link_training(dp,
                        dp->link_train.training_lane[lane], lane);
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp,
-                       DP_TRAINING_LANE0_SET, lane_count,
-                       dp->link_train.training_lane);
-       if (retval)
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+                                  dp->link_train.training_lane, lane_count);
+       if (retval < 0)
                return retval;
 
-       return retval;
+       return 0;
 }
 
 static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
@@ -578,9 +526,8 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 
        lane_count = dp->link_train.lane_count;
 
-       retval = analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_LANE0_1_STATUS, 2, link_status);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+       if (retval < 0)
                return retval;
 
        if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
@@ -588,14 +535,14 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
                return -EIO;
        }
 
-       retval = analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+                                 adjust_request, 2);
+       if (retval < 0)
                return retval;
 
-       retval = analogix_dp_read_byte_from_dpcd(dp,
-                       DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
-       if (retval)
+       retval = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+                                  &link_align);
+       if (retval < 0)
                return retval;
 
        analogix_dp_get_adjust_training_lane(dp, adjust_request);
@@ -636,10 +583,12 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
                analogix_dp_set_lane_link_training(dp,
                        dp->link_train.training_lane[lane], lane);
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-                       lane_count, dp->link_train.training_lane);
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+                                  dp->link_train.training_lane, lane_count);
+       if (retval < 0)
+               return retval;
 
-       return retval;
+       return 0;
 }
 
 static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
@@ -653,7 +602,7 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
         * For DP rev.1.2, Maximum link rate of Main Link lanes
         * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps
         */
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
        *bandwidth = data;
 }
 
@@ -666,7 +615,7 @@ static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
         * For DP rev.1.1, Maximum number of Main Link lanes
         * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
         */
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
        *lane_count = DPCD_MAX_LANE_COUNT(data);
 }
 
@@ -835,19 +784,15 @@ static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
        if (enable) {
                analogix_dp_enable_scrambling(dp);
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                               &data);
-               analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                  (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
        } else {
                analogix_dp_disable_scrambling(dp);
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                               &data);
-               analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                  (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
        }
 }
 
@@ -921,21 +866,85 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
 
        /* Enable video */
        analogix_dp_start_video(dp);
+
+       dp->psr_support = analogix_dp_detect_sink_psr(dp);
+       if (dp->psr_support)
+               analogix_dp_enable_sink_psr(dp);
 }
 
-int analogix_dp_get_modes(struct drm_connector *connector)
+/*
+ * This function is a bit of a catch-all for panel preparation, hopefully
+ * simplifying the logic of functions that need to prepare/unprepare the panel
+ * below.
+ *
+ * If @prepare is true, this function will prepare the panel. Conversely, if it
+ * is false, the panel will be unprepared.
+ *
+ * If @is_modeset_prepare is true, the function will disregard the current
+ * state of the panel and prepare or unprepare it based on @prepare. Once it
+ * finishes, it will update dp->panel_is_modeset to reflect the current state
+ * of the panel.
+ */
+static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
+                                    bool prepare, bool is_modeset_prepare)
 {
-       struct analogix_dp_device *dp = to_dp(connector);
-       struct edid *edid = (struct edid *)dp->edid;
-       int num_modes = 0;
+       int ret = 0;
 
-       if (analogix_dp_handle_edid(dp) == 0) {
-               drm_mode_connector_update_edid_property(&dp->connector, edid);
-               num_modes += drm_add_edid_modes(&dp->connector, edid);
-       }
+       if (!dp->plat_data->panel)
+               return 0;
 
-       if (dp->plat_data->panel)
+       mutex_lock(&dp->panel_lock);
+
+       /*
+        * Exit early if this is a temporary prepare/unprepare while the panel
+        * is modeset-prepared (neither prepare twice nor unprepare early).
+        */
+       if (dp->panel_is_modeset && !is_modeset_prepare)
+               goto out;
+
+       if (prepare)
+               ret = drm_panel_prepare(dp->plat_data->panel);
+       else
+               ret = drm_panel_unprepare(dp->plat_data->panel);
+
+       if (ret)
+               goto out;
+
+       if (is_modeset_prepare)
+               dp->panel_is_modeset = prepare;
+
+out:
+       mutex_unlock(&dp->panel_lock);
+       return ret;
+}
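+
For context, a hedged sketch of how the two prepare modes compose (hypothetical caller, not part of the patch): detection and EDID paths bracket their AUX traffic with temporary prepares, which become no-ops while the bridge holds a modeset prepare.

static int example_probe_sink(struct analogix_dp_device *dp)
{
        int ret;

        /* Temporary prepare; a no-op if the panel is modeset-prepared. */
        ret = analogix_dp_prepare_panel(dp, true, false);
        if (ret)
                return ret;

        /* ... AUX/DPCD access while the panel is powered ... */

        /* Matching temporary unprepare. */
        return analogix_dp_prepare_panel(dp, false, false);
}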
+
+static int analogix_dp_get_modes(struct drm_connector *connector)
+{
+       struct analogix_dp_device *dp = to_dp(connector);
+       struct edid *edid;
+       int ret, num_modes = 0;
+
+       if (dp->plat_data->panel) {
                num_modes += drm_panel_get_modes(dp->plat_data->panel);
+       } else {
+               ret = analogix_dp_prepare_panel(dp, true, false);
+               if (ret) {
+                       DRM_ERROR("Failed to prepare panel (%d)\n", ret);
+                       return 0;
+               }
+
+               edid = drm_get_edid(connector, &dp->aux.ddc);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(&dp->connector,
+                                                               edid);
+                       num_modes += drm_add_edid_modes(&dp->connector, edid);
+                       kfree(edid);
+               }
+
+               ret = analogix_dp_prepare_panel(dp, false, false);
+               if (ret)
+                       DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+       }
 
        if (dp->plat_data->get_modes)
                num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
@@ -956,15 +965,30 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func
        .best_encoder = analogix_dp_best_encoder,
 };
 
-enum drm_connector_status
+static enum drm_connector_status
 analogix_dp_detect(struct drm_connector *connector, bool force)
 {
        struct analogix_dp_device *dp = to_dp(connector);
+       enum drm_connector_status status = connector_status_disconnected;
+       int ret;
+
+       if (dp->plat_data->panel)
+               return connector_status_connected;
 
-       if (analogix_dp_detect_hpd(dp))
+       ret = analogix_dp_prepare_panel(dp, true, false);
+       if (ret) {
+               DRM_ERROR("Failed to prepare panel (%d)\n", ret);
                return connector_status_disconnected;
+       }
+
+       if (!analogix_dp_detect_hpd(dp))
+               status = connector_status_connected;
 
-       return connector_status_connected;
+       ret = analogix_dp_prepare_panel(dp, false, false);
+       if (ret)
+               DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+
+       return status;
 }
 
 static void analogix_dp_connector_destroy(struct drm_connector *connector)
@@ -1035,6 +1059,16 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
        return 0;
 }
 
+static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+       struct analogix_dp_device *dp = bridge->driver_private;
+       int ret;
+
+       ret = analogix_dp_prepare_panel(dp, true, true);
+       if (ret)
+               DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+}
+
 static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
 {
        struct analogix_dp_device *dp = bridge->driver_private;
@@ -1058,6 +1092,7 @@ static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 {
        struct analogix_dp_device *dp = bridge->driver_private;
+       int ret;
 
        if (dp->dpms_mode != DRM_MODE_DPMS_ON)
                return;
@@ -1077,6 +1112,10 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 
        pm_runtime_put_sync(dp->dev);
 
+       ret = analogix_dp_prepare_panel(dp, false, true);
+       if (ret)
+               DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+
        dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
@@ -1165,9 +1204,9 @@ static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
 }
 
 static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+       .pre_enable = analogix_dp_bridge_pre_enable,
        .enable = analogix_dp_bridge_enable,
        .disable = analogix_dp_bridge_disable,
-       .pre_enable = analogix_dp_bridge_nop,
        .post_disable = analogix_dp_bridge_nop,
        .mode_set = analogix_dp_bridge_mode_set,
        .attach = analogix_dp_bridge_attach,
@@ -1231,6 +1270,14 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
        return 0;
 }
 
+static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
+                                      struct drm_dp_aux_msg *msg)
+{
+       struct analogix_dp_device *dp = to_dp(aux);
+
+       return analogix_dp_transfer(dp, msg);
+}
+
 int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                     struct analogix_dp_plat_data *plat_data)
 {
@@ -1254,6 +1301,9 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
        dp->dev = &pdev->dev;
        dp->dpms_mode = DRM_MODE_DPMS_OFF;
 
+       mutex_init(&dp->panel_lock);
+       dp->panel_is_modeset = false;
+
        /*
         * The platform dp driver needs container_of() on the plat_data to get
         * the driver private data, so we need to store the pointer to
@@ -1333,13 +1383,6 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
        phy_power_on(dp->phy);
 
-       if (dp->plat_data->panel) {
-               if (drm_panel_prepare(dp->plat_data->panel)) {
-                       DRM_ERROR("failed to setup the panel\n");
-                       return -EBUSY;
-               }
-       }
-
        analogix_dp_init_dp(dp);
 
        ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1355,6 +1398,14 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
        dp->drm_dev = drm_dev;
        dp->encoder = dp->plat_data->encoder;
 
+       dp->aux.name = "DP-AUX";
+       dp->aux.transfer = analogix_dpaux_transfer;
+       dp->aux.dev = &pdev->dev;
+
+       ret = drm_dp_aux_register(&dp->aux);
+       if (ret)
+               goto err_disable_pm_runtime;
+
        ret = analogix_dp_create_bridge(drm_dev, dp);
        if (ret) {
                DRM_ERROR("failed to create bridge (%d)\n", ret);
index b45638043ec41308681902d06907786a58456522..5c6a28806129a4e6d2eb668db7e306f9d772877c 100644 (file)
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
 
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR                   0x50
-#define I2C_E_EDID_DEVICE_ADDR                 0x30
-
-#define EDID_BLOCK_LENGTH                      0x80
-#define EDID_HEADER_PATTERN                    0x00
-#define EDID_EXTENSION_FLAG                    0x7e
-#define EDID_CHECKSUM                          0x7f
-
 /* DP_MAX_LANE_COUNT */
 #define DPCD_ENHANCED_FRAME_CAP(x)             (((x) >> 7) & 0x1)
 #define DPCD_MAX_LANE_COUNT(x)                 ((x) & 0x1f)
@@ -166,6 +157,7 @@ struct analogix_dp_device {
        struct drm_device       *drm_dev;
        struct drm_connector    connector;
        struct drm_bridge       *bridge;
+       struct drm_dp_aux       aux;
        struct clk              *clock;
        unsigned int            irq;
        void __iomem            *reg_base;
@@ -176,7 +168,10 @@ struct analogix_dp_device {
        int                     dpms_mode;
        int                     hpd_gpio;
        bool                    force_hpd;
-       unsigned char           edid[EDID_BLOCK_LENGTH * 2];
+       bool                    psr_support;
+
+       struct mutex            panel_lock;
+       bool                    panel_is_modeset;
 
        struct analogix_dp_plat_data *plat_data;
 };
@@ -206,33 +201,6 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp);
 void analogix_dp_init_aux(struct analogix_dp_device *dp);
 int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
 void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
-int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
-                                  unsigned int reg_addr,
-                                  unsigned char data);
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned char *data);
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char data[]);
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-                                    unsigned int reg_addr,
-                                    unsigned int count,
-                                    unsigned char data[]);
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-                                 unsigned int device_addr,
-                                 unsigned int reg_addr);
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-                                  unsigned int device_addr,
-                                  unsigned int reg_addr,
-                                  unsigned int *data);
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-                                   unsigned int device_addr,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char edid[]);
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
 void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
 void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
@@ -278,4 +246,10 @@ int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp);
 void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
 void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
 void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+                             struct edp_vsc_psr *vsc);
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+                            struct drm_dp_aux_msg *msg);
+
 #endif /* _ANALOGIX_DP_CORE_H */
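
As a usage sketch for the PSR declarations above (illustrative; the VSC header/DB values follow the eDP 1.3 SDP layout and are assumptions, not taken from this patch):

static void example_psr_enter(struct analogix_dp_device *dp)
{
        struct edp_vsc_psr psr_vsc = {
                .sdp_header.HB1 = 0x7,  /* VSC SDP type (assumed) */
                .sdp_header.HB2 = 0x2,  /* revision (assumed) */
                .sdp_header.HB3 = 0x8,  /* valid payload bytes (assumed) */
                .DB1 = EDP_VSC_PSR_STATE_ACTIVE |
                       EDP_VSC_PSR_CRC_VALUES_VALID,
        };

        analogix_dp_send_psr_spd(dp, &psr_vsc);
}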
index 48030f0cf4971672df899c9ca2cf2cac41f07fad..cd37ac058675f7c00038dd774ac083fdb8196fd8 100644 (file)
@@ -585,330 +585,6 @@ int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
        return retval;
 }
 
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned char *data)
-{
-       u32 reg;
-       int i;
-       int retval;
-
-       for (i = 0; i < 3; i++) {
-               /* Clear AUX CH data buffer */
-               reg = BUF_CLR;
-               writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-               /* Select DPCD device address */
-               reg = AUX_ADDR_7_0(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-               reg = AUX_ADDR_15_8(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-               reg = AUX_ADDR_19_16(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-               /*
-                * Set DisplayPort transaction and read 1 byte
-                * If bit 3 is 1, DisplayPort transaction.
-                * If Bit 3 is 0, I2C transaction.
-                */
-               reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-               /* Start AUX transaction */
-               retval = analogix_dp_start_aux_transaction(dp);
-               if (retval == 0)
-                       break;
-
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-       }
-
-       /* Read data buffer */
-       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-       *data = (unsigned char)(reg & 0xff);
-
-       return retval;
-}
-
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char data[])
-{
-       u32 reg;
-       unsigned int start_offset;
-       unsigned int cur_data_count;
-       unsigned int cur_data_idx;
-       int i;
-       int retval = 0;
-
-       /* Clear AUX CH data buffer */
-       reg = BUF_CLR;
-       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-       start_offset = 0;
-       while (start_offset < count) {
-               /* Buffer size of AUX CH is 16 * 4bytes */
-               if ((count - start_offset) > 16)
-                       cur_data_count = 16;
-               else
-                       cur_data_count = count - start_offset;
-
-               for (i = 0; i < 3; i++) {
-                       /* Select DPCD device address */
-                       reg = AUX_ADDR_7_0(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-                       reg = AUX_ADDR_15_8(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-                       reg = AUX_ADDR_19_16(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-                       for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-                            cur_data_idx++) {
-                               reg = data[start_offset + cur_data_idx];
-                               writel(reg, dp->reg_base +
-                                      ANALOGIX_DP_BUF_DATA_0 +
-                                      4 * cur_data_idx);
-                       }
-
-                       /*
-                        * Set DisplayPort transaction and write
-                        * If bit 3 is 1, DisplayPort transaction.
-                        * If Bit 3 is 0, I2C transaction.
-                        */
-                       reg = AUX_LENGTH(cur_data_count) |
-                               AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-                       /* Start AUX transaction */
-                       retval = analogix_dp_start_aux_transaction(dp);
-                       if (retval == 0)
-                               break;
-
-                       dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                               __func__);
-               }
-
-               start_offset += cur_data_count;
-       }
-
-       return retval;
-}
-
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-                                    unsigned int reg_addr,
-                                    unsigned int count,
-                                    unsigned char data[])
-{
-       u32 reg;
-       unsigned int start_offset;
-       unsigned int cur_data_count;
-       unsigned int cur_data_idx;
-       int i;
-       int retval = 0;
-
-       /* Clear AUX CH data buffer */
-       reg = BUF_CLR;
-       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-       start_offset = 0;
-       while (start_offset < count) {
-               /* Buffer size of AUX CH is 16 * 4bytes */
-               if ((count - start_offset) > 16)
-                       cur_data_count = 16;
-               else
-                       cur_data_count = count - start_offset;
-
-               /* AUX CH Request Transaction process */
-               for (i = 0; i < 3; i++) {
-                       /* Select DPCD device address */
-                       reg = AUX_ADDR_7_0(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-                       reg = AUX_ADDR_15_8(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-                       reg = AUX_ADDR_19_16(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-                       /*
-                        * Set DisplayPort transaction and read
-                        * If bit 3 is 1, DisplayPort transaction.
-                        * If Bit 3 is 0, I2C transaction.
-                        */
-                       reg = AUX_LENGTH(cur_data_count) |
-                               AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-                       /* Start AUX transaction */
-                       retval = analogix_dp_start_aux_transaction(dp);
-                       if (retval == 0)
-                               break;
-
-                       dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                               __func__);
-               }
-
-               for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-                   cur_data_idx++) {
-                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-                                                + 4 * cur_data_idx);
-                       data[start_offset + cur_data_idx] =
-                               (unsigned char)reg;
-               }
-
-               start_offset += cur_data_count;
-       }
-
-       return retval;
-}
-
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-                                 unsigned int device_addr,
-                                 unsigned int reg_addr)
-{
-       u32 reg;
-       int retval;
-
-       /* Set EDID device address */
-       reg = device_addr;
-       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-       writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-       writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-       /* Set offset from base address of EDID device */
-       writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-       /*
-        * Set I2C transaction and write address
-        * If bit 3 is 1, DisplayPort transaction.
-        * If Bit 3 is 0, I2C transaction.
-        */
-       reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
-               AUX_TX_COMM_WRITE;
-       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-       /* Start AUX transaction */
-       retval = analogix_dp_start_aux_transaction(dp);
-       if (retval != 0)
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
-       return retval;
-}
-
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-                                  unsigned int device_addr,
-                                  unsigned int reg_addr,
-                                  unsigned int *data)
-{
-       u32 reg;
-       int i;
-       int retval;
-
-       for (i = 0; i < 3; i++) {
-               /* Clear AUX CH data buffer */
-               reg = BUF_CLR;
-               writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-               /* Select EDID device */
-               retval = analogix_dp_select_i2c_device(dp, device_addr,
-                                                      reg_addr);
-               if (retval != 0)
-                       continue;
-
-               /*
-                * Set I2C transaction and read data
-                * If bit 3 is 1, DisplayPort transaction.
-                * If Bit 3 is 0, I2C transaction.
-                */
-               reg = AUX_TX_COMM_I2C_TRANSACTION |
-                       AUX_TX_COMM_READ;
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-               /* Start AUX transaction */
-               retval = analogix_dp_start_aux_transaction(dp);
-               if (retval == 0)
-                       break;
-
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-       }
-
-       /* Read data */
-       if (retval == 0)
-               *data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-       return retval;
-}
-
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-                                   unsigned int device_addr,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char edid[])
-{
-       u32 reg;
-       unsigned int i, j;
-       unsigned int cur_data_idx;
-       unsigned int defer = 0;
-       int retval = 0;
-
-       for (i = 0; i < count; i += 16) {
-               for (j = 0; j < 3; j++) {
-                       /* Clear AUX CH data buffer */
-                       reg = BUF_CLR;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-                       /* Set normal AUX CH command */
-                       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-                       reg &= ~ADDR_ONLY;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-
-                       /*
-                        * If Rx sends defer, Tx sends only reads
-                        * request without sending address
-                        */
-                       if (!defer)
-                               retval = analogix_dp_select_i2c_device(dp,
-                                               device_addr, reg_addr + i);
-                       else
-                               defer = 0;
-
-                       if (retval == 0) {
-                               /*
-                                * Set I2C transaction and write data
-                                * If bit 3 is 1, DisplayPort transaction.
-                                * If Bit 3 is 0, I2C transaction.
-                                */
-                               reg = AUX_LENGTH(16) |
-                                       AUX_TX_COMM_I2C_TRANSACTION |
-                                       AUX_TX_COMM_READ;
-                               writel(reg, dp->reg_base +
-                                       ANALOGIX_DP_AUX_CH_CTL_1);
-
-                               /* Start AUX transaction */
-                               retval = analogix_dp_start_aux_transaction(dp);
-                               if (retval == 0)
-                                       break;
-
-                               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                                       __func__);
-                       }
-                       /* Check if Rx sends defer */
-                       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
-                       if (reg == AUX_RX_COMM_AUX_DEFER ||
-                           reg == AUX_RX_COMM_I2C_DEFER) {
-                               dev_err(dp->dev, "Defer: %d\n\n", reg);
-                               defer = 1;
-                       }
-               }
-
-               for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
-                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-                                                + 4 * cur_data_idx);
-                       edid[i + cur_data_idx] = (unsigned char)reg;
-               }
-       }
-
-       return retval;
-}
-
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
 {
        u32 reg;
@@ -1073,34 +749,22 @@ void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
 
 u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
 }
 
 void analogix_dp_reset_macro(struct analogix_dp_device *dp)
@@ -1322,3 +986,181 @@ void analogix_dp_disable_scrambling(struct analogix_dp_device *dp)
        reg |= SCRAMBLING_DISABLE;
        writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
 }
+
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
+{
+       writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
+}
+
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+                             struct edp_vsc_psr *vsc)
+{
+       unsigned int val;
+
+       /* don't send info frame */
+       val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+       val &= ~IF_EN;
+       writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+       /* configure burst frame update with hardware-generated CRC */
+       writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE,
+              dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL);
+
+       /* configure VSC HB0~HB3 */
+       writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0);
+       writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1);
+       writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2);
+       writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3);
+
+       /* configure reused VSC PB0~PB3, magic numbers from the vendor */
+       writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0);
+       writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1);
+       writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2);
+       writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
+
+       /* configure DB0 / DB1 values */
+       writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
+       writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
+
+       /* enable reuse of the SPD infoframe */
+       val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+       val |= REUSE_SPD_EN;
+       writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+
+       /* mark info frame update */
+       val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+       val = (val | IF_UP) & ~IF_EN;
+       writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+       /* send info frame */
+       val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+       val |= IF_EN;
+       writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+}
+
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+                            struct drm_dp_aux_msg *msg)
+{
+       u32 reg;
+       u8 *buffer = msg->buffer;
+       int timeout_loop = 0;
+       unsigned int i;
+       int num_transferred = 0;
+
+       /* Buffer size of AUX CH is 16 bytes */
+       if (WARN_ON(msg->size > 16))
+               return -E2BIG;
+
+       /* Clear AUX CH data buffer */
+       reg = BUF_CLR;
+       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+       switch (msg->request & ~DP_AUX_I2C_MOT) {
+       case DP_AUX_I2C_WRITE:
+               reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_I2C_TRANSACTION;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       reg |= AUX_TX_COMM_MOT;
+               break;
+
+       case DP_AUX_I2C_READ:
+               reg = AUX_TX_COMM_READ | AUX_TX_COMM_I2C_TRANSACTION;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       reg |= AUX_TX_COMM_MOT;
+               break;
+
+       case DP_AUX_NATIVE_WRITE:
+               reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_DP_TRANSACTION;
+               break;
+
+       case DP_AUX_NATIVE_READ:
+               reg = AUX_TX_COMM_READ | AUX_TX_COMM_DP_TRANSACTION;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       reg |= AUX_LENGTH(msg->size);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+       /* Select DPCD device address */
+       reg = AUX_ADDR_7_0(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+       reg = AUX_ADDR_15_8(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+       reg = AUX_ADDR_19_16(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+       if (!(msg->request & DP_AUX_I2C_READ)) {
+               for (i = 0; i < msg->size; i++) {
+                       reg = buffer[i];
+                       writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+                              4 * i);
+                       num_transferred++;
+               }
+       }
+
+       /* Enable AUX CH operation */
+       reg = AUX_EN;
+
+       /* Zero-sized messages specify address-only transactions. */
+       if (msg->size < 1)
+               reg |= ADDR_ONLY;
+
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+       /* Is AUX CH command reply received? */
+       /* TODO: Wait for an interrupt instead of looping? */
+       reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+       while (!(reg & RPLY_RECEIV)) {
+               timeout_loop++;
+               if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+                       dev_err(dp->dev, "AUX CH command reply failed!\n");
+                       return -ETIMEDOUT;
+               }
+               reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+               usleep_range(10, 11);
+       }
+
+       /* Clear interrupt source for AUX CH command reply */
+       writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+       /* Clear interrupt source for AUX CH access error */
+       reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+       if (reg & AUX_ERR) {
+               writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+               return -EREMOTEIO;
+       }
+
+       /* Check AUX CH error access status */
+       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+       if ((reg & AUX_STATUS_MASK)) {
+               dev_err(dp->dev, "AUX CH error: %d\n",
+                       reg & AUX_STATUS_MASK);
+               return -EREMOTEIO;
+       }
+
+       if (msg->request & DP_AUX_I2C_READ) {
+               for (i = 0; i < msg->size; i++) {
+                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+                                   4 * i);
+                       buffer[i] = (unsigned char)reg;
+                       num_transferred++;
+               }
+       }
+
+       /* Check if Rx sends defer */
+       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+       if (reg == AUX_RX_COMM_AUX_DEFER)
+               msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+       else if (reg == AUX_RX_COMM_I2C_DEFER)
+               msg->reply = DP_AUX_I2C_REPLY_DEFER;
+       else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE ||
+                (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_READ)
+               msg->reply = DP_AUX_I2C_REPLY_ACK;
+       else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE ||
+                (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+               msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+       return num_transferred;
+}
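
With dp->aux registered and its .transfer hook pointing at analogix_dpaux_transfer(), every drm_dp_dpcd_*() call in the core funnels through the function above. A minimal hedged sketch of a caller (hypothetical helper, not part of the patch):

static int example_read_dpcd_rev(struct analogix_dp_device *dp, u8 *rev)
{
        /* One-byte native AUX read, routed through analogix_dp_transfer(). */
        ssize_t ret = drm_dp_dpcd_readb(&dp->aux, DP_DPCD_REV, rev);

        return ret < 0 ? ret : 0;
}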
index cdcc6c5add5e9c27befb644520784edaec9c6a4a..40200c652533b1fd4909c23010ce5c6828d2998e 100644 (file)
@@ -22,6 +22,8 @@
 #define ANALOGIX_DP_VIDEO_CTL_8                        0x3C
 #define ANALOGIX_DP_VIDEO_CTL_10               0x44
 
+#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0          0xD8
+
 #define ANALOGIX_DP_PLL_REG_1                  0xfc
 #define ANALOGIX_DP_PLL_REG_2                  0x9e4
 #define ANALOGIX_DP_PLL_REG_3                  0x9e8
 
 #define ANALOGIX_DP_PD                         0x12c
 
+#define ANALOGIX_DP_IF_TYPE                    0x244
+#define ANALOGIX_DP_IF_PKT_DB1                 0x254
+#define ANALOGIX_DP_IF_PKT_DB2                 0x258
+#define ANALOGIX_DP_SPD_HB0                    0x2F8
+#define ANALOGIX_DP_SPD_HB1                    0x2FC
+#define ANALOGIX_DP_SPD_HB2                    0x300
+#define ANALOGIX_DP_SPD_HB3                    0x304
+#define ANALOGIX_DP_SPD_PB0                    0x308
+#define ANALOGIX_DP_SPD_PB1                    0x30C
+#define ANALOGIX_DP_SPD_PB2                    0x310
+#define ANALOGIX_DP_SPD_PB3                    0x314
+#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL      0x318
+#define ANALOGIX_DP_VSC_SHADOW_DB0             0x31C
+#define ANALOGIX_DP_VSC_SHADOW_DB1             0x320
+
 #define ANALOGIX_DP_LANE_MAP                   0x35C
 
 #define ANALOGIX_DP_ANALOG_CTL_1               0x370
 
 #define ANALOGIX_DP_SOC_GENERAL_CTL            0x800
 
+#define ANALOGIX_DP_CRC_CON                    0x890
+
 /* ANALOGIX_DP_TX_SW_RESET */
 #define RESET_DP_TX                            (0x1 << 0)
 
 #define VID_CHK_UPDATE_TYPE_SHIFT              (4)
 #define VID_CHK_UPDATE_TYPE_1                  (0x1 << 4)
 #define VID_CHK_UPDATE_TYPE_0                  (0x0 << 4)
+#define REUSE_SPD_EN                           (0x1 << 3)
 
 /* ANALOGIX_DP_VIDEO_CTL_8 */
 #define VID_HRES_TH(x)                         (((x) & 0xf) << 4)
 #define REF_CLK_27M                            (0x0 << 0)
 #define REF_CLK_MASK                           (0x1 << 0)
 
+/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */
+#define PSR_FRAME_UP_TYPE_BURST                        (0x1 << 0)
+#define PSR_FRAME_UP_TYPE_SINGLE               (0x0 << 0)
+#define PSR_CRC_SEL_HARDWARE                   (0x1 << 1)
+#define PSR_CRC_SEL_MANUALLY                   (0x0 << 1)
+
 /* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0                 (0x0 << 6)
 #define LANE3_MAP_LOGIC_LANE_1                 (0x1 << 6)
 #define VIDEO_MODE_SLAVE_MODE                  (0x1 << 0)
 #define VIDEO_MODE_MASTER_MODE                 (0x0 << 0)
 
+/* ANALOGIX_DP_PKT_SEND_CTL */
+#define IF_UP                                  (0x1 << 4)
+#define IF_EN                                  (0x1 << 0)
+
+/* ANALOGIX_DP_CRC_CON */
+#define PSR_VID_CRC_FLUSH                      (0x1 << 2)
+#define PSR_VID_CRC_ENABLE                     (0x1 << 0)
+
 #endif /* _ANALOGIX_DP_REG_H */
index 122bb015f4a9e644d0978419c2b080fc1df8277b..8f2d1379c8809b8b8c428fb521c58bf1282d7c52 100644 (file)
@@ -640,7 +640,6 @@ static struct platform_driver snd_dw_hdmi_driver = {
        .remove = snd_dw_hdmi_remove,
        .driver = {
                .name = DRIVER_NAME,
-               .owner = THIS_MODULE,
                .pm = PM_OPS,
        },
 };
index 77ab47341658a2e109b323ee9393fc5456bdda12..66ad8e6fb11ed8beb54d08e062f4bccc06618936 100644 (file)
@@ -940,10 +940,11 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
         */
 
        /*
-        * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
-        * active aspect present in bit 6 rather than 4.
+        * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6,
+        * scan info in bits 4,5 rather than 0,1 and active aspect present in
+        * bit 6 rather than 4.
         */
-       val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+       val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3);
        if (frame.active_aspect & 15)
                val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
        if (frame.top_bar || frame.bottom_bar)
@@ -1812,9 +1813,6 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
        /* Disable all interrupts */
        hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
-       hdmi->connector.funcs->destroy(&hdmi->connector);
-       hdmi->encoder->funcs->destroy(hdmi->encoder);
-
        clk_disable_unprepare(hdmi->iahb_clk);
        clk_disable_unprepare(hdmi->isfr_clk);
        i2c_put_adapter(hdmi->ddc);
index 583b8ce614e3ba68b5d4cc93c580da92d4bd6226..f1b39a2645cc2be14287547c61a57060e5a31f15 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/fb.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
index b05f7eae32ce756e39f656adcbadc2ddfb55893c..6c76d125995bef3f20b566292aeeb7ad9122c86d 100644 (file)
@@ -57,7 +57,7 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 #ifdef CONFIG_X86
        primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-       remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
        kfree(ap);
 
        return 0;
index 3b5be7272357f3a4f415a83a009a924e1da68450..daecf1ad76a454845775a802c914384d53fa4c9e 100644 (file)
@@ -13,8 +13,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/fb.h>
-
 #include "cirrus_drv.h"
 
 static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
index 80446e2d3ab6efd00f3d3e777e4178b26ee6ae5d..76bcb43e7c06ac2dcf56a4d183e61c869a3544c5 100644 (file)
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
                goto out;
        }
 
+       /*
+        * cirrus_modeset_init() initializes and registers the emulated fbdev,
+        * and DRM internals can access/test some of the fields in
+        * mode_config->funcs as part of the fbdev registration process.
+        * Make sure dev->mode_config.funcs is properly set to avoid
+        * dereferencing a NULL pointer.
+        * FIXME: mode_config.funcs assignment should probably be done in
+        * cirrus_modeset_init() (that's a common pattern seen in other DRM
+        * drivers).
+        */
+       dev->mode_config.funcs = &cirrus_mode_funcs;
        r = cirrus_modeset_init(cdev);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
                goto out;
        }
 
-       dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
        return 0;
 out:
        cirrus_driver_unload(dev);
index 1cc9ee607128772295bf6f49c8ba9ad93f73abd1..bb2438dd8733f4c2c64618629abf1e946395f02a 100644 (file)
@@ -150,7 +150,8 @@ static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *fi
 {
        struct cirrus_bo *cirrusbo = cirrus_bo(bo);
 
-       return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index 605bd243fb36d1fc69bf79f9aa46cc46268175ec..d621c8a4cf00b5445bb4c1cc36c810f0480efd22 100644 (file)
@@ -430,9 +430,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
  * intact so it can still be used. It is safe to call this if AGP is disabled or
  * was already removed.
  *
- * If DRIVER_MODESET is active, nothing is done to protect the modesetting
- * resources from getting destroyed. Drivers are responsible of cleaning them up
- * during device shutdown.
+ * Cleanup is only done for drivers that have DRIVER_LEGACY set.
  */
 void drm_legacy_agp_clear(struct drm_device *dev)
 {
@@ -440,7 +438,7 @@ void drm_legacy_agp_clear(struct drm_device *dev)
 
        if (!dev->agp)
                return;
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
index fa393075797210eb51a0188a823cfd33759bbaa2..23739609427d86b9cd64d81ddad719bf5fc2bd78 100644 (file)
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                                        val,
                                        -1,
                                        &replaced);
-               state->color_mgmt_changed = replaced;
+               state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->ctm_property) {
                ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                                        val,
                                        sizeof(struct drm_color_ctm),
                                        &replaced);
-               state->color_mgmt_changed = replaced;
+               state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->gamma_lut_property) {
                ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                                        val,
                                        -1,
                                        &replaced);
-               state->color_mgmt_changed = replaced;
+               state->color_mgmt_changed |= replaced;
                return ret;
        } else if (crtc->funcs->atomic_set_property)
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
@@ -837,8 +837,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
        /* Check whether this plane supports the fb pixel format. */
        ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
        if (ret) {
-               DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
-                                drm_get_format_name(state->fb->pixel_format));
+               char *format_name = drm_get_format_name(state->fb->pixel_format);
+               DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
+               kfree(format_name);
                return ret;
        }
 
@@ -1690,7 +1691,7 @@ retry:
                                goto out;
                        }
 
-                       prop = drm_property_find(dev, prop_id);
+                       prop = drm_mode_obj_find_prop_id(obj, prop_id);
                        if (!prop) {
                                drm_mode_object_unreference(obj);
                                ret = -ENOENT;
index 20be86d89a202cf65ab9708f97c26572ac7cc5d6..c3f83476f99601c2ff91711b602c902b55170f71 100644 (file)
@@ -594,7 +594,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
        struct drm_plane_state *plane_state;
        int i, ret = 0;
 
-       ret = drm_atomic_helper_normalize_zpos(dev, state);
+       ret = drm_atomic_normalize_zpos(dev, state);
        if (ret)
                return ret;
 
@@ -749,6 +749,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                /* Right function depends upon target state. */
                if (crtc->state->enable && funcs->prepare)
                        funcs->prepare(crtc);
+               else if (funcs->atomic_disable)
+                       funcs->atomic_disable(crtc, old_crtc_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
                else
@@ -886,8 +888,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
                 * Each encoder has at most one connector (since we always steal
                 * it away), so we won't call mode_set hooks twice.
                 */
-               if (funcs && funcs->mode_set)
+               if (funcs && funcs->atomic_mode_set) {
+                       funcs->atomic_mode_set(encoder, new_crtc_state,
+                                              connector->state);
+               } else if (funcs && funcs->mode_set) {
                        funcs->mode_set(encoder, mode, adjusted_mode);
+               }
 
                drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
        }
@@ -1003,29 +1009,46 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  * @dev: DRM device
  * @state: atomic state object with old state structures
+ * @pre_swap: if true, do an interruptible wait
  *
  * For implicit sync, drivers should fish the exclusive fence out of the
  * incoming fbs and stash it in the drm_plane_state. This is called after
  * drm_atomic_helper_swap_state(), so it uses the current plane state (and
  * just uses the atomic state to find the changed planes).
+ *
+ * Returns zero on success or a negative error code if fence_wait() fails.
  */
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-                           struct drm_atomic_state *state)
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+                                     struct drm_atomic_state *state,
+                                     bool pre_swap)
 {
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
-       int i;
+       int i, ret;
 
        for_each_plane_in_state(state, plane, plane_state, i) {
-               if (!plane->state->fence)
+               if (!pre_swap)
+                       plane_state = plane->state;
+
+               if (!plane_state->fence)
                        continue;
 
-               WARN_ON(!plane->state->fb);
+               WARN_ON(!plane_state->fb);
+
+               /*
+                * If waiting for fences pre-swap (i.e. nonblocking), userspace can
+                * still interrupt the operation. Instead of blocking until the
+                * timer expires, make the wait interruptible.
+                */
+               ret = fence_wait(plane_state->fence, pre_swap);
+               if (ret)
+                       return ret;
 
-               fence_wait(plane->state->fence, false);
-               fence_put(plane->state->fence);
-               plane->state->fence = NULL;
+               fence_put(plane_state->fence);
+               plane_state->fence = NULL;
        }
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
 
@@ -1142,7 +1165,8 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  *
  *     drm_atomic_helper_commit_modeset_enables(dev, state);
  *
- *     drm_atomic_helper_commit_planes(dev, state, true);
+ *     drm_atomic_helper_commit_planes(dev, state,
+ *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
  *
  * for committing the atomic update to hardware.  See the kerneldoc entries for
  * these three functions for more details.
@@ -1153,7 +1177,7 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state, false);
+       drm_atomic_helper_commit_planes(dev, state, 0);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -1172,7 +1196,7 @@ static void commit_tail(struct drm_atomic_state *state)
 
        funcs = dev->mode_config.helper_private;
 
-       drm_atomic_helper_wait_for_fences(dev, state);
+       drm_atomic_helper_wait_for_fences(dev, state, false);
 
        drm_atomic_helper_wait_for_dependencies(state);
 
@@ -1231,6 +1255,12 @@ int drm_atomic_helper_commit(struct drm_device *dev,
        if (ret)
                return ret;
 
+       if (!nonblock) {
+               ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
@@ -1631,6 +1661,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
+               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+                       continue;
+
                if (funcs->prepare_fb) {
                        ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
@@ -1647,18 +1680,20 @@ fail:
                if (j >= i)
                        continue;
 
+               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+                       continue;
+
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
                        funcs->cleanup_fb(plane, plane_state);
-
        }
 
        return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
-bool plane_crtc_active(struct drm_plane_state *state)
+static bool plane_crtc_active(const struct drm_plane_state *state)
 {
        return state->crtc && state->crtc->state->active;
 }
@@ -1667,7 +1702,7 @@ bool plane_crtc_active(struct drm_plane_state *state)
  * drm_atomic_helper_commit_planes - commit plane state
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
- * @active_only: Only commit on active CRTC if set
+ * @flags: flags for committing plane state
  *
  * This function commits the new plane state using the plane and atomic helper
  * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1687,25 +1722,34 @@ bool plane_crtc_active(struct drm_plane_state *state)
  * most drivers don't need to be immediately notified of plane updates for a
  * disabled CRTC.
  *
- * Unless otherwise needed, drivers are advised to set the @active_only
- * parameters to true in order not to receive plane update notifications related
- * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
+ * @flags in order not to receive plane update notifications related to a
+ * disabled CRTC. This avoids the need to manually ignore plane updates in
  * driver code when the driver and/or hardware can't or just don't need to deal
  * with updates on disabled CRTCs, for example when supporting runtime PM.
  *
- * The drm_atomic_helper_commit() default implementation only sets @active_only
- * to false to most closely match the behaviour of the legacy helpers. This should
- * not be copied blindly by drivers.
+ * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
+ * display controllers require the CRTC's planes to be disabled together with
+ * the CRTC itself. This function will then skip the ->atomic_disable call for
+ * a plane if the CRTC of the old plane state needs a modeset operation. Of
+ * course, the drivers then need to disable the planes in their CRTC disable
+ * callbacks, since nobody else will do that.
+ *
+ * The drm_atomic_helper_commit() default implementation doesn't set the
+ * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
+ * This should not be copied blindly by drivers.
  */
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
                                     struct drm_atomic_state *old_state,
-                                    bool active_only)
+                                    uint32_t flags)
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
        int i;
+       bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
+       bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
 
        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;
@@ -1749,10 +1793,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                /*
                 * Special-case disabling the plane if drivers support it.
                 */
-               if (disabling && funcs->atomic_disable)
+               if (disabling && funcs->atomic_disable) {
+                       struct drm_crtc_state *crtc_state;
+
+                       crtc_state = old_plane_state->crtc->state;
+
+                       if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+                           no_disable)
+                               continue;
+
                        funcs->atomic_disable(plane, old_plane_state);
-               else if (plane->state->crtc || disabling)
+               } else if (plane->state->crtc || disabling) {
                        funcs->atomic_update(plane, old_plane_state);
+               }
        }
 
        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -1831,12 +1884,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
 /**
  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
- * @crtc: CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
  *
  * Disables all planes associated with the given CRTC. This can be
- * used for instance in the CRTC helper disable callback to disable
- * all planes before shutting down the display pipeline.
+ * used for instance in the CRTC helper atomic_disable callback to disable
+ * all planes.
  *
  * If the atomic-parameter is set the function calls the CRTC's
  * atomic_begin hook before and atomic_flush hook after disabling the
@@ -1845,9 +1898,11 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
  * It is a bug to call this function without having implemented the
  * ->atomic_disable() plane hook.
  */
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-                                             bool atomic)
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+                                        bool atomic)
 {
+       struct drm_crtc *crtc = old_crtc_state->crtc;
        const struct drm_crtc_helper_funcs *crtc_funcs =
                crtc->helper_private;
        struct drm_plane *plane;
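    A minimal sketch of the updated call from a driver's CRTC disable path
    (the surrounding code is hypothetical; only
    drm_atomic_helper_disable_planes_on_crtc() with its new old-state
    parameter comes from this patch):

    /* old_crtc_state is the CRTC state being torn down; "true" requests
     * the atomic_begin/atomic_flush bracketing around the disables. */
    drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);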
@@ -1855,11 +1910,11 @@ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
        if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
                crtc_funcs->atomic_begin(crtc, NULL);
 
-       drm_for_each_plane(plane, crtc->dev) {
+       drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
                const struct drm_plane_helper_funcs *plane_funcs =
                        plane->helper_private;
 
-               if (plane->state->crtc != crtc || !plane_funcs)
+               if (!plane_funcs)
                        continue;
 
                WARN_ON(!plane_funcs->atomic_disable);
@@ -1894,6 +1949,9 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
+               if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
+                       continue;
+
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
@@ -2354,7 +2412,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
        primary_state->crtc_h = vdisplay;
        primary_state->src_x = set->x << 16;
        primary_state->src_y = set->y << 16;
-       if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+       if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
                primary_state->src_w = vdisplay << 16;
                primary_state->src_h = hdisplay << 16;
        } else {
@@ -3039,7 +3097,7 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 
        if (plane->state) {
                plane->state->plane = plane;
-               plane->state->rotation = BIT(DRM_ROTATE_0);
+               plane->state->rotation = DRM_ROTATE_0;
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
index 4153e8a193af39c01d6ff20326818d052cef4287..6b143514a5668a7c401f4c87194e0764ec05a893 100644 (file)
@@ -251,7 +251,7 @@ void drm_master_release(struct drm_file *file_priv)
        if (!drm_is_current_master(file_priv))
                goto out;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+       if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
                /*
                 * Since the master is disappearing, so is the
                 * possibility to lock.
index f3c0942bd7568ab0221faa7d12f0e862ae81b7b4..85172a977bf3eccf5ec3ea3b1a83797364a396e4 100644 (file)
  */
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_blend.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The basic plane composition model supported by standard plane properties only
+ * has a source rectangle (in logical pixels within the &drm_framebuffer), with
+ * sub-pixel accuracy, which is scaled up to a pixel-aligned destination
+ * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
+ * defined by the horizontal and vertical visible pixels (stored in @hdisplay
+ * and @vdisplay) of the requested mode (stored in @mode in the
+ * &drm_crtc_state). These two rectangles are both stored in the
+ * &drm_plane_state.
+ *
+ * For the atomic ioctl the following standard (atomic) properties on the plane object
+ * encode the basic plane composition model:
+ *
+ * SRC_X:
+ *     X coordinate offset for the source rectangle within the
+ *     &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_Y:
+ *     Y coordinate offset for the source rectangle within the
+ *     &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_W:
+ *     Width for the source rectangle within the &drm_framebuffer, in 16.16
+ *     fixed point. SRC_X plus SRC_W must be within the width of the source
+ *     framebuffer. Must be positive.
+ * SRC_H:
+ *     Height for the source rectangle within the &drm_framebuffer, in 16.16
+ *     fixed point. SRC_Y plus SRC_H must be within the height of the source
+ *     framebuffer. Must be positive.
+ * CRTC_X:
+ *     X coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_Y:
+ *     Y coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_W:
+ *     Width for the destination rectangle. CRTC_X plus CRTC_W can extend past
+ *     the currently visible horizontal area of the &drm_crtc.
+ * CRTC_H:
+ *     Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past
+ *     the currently visible vertical area of the &drm_crtc.
+ * FB_ID:
+ *     Mode object ID of the &drm_framebuffer this plane should scan out.
+ * CRTC_ID:
+ *     Mode object ID of the &drm_crtc this plane should be connected to.
+ *
+ * Note that the source rectangle must fully lie within the bounds of the
+ * &drm_framebuffer. The destination rectangle can lie outside of the visible
+ * area of the current mode of the CRTC. It must be appropriately clipped by the
+ * driver, which can be done by calling drm_plane_helper_check_update(). Drivers
+ * are also allowed to round the subpixel sampling positions appropriately, but
+ * only to the next full pixel. No pixel outside of the source rectangle may
+ * ever be sampled, which is important when applying more sophisticated
+ * filtering than just a bilinear one when scaling. The filtering mode when
+ * scaling is unspecified.
+ *
+ * On top of this basic transformation additional properties can be exposed by
+ * the driver:
+ *
+ * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ *   rotation and reflection step between the source and destination rectangles.
+ *   Without this property the rectangle is only scaled, but not rotated or
+ *   reflected.
+ *
+ * - Z position is set up with drm_plane_create_zpos_immutable_property() and
+ *   drm_plane_create_zpos_property(). It controls the visibility of overlapping
+ *   planes. Without this property the primary plane is always below the cursor
+ *   plane, and ordering between all other planes is undefined.
+ *
+ * Note that all the property extensions described here apply either to the
+ * plane or the CRTC (e.g. for the background color, which currently is not
+ * exposed and assumed to be black).
+ */
+
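    As a short worked example of the 16.16 fixed-point convention used by the
    SRC_* properties, scanning out a 1920x1080 source region 1:1 amounts to
    the following values (shown as hypothetical direct assignments to a
    &drm_plane_state):

    /* Source coordinates are 16.16 fixed point, i.e. value << 16. */
    plane_state->src_x = 0 << 16;
    plane_state->src_y = 0 << 16;
    plane_state->src_w = 1920 << 16;
    plane_state->src_h = 1080 << 16;
    /* Destination coordinates are whole pixels. */
    plane_state->crtc_x = 0;
    plane_state->crtc_y = 0;
    plane_state->crtc_w = 1920;
    plane_state->crtc_h = 1080;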
+/**
+ * drm_mode_create_rotation_property - create a new rotation property
+ * @dev: DRM device
+ * @supported_rotations: bitmask of supported rotations and reflections
+ *
+ * This creates a new property with the selected support for transformations.
+ * The resulting property should be stored in @rotation_property in
+ * &drm_mode_config. It then must be attached to each plane which supports
+ * rotations using drm_object_attach_property().
+ *
+ * FIXME: Probably better if the rotation property is created on each plane,
+ * like the zpos property. Otherwise it's not possible to allow different
+ * rotation modes on different planes.
+ *
+ * Since a rotation by 180° is the same as reflecting along both the x
+ * and the y axis, the rotation property is somewhat redundant. Drivers can use
+ * drm_rotation_simplify() to normalize values of this property.
+ *
+ * The property exposed to userspace is a bitmask property (see
+ * drm_property_create_bitmask()) called "rotation" and has the following
+ * bitmask enumeration values:
+ *
+ * DRM_ROTATE_0:
+ *     "rotate-0"
+ * DRM_ROTATE_90:
+ *     "rotate-90"
+ * DRM_ROTATE_180:
+ *     "rotate-180"
+ * DRM_ROTATE_270:
+ *     "rotate-270"
+ * DRM_REFLECT_X:
+ *     "reflect-x"
+ * DRM_REFLECT_Y:
+ *     "reflect-y"
+ *
+ * Rotation is the specified amount in degrees in counter-clockwise direction,
+ * and the X and Y axes are those of the source rectangle, i.e. the X/Y axes
+ * before rotation. The reflection is applied first, then the rotation, to the
+ * image sampled from the source rectangle, before scaling it to fit the
+ * destination rectangle.
+ */
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+                                                      unsigned int supported_rotations)
+{
+       static const struct drm_prop_enum_list props[] = {
+               { __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
+               { __builtin_ffs(DRM_ROTATE_90) - 1,  "rotate-90" },
+               { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
+               { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
+               { __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
+               { __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
+       };
+
+       return drm_property_create_bitmask(dev, 0, "rotation",
+                                          props, ARRAY_SIZE(props),
+                                          supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
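    A sketch of how a driver might create and attach the property (the plane,
    the supported mask and the helper function are assumptions; only
    drm_mode_create_rotation_property() itself comes from this patch):

    #include <drm/drm_blend.h>

    static int foo_attach_rotation_property(struct drm_device *dev,
                                            struct drm_plane *plane)
    {
            struct drm_property *prop;

            prop = drm_mode_create_rotation_property(dev,
                                                     DRM_ROTATE_0 |
                                                     DRM_ROTATE_180 |
                                                     DRM_REFLECT_X |
                                                     DRM_REFLECT_Y);
            if (!prop)
                    return -ENOMEM;

            /* Store it in &drm_mode_config, as the comment above asks. */
            dev->mode_config.rotation_property = prop;
            drm_object_attach_property(&plane->base, prop, DRM_ROTATE_0);
            return 0;
    }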
+
+/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
+ *                       DRM_ROTATE_90 | DRM_ROTATE_180 |
+ *                       DRM_ROTATE_270 | DRM_REFLECT_Y);
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+                                  unsigned int supported_rotations)
+{
+       if (rotation & ~supported_rotations) {
+               rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
+               rotation = (rotation & DRM_REFLECT_MASK) |
+                          BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
+       }
+
+       return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
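    A usage sketch from a hypothetical plane atomic_check, where
    FOO_SUPPORTED_ROTATIONS is a made-up driver mask and state is the new
    plane state:

    unsigned int rotation;

    rotation = drm_rotation_simplify(state->rotation,
                                     FOO_SUPPORTED_ROTATIONS);
    /* Simplification is best-effort, so re-check the result. */
    if (rotation & ~FOO_SUPPORTED_ROTATIONS)
            return -EINVAL;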
 
 /**
  * drm_plane_create_zpos_property - create mutable zpos property
  * If zpos of some planes cannot be changed (like fixed background or
  * cursor/topmost planes), driver should adjust min/max values and assign those
  * planes immutable zpos property with lower or higher values (for more
- * information, see drm_mode_create_zpos_immutable_property() function). In such
+ * information, see drm_plane_create_zpos_immutable_property() function). In such
  * case driver should also assign proper initial zpos values for all planes in
  * its plane_reset() callback, so the planes will be always sorted properly.
  *
+ * See also drm_atomic_normalize_zpos().
+ *
+ * The property exposed to userspace is called "zpos".
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -88,7 +253,9 @@ EXPORT_SYMBOL(drm_plane_create_zpos_property);
  * support for it in drm core. Using this property driver lets userspace
  * to get the arrangement of the planes for blending operation and notifies
  * it that the hardware (or driver) doesn't support changing of the planes'
- * order.
+ * order. For mutable zpos see drm_plane_create_zpos_property().
+ *
+ * The property exposed to userspace is called "zpos".
  *
  * Returns:
  * Zero on success, negative errno on failure.
@@ -127,20 +294,6 @@ static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
                return sa->plane->base.id - sb->plane->base.id;
 }
 
-/**
- * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
- * @crtc: crtc with planes, which have to be considered for normalization
- * @crtc_state: new atomic state to apply
- *
- * This function checks new states of all planes assigned to given crtc and
- * calculates normalized zpos value for them. Planes are compared first by their
- * zpos values, then by plane id (if zpos equals). Plane with lowest zpos value
- * is at the bottom. The plane_state->normalized_zpos is then filled with unique
- * values from 0 to number of active planes in crtc minus one.
- *
- * RETURNS
- * Zero for success or -errno
- */
 static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
                                          struct drm_crtc_state *crtc_state)
 {
@@ -193,20 +346,25 @@ done:
 }
 
 /**
- * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
- *                                   crtcs
+ * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs
  * @dev: DRM device
  * @state: atomic state of DRM device
  *
  * This function calculates normalized zpos value for all modified planes in
- * the provided atomic state of DRM device. For more information, see
- * drm_atomic_helper_crtc_normalize_zpos() function.
+ * the provided atomic state of DRM device.
+ *
+ * For every CRTC this function checks the new state of all planes assigned to
+ * it and calculates normalized zpos values for those planes. Planes are
+ * compared first by their zpos values, then by plane id (if the zpos values
+ * are equal). The plane with the lowest zpos value is at the bottom. The
+ * plane_state->normalized_zpos is then filled with unique values from 0 to
+ * the number of active planes on the CRTC minus one.
  *
  * RETURNS
  * Zero for success or -errno
  */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-                                    struct drm_atomic_state *state)
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+                             struct drm_atomic_state *state)
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
@@ -236,3 +394,4 @@ int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
        }
        return 0;
 }
+EXPORT_SYMBOL(drm_atomic_normalize_zpos);
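    With the function exported, a driver's atomic_check can call it directly;
    a minimal sketch (the wrapper and the exact headers are assumptions):

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_blend.h>

    static int foo_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
    {
            int ret;

            ret = drm_atomic_normalize_zpos(dev, state);
            if (ret)
                    return ret;

            return drm_atomic_helper_check(dev, state);
    }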
index 255543086590610d9c80b7f90ba27a6088858756..0ee052b7c21af76f9e172075c4b5671af018d2f0 100644 (file)
 
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 
-#include <drm/drm_crtc.h>
-
-#include "drm/drmP.h"
+#include <drm/drm_bridge.h>
 
 /**
  * DOC: overview
@@ -98,11 +97,11 @@ EXPORT_SYMBOL(drm_bridge_remove);
  * @dev: DRM device
  * @bridge: bridge control structure
  *
- * called by a kms driver to link one of our encoder/bridge to the given
+ * Called by a kms driver to link one of our encoder/bridge to the given
  * bridge.
  *
  * Note that setting up links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself
+ * objects needs to be handled by the kms driver itself.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -124,6 +123,31 @@ int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL(drm_bridge_attach);
 
+/**
+ * drm_bridge_detach - deassociate given bridge from its DRM device
+ *
+ * @bridge: bridge control structure
+ *
+ * Called by a kms driver to unlink the given bridge from its DRM device.
+ *
+ * Note that tearing down links between the bridge and our encoder/bridge
+ * objects needs to be handled by the kms driver itself.
+ */
+void drm_bridge_detach(struct drm_bridge *bridge)
+{
+       if (WARN_ON(!bridge))
+               return;
+
+       if (WARN_ON(!bridge->dev))
+               return;
+
+       if (bridge->funcs->detach)
+               bridge->funcs->detach(bridge);
+
+       bridge->dev = NULL;
+}
+EXPORT_SYMBOL(drm_bridge_detach);
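    A sketch of the attach/detach pairing from a driver's bind and unbind
    paths (the foo_* wrappers and the encoder wiring are assumptions):

    #include <drm/drm_bridge.h>

    static int foo_bind_bridge(struct drm_device *dev,
                               struct drm_encoder *encoder,
                               struct drm_bridge *bridge)
    {
            int ret;

            bridge->encoder = encoder;
            ret = drm_bridge_attach(dev, bridge);
            if (ret)
                    return ret;

            encoder->bridge = bridge;
            return 0;
    }

    static void foo_unbind_bridge(struct drm_encoder *encoder)
    {
            if (encoder->bridge)
                    drm_bridge_detach(encoder->bridge);
    }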
+
 /**
  * DOC: bridge callbacks
  *
index c3a12cd8bd0de963e410d921c25e0bb7eeba46c9..adb1dd7fde5f381afb9a0bd443f2e05312d71a5f 100644 (file)
@@ -397,7 +397,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
                return -EPERM;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -443,7 +443,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
        int i;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        idx = map->offset;
@@ -545,7 +545,7 @@ EXPORT_SYMBOL(drm_legacy_rmmap_locked);
 void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -558,7 +558,7 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
 {
        struct drm_map_list *r_list, *list_temp;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -595,7 +595,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
        int ret;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
@@ -755,7 +755,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
@@ -905,14 +905,14 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
 
-       entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+       entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
        if (!entry->seglist) {
                kfree(entry->buflist);
                mutex_unlock(&dev->struct_mutex);
@@ -923,8 +923,9 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
-       temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
-                              sizeof(*dma->pagelist), GFP_KERNEL);
+       temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
+                                     sizeof(*dma->pagelist),
+                                     GFP_KERNEL);
        if (!temp_pagelist) {
                kfree(entry->buflist);
                kfree(entry->seglist);
@@ -1116,8 +1117,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist),
-                               GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
@@ -1220,7 +1220,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
        struct drm_buf_desc *request = data;
        int ret;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1266,7 +1266,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
        int i;
        int count;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1347,7 +1347,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
        int order;
        struct drm_buf_entry *entry;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1395,7 +1395,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
        int idx;
        struct drm_buf *buf;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1450,7 +1450,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
        struct drm_buf_map *request = data;
        int i;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1530,7 +1530,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (dev->driver->dma_ioctl)
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
new file mode 100644 (file)
index 0000000..d28ffdd
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_color_mgmt.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Color management or color space adjustment is supported through a set of 5
+ * properties on the &drm_crtc object. They are set up by calling
+ * drm_crtc_enable_color_mgmt().
+ *
+ * "DEGAMMA_LUT”:
+ *     Blob property to set the degamma lookup table (LUT) mapping pixel data
+ *     from the framebuffer before it is given to the transformation matrix.
+ *     The data is interpreted as an array of struct &drm_color_lut elements.
+ *     Hardware might choose not to use the full precision of the LUT elements
+ *     nor use all the elements of the LUT (for example the hardware might
+ *     choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “DEGAMMA_LUT_SIZE”:
+ *     Unsigned range property to give the size of the lookup table to be set
+ *     on the DEGAMMA_LUT property (the size depends on the underlying
+ *     hardware). If drivers support multiple LUT sizes then they should
+ *     publish the largest size, and sub-sample smaller sized LUTs (e.g. for
+ *     split-gamma modes) appropriately.
+ *
+ * “CTM”:
+ *     Blob property to set the current transformation matrix (CTM) applied to
+ *     pixel data after the lookup through the degamma LUT and before the
+ *     lookup through the gamma LUT. The data is interpreted as a struct
+ *     &drm_color_ctm.
+ *
+ * “GAMMA_LUT”:
+ *     Blob property to set the gamma lookup table (LUT) mapping pixel data
+ *     after the transformation matrix to data sent to the connector. The
+ *     data is interpreted as an array of struct &drm_color_lut elements.
+ *     Hardware might choose not to use the full precision of the LUT elements
+ *     nor use all the elements of the LUT (for example the hardware might
+ *     choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “GAMMA_LUT_SIZE”:
+ *     Unsigned range property to give the size of the lookup table to be set
+ *     on the GAMMA_LUT property (the size depends on the underlying hardware).
+ *     If drivers support multiple LUT sizes then they should publish the
+ *     largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma
+ *     modes) appropriately.
+ *
+ * There is also support for a legacy gamma table, which is set up by calling
+ * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
+ * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
+ * "GAMMA_LUT" property above.
+ */
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes 3 properties (degamma LUT, CSC
+ * matrix and gamma LUT) that userspace can set and 2 size properties
+ * to inform userspace of the LUT sizes. Each of the properties is
+ * optional. The gamma and degamma properties are only attached if
+ * their size is not 0, and ctm_property is only attached if has_ctm
+ * is true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                               uint degamma_lut_size,
+                               bool has_ctm,
+                               uint gamma_lut_size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+
+       if (degamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_size_property,
+                                          degamma_lut_size);
+       }
+
+       if (has_ctm)
+               drm_object_attach_property(&crtc->base,
+                                          config->ctm_property, 0);
+
+       if (gamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_size_property,
+                                          gamma_lut_size);
+       }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
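    A usage sketch with made-up LUT sizes (a 256-entry degamma LUT, a CSC
    matrix and a 1024-entry gamma LUT), called once while initializing a CRTC:

    drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);
    /* Optionally alias the legacy gamma ramp onto GAMMA_LUT. */
    drm_mode_crtc_set_gamma_size(crtc, 1024);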
+
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                int gamma_size)
+{
+       uint16_t *r_base, *g_base, *b_base;
+       int i;
+
+       crtc->gamma_size = gamma_size;
+
+       crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+                                   GFP_KERNEL);
+       if (!crtc->gamma_store) {
+               crtc->gamma_size = 0;
+               return -ENOMEM;
+       }
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + gamma_size;
+       b_base = g_base + gamma_size;
+       for (i = 0; i < gamma_size; i++) {
+               r_base[i] = i << 8;
+               g_base[i] = i << 8;
+               b_base[i] = i << 8;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
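    To route the legacy gamma ioctl through the atomic path, as the overview
    above suggests, a driver can plug the existing helper into its CRTC funcs
    (a sketch; the other hooks are elided):

    static const struct drm_crtc_funcs foo_crtc_funcs = {
            /* ... */
            .gamma_set = drm_atomic_helper_legacy_gamma_set,
    };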
+
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+       if (!crtc) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       if (crtc->funcs->gamma_set == NULL) {
+               ret = -ENOSYS;
+               goto out;
+       }
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+       if (!crtc) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       /* memcpy out of gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
new file mode 100644 (file)
index 0000000..26bb78c
--- /dev/null
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * In DRM connectors are the general abstraction for display sinks, and include
+ * also fixed panels or anything else that can display pixels in some form. As
+ * opposed to all other KMS objects representing hardware (like CRTC, encoder or
+ * plane abstractions), connectors can be hotplugged and unplugged at runtime.
+ * Hence they are reference-counted using drm_connector_reference() and
+ * drm_connector_unreference().
+ *
+ * A KMS driver must create, initialize, register and attach a struct
+ * &drm_connector for each such sink. The instance is created like any other
+ * KMS object and initialized by setting the following fields.
+ *
+ * The connector is then registered with a call to drm_connector_init() with a
+ * pointer to the connector functions and a connector type, and exposed through
+ * sysfs with a call to drm_connector_register().
+ *
+ * Connectors must be attached to an encoder to be used. For devices that map
+ * connectors to encoders 1:1, the connector should be attached at
+ * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * driver must also set the struct &drm_connector encoder field to point to the
+ * attached encoder.
+ *
+ * For connectors which are not fixed (i.e. anything other than built-in
+ * panels) the driver needs to support hotplug notifications. The simplest way
+ * to do that is by using the
+ * probe helpers, see drm_kms_helper_poll_init() for connectors which don't have
+ * hardware support for hotplug interrupts. Connectors with hardware hotplug
+ * support can instead use e.g. drm_helper_hpd_irq_event().
+ */
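    A sketch of that create/init/attach/register sequence (the funcs table,
    the encoder and the connector type are assumed driver context):

    #include <linux/slab.h>
    #include <drm/drm_connector.h>

    static int foo_create_connector(struct drm_device *dev,
                                    struct drm_encoder *encoder)
    {
            struct drm_connector *connector;
            int ret;

            connector = kzalloc(sizeof(*connector), GFP_KERNEL);
            if (!connector)
                    return -ENOMEM;

            ret = drm_connector_init(dev, connector, &foo_connector_funcs,
                                     DRM_MODE_CONNECTOR_HDMIA);
            if (ret) {
                    kfree(connector);
                    return ret;
            }

            ret = drm_mode_connector_attach_encoder(connector, encoder);
            if (ret) {
                    drm_connector_cleanup(connector);
                    kfree(connector);
                    return ret;
            }

            return drm_connector_register(connector);
    }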
+
+struct drm_conn_prop_enum_list {
+       int type;
+       const char *name;
+       struct ida ida;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+       { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+       { DRM_MODE_CONNECTOR_VGA, "VGA" },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+       { DRM_MODE_CONNECTOR_Composite, "Composite" },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+       { DRM_MODE_CONNECTOR_Component, "Component" },
+       { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+       { DRM_MODE_CONNECTOR_TV, "TV" },
+       { DRM_MODE_CONNECTOR_eDP, "eDP" },
+       { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+       { DRM_MODE_CONNECTOR_DSI, "DSI" },
+       { DRM_MODE_CONNECTOR_DPI, "DPI" },
+};
+
+void drm_connector_ida_init(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+               ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+               ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
+/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to query
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+       struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+       char *option = NULL;
+
+       if (fb_get_options(connector->name, &option))
+               return;
+
+       if (!drm_mode_parse_command_line_for_connector(option,
+                                                      connector,
+                                                      mode))
+               return;
+
+       if (mode->force) {
+               const char *s;
+
+               switch (mode->force) {
+               case DRM_FORCE_OFF:
+                       s = "OFF";
+                       break;
+               case DRM_FORCE_ON_DIGITAL:
+                       s = "ON - dig";
+                       break;
+               default:
+               case DRM_FORCE_ON:
+                       s = "ON";
+                       break;
+               }
+
+               DRM_INFO("forcing %s connector %s\n", connector->name, s);
+               connector->force = mode->force;
+       }
+
+       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+                     connector->name,
+                     mode->xres, mode->yres,
+                     mode->refresh_specified ? mode->refresh : 60,
+                     mode->rb ? " reduced blanking" : "",
+                     mode->margins ? " with margins" : "",
+                     mode->interlace ?  " interlaced" : "");
+}
+
+static void drm_connector_free(struct kref *kref)
+{
+       struct drm_connector *connector =
+               container_of(kref, struct drm_connector, base.refcount);
+       struct drm_device *dev = connector->dev;
+
+       drm_mode_object_unregister(dev, &connector->base);
+       connector->funcs->destroy(connector);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+                      struct drm_connector *connector,
+                      const struct drm_connector_funcs *funcs,
+                      int connector_type)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       int ret;
+       struct ida *connector_ida =
+               &drm_connector_enum_list[connector_type].ida;
+
+       drm_modeset_lock_all(dev);
+
+       ret = drm_mode_object_get_reg(dev, &connector->base,
+                                     DRM_MODE_OBJECT_CONNECTOR,
+                                     false, drm_connector_free);
+       if (ret)
+               goto out_unlock;
+
+       connector->base.properties = &connector->properties;
+       connector->dev = dev;
+       connector->funcs = funcs;
+
+       ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+       if (ret < 0)
+               goto out_put;
+       connector->index = ret;
+       ret = 0;
+
+       connector->connector_type = connector_type;
+       connector->connector_type_id =
+               ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+       if (connector->connector_type_id < 0) {
+               ret = connector->connector_type_id;
+               goto out_put_id;
+       }
+       connector->name =
+               kasprintf(GFP_KERNEL, "%s-%d",
+                         drm_connector_enum_list[connector_type].name,
+                         connector->connector_type_id);
+       if (!connector->name) {
+               ret = -ENOMEM;
+               goto out_put_type_id;
+       }
+
+       INIT_LIST_HEAD(&connector->probed_modes);
+       INIT_LIST_HEAD(&connector->modes);
+       connector->edid_blob_ptr = NULL;
+       connector->status = connector_status_unknown;
+
+       drm_connector_get_cmdline_mode(connector);
+
+       /* We should add connectors at the end to avoid upsetting the connector
+        * index too much. */
+       list_add_tail(&connector->head, &config->connector_list);
+       config->num_connector++;
+
+       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+               drm_object_attach_property(&connector->base,
+                                             config->edid_property,
+                                             0);
+
+       drm_object_attach_property(&connector->base,
+                                     config->dpms_property, 0);
+
+       if (drm_core_check_feature(dev, DRIVER_ATOMIC))
+               drm_object_attach_property(&connector->base,
+                                          config->prop_crtc_id, 0);
+
+       connector->debugfs_entry = NULL;
+out_put_type_id:
+       if (ret)
+               ida_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+       if (ret)
+               ida_remove(&config->connector_ida, connector->index);
+out_put:
+       if (ret)
+               drm_mode_object_unregister(dev, &connector->base);
+
+out_unlock:
+       drm_modeset_unlock_all(dev);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder)
+{
+       int i;
+
+       /*
+        * In the past, drivers have attempted to model the static association
+        * of connector to encoder in simple connector/encoder devices using a
+        * direct assignment of connector->encoder = encoder. This connection
+        * is a logical one and the responsibility of the core, so drivers are
+        * expected not to mess with this.
+        *
+        * Note that the error return should've been enough here, but a large
+        * majority of drivers ignores the return value, so add in a big WARN
+        * to get people's attention.
+        */
+       if (WARN_ON(connector->encoder))
+               return -EINVAL;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0) {
+                       connector->encoder_ids[i] = encoder->base.id;
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+static void drm_mode_remove(struct drm_connector *connector,
+                           struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+
+       /* The connector should have been removed from userspace long before
+        * it is finally destroyed.
+        */
+       if (WARN_ON(connector->registered))
+               drm_connector_unregister(connector);
+
+       if (connector->tile_group) {
+               drm_mode_put_tile_group(dev, connector->tile_group);
+               connector->tile_group = NULL;
+       }
+
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               drm_mode_remove(connector, mode);
+
+       ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+                  connector->connector_type_id);
+
+       ida_remove(&dev->mode_config.connector_ida,
+                  connector->index);
+
+       kfree(connector->display_info.bus_formats);
+       drm_mode_object_unregister(dev, &connector->base);
+       kfree(connector->name);
+       connector->name = NULL;
+       list_del(&connector->head);
+       dev->mode_config.num_connector--;
+
+       WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+       if (connector->state && connector->funcs->atomic_destroy_state)
+               connector->funcs->atomic_destroy_state(connector,
+                                                      connector->state);
+
+       memset(connector, 0, sizeof(*connector));
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+       int ret;
+
+       if (connector->registered)
+               return 0;
+
+       ret = drm_sysfs_connector_add(connector);
+       if (ret)
+               return ret;
+
+       ret = drm_debugfs_connector_add(connector);
+       if (ret)
+               goto err_sysfs;
+
+       if (connector->funcs->late_register) {
+               ret = connector->funcs->late_register(connector);
+               if (ret)
+                       goto err_debugfs;
+       }
+
+       drm_mode_object_register(connector->dev, &connector->base);
+
+       connector->registered = true;
+       return 0;
+
+err_debugfs:
+       drm_debugfs_connector_remove(connector);
+err_sysfs:
+       drm_sysfs_connector_remove(connector);
+       return ret;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+       if (!connector->registered)
+               return;
+
+       if (connector->funcs->early_unregister)
+               connector->funcs->early_unregister(connector);
+
+       drm_sysfs_connector_remove(connector);
+       drm_debugfs_connector_remove(connector);
+
+       connector->registered = false;
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+void drm_connector_unregister_all(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+
+       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               drm_connector_unregister(connector);
+}
+
+int drm_connector_register_all(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       int ret;
+
+       /* FIXME: taking the mode config mutex ends up in a clash with
+        * fbcon/backlight registration */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               ret = drm_connector_register(connector);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       drm_connector_unregister_all(dev);
+       return ret;
+}
+
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one returns a
+ * const pointer and hence is thread-safe.
+ */
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+       if (status == connector_status_connected)
+               return "connected";
+       else if (status == connector_status_disconnected)
+               return "disconnected";
+       else
+               return "unknown";
+}
+EXPORT_SYMBOL(drm_get_connector_status_name);
+
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
+       { SubPixelUnknown, "Unknown" },
+       { SubPixelHorizontalRGB, "Horizontal RGB" },
+       { SubPixelHorizontalBGR, "Horizontal BGR" },
+       { SubPixelVerticalRGB, "Vertical RGB" },
+       { SubPixelVerticalBGR, "Vertical BGR" },
+       { SubPixelNone, "None" },
+};
+
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error.  No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+       return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+       { DRM_MODE_DPMS_ON, "On" },
+       { DRM_MODE_DPMS_STANDBY, "Standby" },
+       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+       { DRM_MODE_DPMS_OFF, "Off" }
+};
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/**
+ * drm_display_info_set_bus_formats - set the supported bus formats
+ * @info: display info to store bus formats in
+ * @formats: array containing the supported bus formats
+ * @num_formats: the number of entries in the @formats array
+ *
+ * Store the supported bus formats in display info structure.
+ * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
+ * a full list of available formats.
+ */
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+                                    const u32 *formats,
+                                    unsigned int num_formats)
+{
+       u32 *fmts = NULL;
+
+       if (!formats && num_formats)
+               return -EINVAL;
+
+       if (formats && num_formats) {
+               fmts = kmemdup(formats, sizeof(*formats) * num_formats,
+                              GFP_KERNEL);
+               if (!fmts)
+                       return -ENOMEM;
+       }
+
+       kfree(info->bus_formats);
+       info->bus_formats = fmts;
+       info->num_bus_formats = num_formats;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_display_info_set_bus_formats);
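    A usage sketch for a fixed panel that outputs RGB888 on a parallel bus
    (the format choice and the setup function are assumptions):

    #include <linux/media-bus-format.h>

    static const u32 foo_bus_formats[] = {
            MEDIA_BUS_FMT_RGB888_1X24,
    };

    static int foo_init_display_info(struct drm_connector *connector)
    {
            return drm_display_info_set_bus_formats(&connector->display_info,
                                                    foo_bus_formats,
                                                    ARRAY_SIZE(foo_bus_formats));
    }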
+
+/* Optional connector properties. */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
+       { DRM_MODE_SCALE_NONE, "None" },
+       { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+       { DRM_MODE_SCALE_CENTER, "Center" },
+       { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+       { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+       { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+       { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+                drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+                drm_tv_subconnector_enum_list)
+
+int drm_connector_create_standard_properties(struct drm_device *dev)
+{
+       struct drm_property *prop;
+
+       prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "EDID", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.edid_property = prop;
+
+       prop = drm_property_create_enum(dev, 0,
+                                  "DPMS", drm_dpms_enum_list,
+                                  ARRAY_SIZE(drm_dpms_enum_list));
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.dpms_property = prop;
+
+       prop = drm_property_create(dev,
+                                  DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "PATH", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.path_property = prop;
+
+       prop = drm_property_create(dev,
+                                  DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "TILE", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.tile_property = prop;
+
+       return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+       struct drm_property *dvi_i_selector;
+       struct drm_property *dvi_i_subconnector;
+
+       if (dev->mode_config.dvi_i_select_subconnector_property)
+               return 0;
+
+       dvi_i_selector =
+               drm_property_create_enum(dev, 0,
+                                   "select subconnector",
+                                   drm_dvi_i_select_enum_list,
+                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
+       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+       dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   drm_dvi_i_subconnector_enum_list,
+                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
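As a usage sketch (not part of this patch), a driver with a DVI-I output would create these properties once and attach them per-connector; the helper name and default values below are illustrative only:

	/* Hypothetical driver code: create the shared DVI-I properties and
	 * attach them to one connector with Automatic/Unknown defaults. */
	static void example_attach_dvi_i_props(struct drm_connector *connector)
	{
		struct drm_device *dev = connector->dev;

		drm_mode_create_dvi_i_properties(dev);
		drm_object_attach_property(&connector->base,
				dev->mode_config.dvi_i_select_subconnector_property,
				DRM_MODE_SUBCONNECTOR_Automatic);
		drm_object_attach_property(&connector->base,
				dev->mode_config.dvi_i_subconnector_property,
				DRM_MODE_SUBCONNECTOR_Unknown);
	}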
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing the name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+                                 unsigned int num_modes,
+                                 const char * const modes[])
+{
+       struct drm_property *tv_selector;
+       struct drm_property *tv_subconnector;
+       unsigned int i;
+
+       if (dev->mode_config.tv_select_subconnector_property)
+               return 0;
+
+       /*
+        * Basic connector properties
+        */
+       tv_selector = drm_property_create_enum(dev, 0,
+                                         "select subconnector",
+                                         drm_tv_select_enum_list,
+                                         ARRAY_SIZE(drm_tv_select_enum_list));
+       if (!tv_selector)
+               goto nomem;
+
+       dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+       tv_subconnector =
+               drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   drm_tv_subconnector_enum_list,
+                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
+       if (!tv_subconnector)
+               goto nomem;
+       dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+       /*
+        * Other, TV specific properties: margins & TV modes.
+        */
+       dev->mode_config.tv_left_margin_property =
+               drm_property_create_range(dev, 0, "left margin", 0, 100);
+       if (!dev->mode_config.tv_left_margin_property)
+               goto nomem;
+
+       dev->mode_config.tv_right_margin_property =
+               drm_property_create_range(dev, 0, "right margin", 0, 100);
+       if (!dev->mode_config.tv_right_margin_property)
+               goto nomem;
+
+       dev->mode_config.tv_top_margin_property =
+               drm_property_create_range(dev, 0, "top margin", 0, 100);
+       if (!dev->mode_config.tv_top_margin_property)
+               goto nomem;
+
+       dev->mode_config.tv_bottom_margin_property =
+               drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+       if (!dev->mode_config.tv_bottom_margin_property)
+               goto nomem;
+
+       dev->mode_config.tv_mode_property =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", num_modes);
+       if (!dev->mode_config.tv_mode_property)
+               goto nomem;
+
+       for (i = 0; i < num_modes; i++)
+               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+                                     i, modes[i]);
+
+       dev->mode_config.tv_brightness_property =
+               drm_property_create_range(dev, 0, "brightness", 0, 100);
+       if (!dev->mode_config.tv_brightness_property)
+               goto nomem;
+
+       dev->mode_config.tv_contrast_property =
+               drm_property_create_range(dev, 0, "contrast", 0, 100);
+       if (!dev->mode_config.tv_contrast_property)
+               goto nomem;
+
+       dev->mode_config.tv_flicker_reduction_property =
+               drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+       if (!dev->mode_config.tv_flicker_reduction_property)
+               goto nomem;
+
+       dev->mode_config.tv_overscan_property =
+               drm_property_create_range(dev, 0, "overscan", 0, 100);
+       if (!dev->mode_config.tv_overscan_property)
+               goto nomem;
+
+       dev->mode_config.tv_saturation_property =
+               drm_property_create_range(dev, 0, "saturation", 0, 100);
+       if (!dev->mode_config.tv_saturation_property)
+               goto nomem;
+
+       dev->mode_config.tv_hue_property =
+               drm_property_create_range(dev, 0, "hue", 0, 100);
+       if (!dev->mode_config.tv_hue_property)
+               goto nomem;
+
+       return 0;
+nomem:
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
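A sketch of the calling convention the kerneldoc above describes; the format-name array and the attached defaults are hypothetical, error handling is minimal:

	static const char * const example_tv_modes[] = { "NTSC", "PAL" };

	static int example_tv_init(struct drm_connector *connector)
	{
		struct drm_device *dev = connector->dev;
		int ret;

		ret = drm_mode_create_tv_properties(dev,
						    ARRAY_SIZE(example_tv_modes),
						    example_tv_modes);
		if (ret)
			return ret;

		/* Attach a subset; enum value 0 selects the first mode name. */
		drm_object_attach_property(&connector->base,
					   dev->mode_config.tv_mode_property, 0);
		drm_object_attach_property(&connector->base,
					   dev->mode_config.tv_brightness_property, 50);
		return 0;
	}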
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the resulting property must
+ * be attached to the desired connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+       struct drm_property *scaling_mode;
+
+       if (dev->mode_config.scaling_mode_property)
+               return 0;
+
+       scaling_mode =
+               drm_property_create_enum(dev, 0, "scaling mode",
+                               drm_scaling_mode_enum_list,
+                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+       dev->mode_config.scaling_mode_property = scaling_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
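A minimal attach sketch, assuming a connector whose hardware can scale; the default value chosen here is arbitrary:

	drm_mode_create_scaling_mode_property(dev);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_ASPECT);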
+
+/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the resulting property must
+ * be attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+       if (dev->mode_config.aspect_ratio_property)
+               return 0;
+
+       dev->mode_config.aspect_ratio_property =
+               drm_property_create_enum(dev, 0, "aspect ratio",
+                               drm_aspect_ratio_enum_list,
+                               ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+       if (dev->mode_config.aspect_ratio_property == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+       if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+               return 0;
+
+       dev->mode_config.suggested_x_property =
+               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+       dev->mode_config.suggested_y_property =
+               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+       if (dev->mode_config.suggested_x_property == NULL ||
+           dev->mode_config.suggested_y_property == NULL)
+               return -ENOMEM;
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
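Sketch of attaching the hints after creation; the (0, 0) values stand in for whatever position the platform firmware actually suggests:

	drm_mode_create_suggested_offset_properties(dev);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);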
+
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property; must not be NULL.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+                                        const char *path)
+{
+       struct drm_device *dev = connector->dev;
+       int ret;
+
+       ret = drm_property_replace_global_blob(dev,
+                                              &connector->path_blob_ptr,
+                                              strlen(path) + 1,
+                                              path,
+                                              &connector->base,
+                                              dev->mode_config.path_property);
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
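For illustration, a DP MST driver builds a path string and publishes it roughly like this; the variable names are hypothetical and the exact "mst:..." format is defined by the MST helpers:

	char pathprop[32];

	snprintf(pathprop, sizeof(pathprop), "mst:%d-%d",
		 mst_base_connector_id, port_number);
	drm_mode_connector_set_path_property(connector, pathprop);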
+
+/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       char tile[256];
+       int ret;
+
+       if (!connector->has_tile) {
+               ret  = drm_property_replace_global_blob(dev,
+                                                       &connector->tile_blob_ptr,
+                                                       0,
+                                                       NULL,
+                                                       &connector->base,
+                                                       dev->mode_config.tile_property);
+               return ret;
+       }
+
+       snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+                connector->tile_group->id, connector->tile_is_single_monitor,
+                connector->num_h_tile, connector->num_v_tile,
+                connector->tile_h_loc, connector->tile_v_loc,
+                connector->tile_h_size, connector->tile_v_size);
+
+       ret = drm_property_replace_global_blob(dev,
+                                              &connector->tile_blob_ptr,
+                                              strlen(tile) + 1,
+                                              tile,
+                                              &connector->base,
+                                              dev->mode_config.tile_property);
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
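Userspace-side sketch: decoding the blob mirrors the snprintf() above. Here blob_data is a hypothetical pointer to the fetched TILE blob contents:

	int group_id, single, num_h, num_v, h_loc, v_loc, h_size, v_size;

	if (sscanf(blob_data, "%d:%d:%d:%d:%d:%d:%d:%d",
		   &group_id, &single, &num_h, &num_v,
		   &h_loc, &v_loc, &h_size, &v_size) == 8) {
		/* place this connector at tile (h_loc, v_loc) of a
		 * num_h x num_v grid, each tile h_size x v_size pixels */
	}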
+
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           const struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       size_t size = 0;
+       int ret;
+
+       /* ignore requests to set edid when overridden */
+       if (connector->override_edid)
+               return 0;
+
+       if (edid)
+               size = EDID_LENGTH * (1 + edid->extensions);
+
+       ret = drm_property_replace_global_blob(dev,
+                                              &connector->edid_blob_ptr,
+                                              size,
+                                              edid,
+                                              &connector->base,
+                                              dev->mode_config.edid_property);
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
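The usual probe-path pairing, sketched with error handling elided; adapter stands in for the connector's DDC i2c adapter:

	struct edid *edid = drm_get_edid(connector, adapter);

	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		drm_add_edid_modes(connector, edid);
		kfree(edid);
	}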
+
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+                                   struct drm_property *property,
+                                   uint64_t value)
+{
+       int ret = -EINVAL;
+       struct drm_connector *connector = obj_to_connector(obj);
+
+       /* Do DPMS ourselves */
+       if (property == connector->dev->mode_config.dpms_property) {
+               ret = (*connector->funcs->dpms)(connector, (int)value);
+       } else if (connector->funcs->set_property)
+               ret = connector->funcs->set_property(connector, property, value);
+
+       /* store the property value if successful */
+       if (!ret)
+               drm_object_property_set_value(&connector->base, property, value);
+       return ret;
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_connector_set_property *conn_set_prop = data;
+       struct drm_mode_obj_set_property obj_set_prop = {
+               .value = conn_set_prop->value,
+               .prop_id = conn_set_prop->prop_id,
+               .obj_id = conn_set_prop->connector_id,
+               .obj_type = DRM_MODE_OBJECT_CONNECTOR
+       };
+
+       /* It does all the locking and checking we need */
+       return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+       /* For atomic drivers only state objects are synchronously updated and
+        * protected by modeset locks, so check those first. */
+       if (connector->state)
+               return connector->state->best_encoder;
+       return connector->encoder;
+}
+
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+                                        const struct drm_file *file_priv)
+{
+       /*
+        * If user-space hasn't configured the driver to expose the stereo 3D
+        * modes, don't expose them.
+        */
+       if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+               return false;
+
+       return true;
+}
+
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_connector *out_resp = data;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int encoders_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_mode_modeinfo __user *mode_ptr;
+       uint32_t __user *encoder_ptr;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       connector = drm_connector_lookup(dev, out_resp->connector_id);
+       if (!connector) {
+               ret = -ENOENT;
+               goto out_unlock;
+       }
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+               if (connector->encoder_ids[i] != 0)
+                       encoders_count++;
+
+       if (out_resp->count_modes == 0) {
+               connector->funcs->fill_modes(connector,
+                                            dev->mode_config.max_width,
+                                            dev->mode_config.max_height);
+       }
+
+       /* delayed so we get modes regardless of pre-fill_modes state */
+       list_for_each_entry(mode, &connector->modes, head)
+               if (drm_mode_expose_to_userspace(mode, file_priv))
+                       mode_count++;
+
+       out_resp->connector_id = connector->base.id;
+       out_resp->connector_type = connector->connector_type;
+       out_resp->connector_type_id = connector->connector_type_id;
+       out_resp->mm_width = connector->display_info.width_mm;
+       out_resp->mm_height = connector->display_info.height_mm;
+       out_resp->subpixel = connector->display_info.subpixel_order;
+       out_resp->connection = connector->status;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       encoder = drm_connector_get_encoder(connector);
+       if (encoder)
+               out_resp->encoder_id = encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and a second time to fill it.
+        */
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+               list_for_each_entry(mode, &connector->modes, head) {
+                       if (!drm_mode_expose_to_userspace(mode, file_priv))
+                               continue;
+
+                       drm_mode_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(mode_ptr + copied,
+                                        &u_mode, sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+                       &out_resp->count_props);
+       if (ret)
+               goto out;
+
+       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+               copied = 0;
+               encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] != 0) {
+                               if (put_user(connector->encoder_ids[i],
+                                            encoder_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_encoders = encoders_count;
+
+out:
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       drm_connector_unreference(connector);
+out_unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
+
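Userspace sketch of the two-pass pattern noted in the function above; real clients normally go through libdrm's drmModeGetConnector(), and fd/id are assumed to be an open DRM fd and a connector id (error checking elided):

	struct drm_mode_get_connector conn;

	memset(&conn, 0, sizeof(conn));
	conn.connector_id = id;
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);	/* pass 1: counts */

	conn.modes_ptr = (uintptr_t)calloc(conn.count_modes,
					   sizeof(struct drm_mode_modeinfo));
	conn.count_props = 0;		/* only fetching modes in this sketch */
	conn.count_encoders = 0;
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);	/* pass 2: fill */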
index 192a5f9eeb74213610fdf8f4a10529fdf88555ea..3c4000facb360a012f78b12599d918a276145ca6 100644 (file)
@@ -54,7 +54,7 @@ struct drm_ctx_list {
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -92,7 +92,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
 void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        idr_init(&dev->ctx_idr);
@@ -109,7 +109,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -131,7 +131,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
        struct drm_ctx_list *pos, *tmp;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
 
        mutex_lock(&dev->ctxlist_mutex);
@@ -177,7 +177,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
        struct drm_map_list *_entry;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
@@ -225,7 +225,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
        struct drm_map_list *r_list = NULL;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
@@ -329,7 +329,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
        int i;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (res->count >= DRM_RESERVED_CONTEXTS) {
@@ -363,7 +363,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        ctx->handle = drm_legacy_ctxbitmap_next(dev);
@@ -410,7 +410,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        /* This is 0, because we don't handle any context flags */
@@ -436,7 +436,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        DRM_DEBUG("%d\n", ctx->handle);
@@ -460,7 +460,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        DRM_DEBUG("%d\n", ctx->handle);
@@ -486,7 +486,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-           drm_core_check_feature(dev, DRIVER_MODESET))
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        DRM_DEBUG("%d\n", ctx->handle);
index f1d9f0569d7f86514773e517bafba9326de82a45..2d7bedf286473c20b0da3ba7bd5a3c945f0c7be6 100644 (file)
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-                           const struct drm_mode_fb_cmd2 *r,
-                           struct drm_file *file_priv);
-
-/* Avoid boilerplate.  I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list)                         \
-       const char *fnname(int val)                             \
-       {                                                       \
-               int i;                                          \
-               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
-                       if (list[i].type == val)                \
-                               return list[i].name;            \
-               }                                               \
-               return "(unknown)";                             \
-       }
-
 /*
  * Global properties
  */
-static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
-       { DRM_MODE_DPMS_ON, "On" },
-       { DRM_MODE_DPMS_STANDBY, "Standby" },
-       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
-       { DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
 static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
        { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
        { DRM_PLANE_TYPE_PRIMARY, "Primary" },
@@ -82,320 +57,6 @@ static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
 /*
  * Optional properties
  */
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
-       { DRM_MODE_SCALE_NONE, "None" },
-       { DRM_MODE_SCALE_FULLSCREEN, "Full" },
-       { DRM_MODE_SCALE_CENTER, "Center" },
-       { DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
-       { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
-       { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
-       { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
-       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
-       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
-                drm_dvi_i_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
-       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
-       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-       { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
-                drm_tv_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
-       { DRM_MODE_DIRTY_OFF,      "Off"      },
-       { DRM_MODE_DIRTY_ON,       "On"       },
-       { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-struct drm_conn_prop_enum_list {
-       int type;
-       const char *name;
-       struct ida ida;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
-       { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
-       { DRM_MODE_CONNECTOR_VGA, "VGA" },
-       { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
-       { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
-       { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
-       { DRM_MODE_CONNECTOR_Composite, "Composite" },
-       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
-       { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
-       { DRM_MODE_CONNECTOR_Component, "Component" },
-       { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
-       { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
-       { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
-       { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
-       { DRM_MODE_CONNECTOR_TV, "TV" },
-       { DRM_MODE_CONNECTOR_eDP, "eDP" },
-       { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
-       { DRM_MODE_CONNECTOR_DSI, "DSI" },
-       { DRM_MODE_CONNECTOR_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
-       { DRM_MODE_ENCODER_NONE, "None" },
-       { DRM_MODE_ENCODER_DAC, "DAC" },
-       { DRM_MODE_ENCODER_TMDS, "TMDS" },
-       { DRM_MODE_ENCODER_LVDS, "LVDS" },
-       { DRM_MODE_ENCODER_TVDAC, "TV" },
-       { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
-       { DRM_MODE_ENCODER_DSI, "DSI" },
-       { DRM_MODE_ENCODER_DPMST, "DP MST" },
-       { DRM_MODE_ENCODER_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
-       { SubPixelUnknown, "Unknown" },
-       { SubPixelHorizontalRGB, "Horizontal RGB" },
-       { SubPixelHorizontalBGR, "Horizontal BGR" },
-       { SubPixelVerticalRGB, "Vertical RGB" },
-       { SubPixelVerticalBGR, "Vertical BGR" },
-       { SubPixelNone, "None" },
-};
-
-void drm_connector_ida_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-               ida_init(&drm_connector_enum_list[i].ida);
-}
-
-void drm_connector_ida_destroy(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-               ida_destroy(&drm_connector_enum_list[i].ida);
-}
-
-/**
- * drm_get_connector_status_name - return a string for connector status
- * @status: connector status to compute name of
- *
- * In contrast to the other drm_get_*_name functions, this one returns a
- * const pointer and hence is thread-safe.
- */
-const char *drm_get_connector_status_name(enum drm_connector_status status)
-{
-       if (status == connector_status_connected)
-               return "connected";
-       else if (status == connector_status_disconnected)
-               return "disconnected";
-       else
-               return "unknown";
-}
-EXPORT_SYMBOL(drm_get_connector_status_name);
-
-/**
- * drm_get_subpixel_order_name - return a string for a given subpixel enum
- * @order: enum of subpixel_order
- *
- * Note you could abuse this and return something out of bounds, but that
- * would be a caller error.  No unscrubbed user data should make it here.
- */
-const char *drm_get_subpixel_order_name(enum subpixel_order order)
-{
-       return drm_subpixel_enum_list[order].name;
-}
-EXPORT_SYMBOL(drm_get_subpixel_order_name);
-
-/*
- * Internal function to assign a slot in the object idr and optionally
- * register the object into the idr.
- */
-static int drm_mode_object_get_reg(struct drm_device *dev,
-                                  struct drm_mode_object *obj,
-                                  uint32_t obj_type,
-                                  bool register_obj,
-                                  void (*obj_free_cb)(struct kref *kref))
-{
-       int ret;
-
-       mutex_lock(&dev->mode_config.idr_mutex);
-       ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
-       if (ret >= 0) {
-               /*
-                * Set up the object linking under the protection of the idr
-                * lock so that other users can't see inconsistent state.
-                */
-               obj->id = ret;
-               obj->type = obj_type;
-               if (obj_free_cb) {
-                       obj->free_cb = obj_free_cb;
-                       kref_init(&obj->refcount);
-               }
-       }
-       mutex_unlock(&dev->mode_config.idr_mutex);
-
-       return ret < 0 ? ret : 0;
-}
-
-/**
- * drm_mode_object_get - allocate a new modeset identifier
- * @dev: DRM device
- * @obj: object pointer, used to generate unique ID
- * @obj_type: object type
- *
- * Create a unique identifier based on @obj in @dev's identifier space.  Used
- * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
- * modeset identifiers are _not_ reference counted. Hence don't use this for
- * reference counted modeset objects like framebuffers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_object_get(struct drm_device *dev,
-                       struct drm_mode_object *obj, uint32_t obj_type)
-{
-       return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
-}
-
-static void drm_mode_object_register(struct drm_device *dev,
-                                    struct drm_mode_object *obj)
-{
-       mutex_lock(&dev->mode_config.idr_mutex);
-       idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
-       mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-/**
- * drm_mode_object_unregister - free a modeset identifier
- * @dev: DRM device
- * @object: object to free
- *
- * Free @object from @dev's unique identifier pool.
- * This function can be called multiple times, and guards against
- * multiple removals.
- * These modeset identifiers are _not_ reference counted. Hence don't use this
- * for reference counted modeset objects like framebuffers.
- */
-void drm_mode_object_unregister(struct drm_device *dev,
-                        struct drm_mode_object *object)
-{
-       mutex_lock(&dev->mode_config.idr_mutex);
-       if (object->id) {
-               idr_remove(&dev->mode_config.crtc_idr, object->id);
-               object->id = 0;
-       }
-       mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-static struct drm_mode_object *_object_find(struct drm_device *dev,
-               uint32_t id, uint32_t type)
-{
-       struct drm_mode_object *obj = NULL;
-
-       mutex_lock(&dev->mode_config.idr_mutex);
-       obj = idr_find(&dev->mode_config.crtc_idr, id);
-       if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
-               obj = NULL;
-       if (obj && obj->id != id)
-               obj = NULL;
-
-       if (obj && obj->free_cb) {
-               if (!kref_get_unless_zero(&obj->refcount))
-                       obj = NULL;
-       }
-       mutex_unlock(&dev->mode_config.idr_mutex);
-
-       return obj;
-}
-
-/**
- * drm_mode_object_find - look up a drm object with static lifetime
- * @dev: drm device
- * @id: id of the mode object
- * @type: type of the mode object
- *
- * This function is used to look up a modeset object. It will acquire a
- * reference for reference counted objects. This reference must be dropped again
- * by calling drm_mode_object_unreference().
- */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-               uint32_t id, uint32_t type)
-{
-       struct drm_mode_object *obj = NULL;
-
-       obj = _object_find(dev, id, type);
-       return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_mode_object_unreference - decr the object refcnt
- * @obj: mode_object
- *
- * This function decrements the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. This is used to drop references
- * acquired with drm_mode_object_reference().
- */
-void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
-       if (obj->free_cb) {
-               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-               kref_put(&obj->refcount, obj->free_cb);
-       }
-}
-EXPORT_SYMBOL(drm_mode_object_unreference);
-
-/**
- * drm_mode_object_reference - incr the object refcnt
- * @obj: mode_object
- *
- * This function increments the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. References should be dropped again
- * by calling drm_mode_object_unreference().
- */
-void drm_mode_object_reference(struct drm_mode_object *obj)
-{
-       if (obj->free_cb) {
-               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-               kref_get(&obj->refcount);
-       }
-}
-EXPORT_SYMBOL(drm_mode_object_reference);
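Sketch of the expected pairing: a successful lookup takes a reference on refcounted objects, which the caller drops when done (dev and id are assumed to be in scope):

	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ANY);
	if (obj) {
		/* ... use obj ... */
		drm_mode_object_unreference(obj);
	}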
-
 /**
  * drm_crtc_force_disable - Forcibly turn off a CRTC
  * @crtc: CRTC to turn off
@@ -441,199 +102,6 @@ out:
 }
 EXPORT_SYMBOL(drm_crtc_force_disable_all);
 
-static void drm_framebuffer_free(struct kref *kref)
-{
-       struct drm_framebuffer *fb =
-                       container_of(kref, struct drm_framebuffer, base.refcount);
-       struct drm_device *dev = fb->dev;
-
-       /*
-        * The lookup idr holds a weak reference, which has not necessarily been
-        * removed at this point. Check for that.
-        */
-       drm_mode_object_unregister(dev, &fb->base);
-
-       fb->funcs->destroy(fb);
-}
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- * @fb: framebuffer to be initialized
- * @funcs: ... with these functions
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions & device file and adds it to the master fd list.
- *
- * IMPORTANT:
- * This function publishes the fb and makes it available for concurrent access
- * by other users. Which means by this point the fb _must_ be fully set up -
- * since all the fb attributes are invariant over its lifetime, no further
- * locking but only correct reference counting is required.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
-                        const struct drm_framebuffer_funcs *funcs)
-{
-       int ret;
-
-       INIT_LIST_HEAD(&fb->filp_head);
-       fb->dev = dev;
-       fb->funcs = funcs;
-
-       ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
-                                     false, drm_framebuffer_free);
-       if (ret)
-               goto out;
-
-       mutex_lock(&dev->mode_config.fb_lock);
-       dev->mode_config.num_fb++;
-       list_add(&fb->head, &dev->mode_config.fb_list);
-       mutex_unlock(&dev->mode_config.fb_lock);
-
-       drm_mode_object_register(dev, &fb->base);
-out:
-       return ret;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
- * @dev: drm device
- * @id: id of the fb object
- *
- * If successful, this grabs an additional reference to the framebuffer -
- * callers need to make sure to eventually unreference the returned framebuffer
- * again, using @drm_framebuffer_unreference.
- */
-struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-                                              uint32_t id)
-{
-       struct drm_mode_object *obj;
-       struct drm_framebuffer *fb = NULL;
-
-       obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
-       if (obj)
-               fb = obj_to_fb(obj);
-       return fb;
-}
-EXPORT_SYMBOL(drm_framebuffer_lookup);
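The same pairing applies to framebuffers, per the kerneldoc above (fb_id hypothetical):

	struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, fb_id);

	if (fb) {
		/* ... inspect fb ... */
		drm_framebuffer_unreference(fb);
	}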
-
-/**
- * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
- * @fb: fb to unregister
- *
- * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
- * those used for fbdev. Note that the caller must hold a reference of its own,
- * i.e. the object may not be destroyed through this call (since it'll lead to a
- * locking inversion).
- */
-void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
-{
-       struct drm_device *dev;
-
-       if (!fb)
-               return;
-
-       dev = fb->dev;
-
-       /* Mark fb as reaped and drop idr ref. */
-       drm_mode_object_unregister(dev, &fb->base);
-}
-EXPORT_SYMBOL(drm_framebuffer_unregister_private);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * Cleanup framebuffer. This function is intended to be used from the drivers
- * ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
- *
- * Note that this function does not remove the fb from active usage - if it is
- * still used anywhere, hilarity can ensue since userspace could call getfb on
- * the id and get back -EINVAL. Obviously no concern at driver unload time.
- *
- * Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in the rmfb ioctl. For
- * driver-private objects (e.g. for fbdev) drivers need to explicitly call
- * drm_framebuffer_unregister_private.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
-       struct drm_device *dev = fb->dev;
-
-       mutex_lock(&dev->mode_config.fb_lock);
-       list_del(&fb->head);
-       dev->mode_config.num_fb--;
-       mutex_unlock(&dev->mode_config.fb_lock);
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- *
- * Scans all the CRTCs and planes in @dev's mode_config.  If they're
- * using @fb, removes it, setting it to NULL. Then drops the reference to the
- * passed-in framebuffer. Might take the modeset locks.
- *
- * Note that this function optimizes the cleanup away if the caller holds the
- * last reference to the framebuffer. It is also guaranteed to not take the
- * modeset locks in this case.
- */
-void drm_framebuffer_remove(struct drm_framebuffer *fb)
-{
-       struct drm_device *dev;
-       struct drm_crtc *crtc;
-       struct drm_plane *plane;
-
-       if (!fb)
-               return;
-
-       dev = fb->dev;
-
-       WARN_ON(!list_empty(&fb->filp_head));
-
-       /*
-        * drm ABI mandates that we remove any deleted framebuffers from active
- * usage. But since most sane clients only remove framebuffers they no
-        * longer need, try to optimize this away.
-        *
-        * Since we're holding a reference ourselves, observing a refcount of 1
-        * means that we're the last holder and can skip it. Also, the refcount
-        * can never increase from 1 again, so we don't need any barriers or
-        * locks.
-        *
- * Note that userspace could try to race with us and instate a new
-        * usage _after_ we've cleared all current ones. End result will be an
-        * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
-        * in this manner.
-        */
-       if (drm_framebuffer_read_refcount(fb) > 1) {
-               drm_modeset_lock_all(dev);
-               /* remove from any CRTC */
-               drm_for_each_crtc(crtc, dev) {
-                       if (crtc->primary->fb == fb) {
-                               /* should turn off the crtc */
-                               if (drm_crtc_force_disable(crtc))
-                                       DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
-                       }
-               }
-
-               drm_for_each_plane(plane, dev) {
-                       if (plane->fb == fb)
-                               drm_plane_force_disable(plane);
-               }
-               drm_modeset_unlock_all(dev);
-       }
-
-       drm_framebuffer_unreference(fb);
-}
-EXPORT_SYMBOL(drm_framebuffer_remove);
-
 DEFINE_WW_CLASS(crtc_ww_class);
 
 static unsigned int drm_num_crtcs(struct drm_device *dev)
@@ -683,7 +151,11 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
  * @funcs: callbacks for the new CRTC
  * @name: printf style format string for the CRTC name, or NULL for default name
  *
- * Inits a new object created as base part of a driver crtc object.
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only one plane, look at
+ * drm_simple_display_pipe_init() instead.
  *
  * Returns:
  * Zero on success, error code on failure.
@@ -783,4712 +255,650 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
-/*
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-static void drm_mode_remove(struct drm_connector *connector,
-                           struct drm_display_mode *mode)
+int drm_modeset_register_all(struct drm_device *dev)
 {
-       list_del(&mode->head);
-       drm_mode_destroy(connector->dev, mode);
-}
+       int ret;
 
-/**
- * drm_display_info_set_bus_formats - set the supported bus formats
- * @info: display info to store bus formats in
- * @formats: array containing the supported bus formats
- * @num_formats: the number of entries in the fmts array
- *
- * Store the supported bus formats in display info structure.
- * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
- * a full list of available formats.
- */
-int drm_display_info_set_bus_formats(struct drm_display_info *info,
-                                    const u32 *formats,
-                                    unsigned int num_formats)
-{
-       u32 *fmts = NULL;
+       ret = drm_plane_register_all(dev);
+       if (ret)
+               goto err_plane;
 
-       if (!formats && num_formats)
-               return -EINVAL;
+       ret = drm_crtc_register_all(dev);
+       if (ret)
+               goto err_crtc;
 
-       if (formats && num_formats) {
-               fmts = kmemdup(formats, sizeof(*formats) * num_formats,
-                              GFP_KERNEL);
-               if (!fmts)
-                       return -ENOMEM;
-       }
+       ret = drm_encoder_register_all(dev);
+       if (ret)
+               goto err_encoder;
 
-       kfree(info->bus_formats);
-       info->bus_formats = fmts;
-       info->num_bus_formats = num_formats;
+       ret = drm_connector_register_all(dev);
+       if (ret)
+               goto err_connector;
 
        return 0;
+
+err_connector:
+       drm_encoder_unregister_all(dev);
+err_encoder:
+       drm_crtc_unregister_all(dev);
+err_crtc:
+       drm_plane_unregister_all(dev);
+err_plane:
+       return ret;
 }
-EXPORT_SYMBOL(drm_display_info_set_bus_formats);
 
-/**
- * drm_connector_get_cmdline_mode - reads the user's cmdline mode
- * @connector: connector to query
- *
- * The kernel supports per-connector configuration of its consoles through
- * use of the video= parameter. This function parses that option and
- * extracts the user's specified mode (or enable/disable status) for a
- * particular connector. This is typically only used during the early fbdev
- * setup.
- */
-static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+void drm_modeset_unregister_all(struct drm_device *dev)
 {
-       struct drm_cmdline_mode *mode = &connector->cmdline_mode;
-       char *option = NULL;
-
-       if (fb_get_options(connector->name, &option))
-               return;
-
-       if (!drm_mode_parse_command_line_for_connector(option,
-                                                      connector,
-                                                      mode))
-               return;
-
-       if (mode->force) {
-               const char *s;
-
-               switch (mode->force) {
-               case DRM_FORCE_OFF:
-                       s = "OFF";
-                       break;
-               case DRM_FORCE_ON_DIGITAL:
-                       s = "ON - dig";
-                       break;
-               default:
-               case DRM_FORCE_ON:
-                       s = "ON";
-                       break;
-               }
-
-               DRM_INFO("forcing %s connector %s\n", connector->name, s);
-               connector->force = mode->force;
-       }
-
-       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-                     connector->name,
-                     mode->xres, mode->yres,
-                     mode->refresh_specified ? mode->refresh : 60,
-                     mode->rb ? " reduced blanking" : "",
-                     mode->margins ? " with margins" : "",
-                     mode->interlace ?  " interlaced" : "");
+       drm_connector_unregister_all(dev);
+       drm_encoder_unregister_all(dev);
+       drm_crtc_unregister_all(dev);
+       drm_plane_unregister_all(dev);
 }
 
-static void drm_connector_free(struct kref *kref)
+static int drm_mode_create_standard_properties(struct drm_device *dev)
 {
-       struct drm_connector *connector =
-               container_of(kref, struct drm_connector, base.refcount);
-       struct drm_device *dev = connector->dev;
-
-       drm_mode_object_unregister(dev, &connector->base);
-       connector->funcs->destroy(connector);
-}
+       struct drm_property *prop;
+       int ret;
 
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
-                      struct drm_connector *connector,
-                      const struct drm_connector_funcs *funcs,
-                      int connector_type)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       int ret;
-       struct ida *connector_ida =
-               &drm_connector_enum_list[connector_type].ida;
-
-       drm_modeset_lock_all(dev);
-
-       ret = drm_mode_object_get_reg(dev, &connector->base,
-                                     DRM_MODE_OBJECT_CONNECTOR,
-                                     false, drm_connector_free);
-       if (ret)
-               goto out_unlock;
-
-       connector->base.properties = &connector->properties;
-       connector->dev = dev;
-       connector->funcs = funcs;
-
-       ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
-       if (ret < 0)
-               goto out_put;
-       connector->index = ret;
-       ret = 0;
-
-       connector->connector_type = connector_type;
-       connector->connector_type_id =
-               ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
-       if (connector->connector_type_id < 0) {
-               ret = connector->connector_type_id;
-               goto out_put_id;
-       }
-       connector->name =
-               kasprintf(GFP_KERNEL, "%s-%d",
-                         drm_connector_enum_list[connector_type].name,
-                         connector->connector_type_id);
-       if (!connector->name) {
-               ret = -ENOMEM;
-               goto out_put_type_id;
-       }
-
-       INIT_LIST_HEAD(&connector->probed_modes);
-       INIT_LIST_HEAD(&connector->modes);
-       connector->edid_blob_ptr = NULL;
-       connector->status = connector_status_unknown;
-
-       drm_connector_get_cmdline_mode(connector);
-
-       /* We should add connectors at the end to avoid upsetting the connector
-        * index too much. */
-       list_add_tail(&connector->head, &config->connector_list);
-       config->num_connector++;
-
-       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-               drm_object_attach_property(&connector->base,
-                                             config->edid_property,
-                                             0);
-
-       drm_object_attach_property(&connector->base,
-                                     config->dpms_property, 0);
-
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-               drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
-       }
-
-       connector->debugfs_entry = NULL;
-out_put_type_id:
-       if (ret)
-               ida_remove(connector_ida, connector->connector_type_id);
-out_put_id:
-       if (ret)
-               ida_remove(&config->connector_ida, connector->index);
-out_put:
-       if (ret)
-               drm_mode_object_unregister(dev, &connector->base);
-
-out_unlock:
-       drm_modeset_unlock_all(dev);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_display_mode *mode, *t;
-
-       /* The connector should have been removed from userspace long before
-        * it is finally destroyed.
-        */
-       if (WARN_ON(connector->registered))
-               drm_connector_unregister(connector);
-
-       if (connector->tile_group) {
-               drm_mode_put_tile_group(dev, connector->tile_group);
-               connector->tile_group = NULL;
-       }
-
-       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
-               drm_mode_remove(connector, mode);
-
-       list_for_each_entry_safe(mode, t, &connector->modes, head)
-               drm_mode_remove(connector, mode);
-
-       ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
-                  connector->connector_type_id);
-
-       ida_remove(&dev->mode_config.connector_ida,
-                  connector->index);
-
-       kfree(connector->display_info.bus_formats);
-       drm_mode_object_unregister(dev, &connector->base);
-       kfree(connector->name);
-       connector->name = NULL;
-       list_del(&connector->head);
-       dev->mode_config.num_connector--;
-
-       WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
-       if (connector->state && connector->funcs->atomic_destroy_state)
-               connector->funcs->atomic_destroy_state(connector,
-                                                      connector->state);
-
-       memset(connector, 0, sizeof(*connector));
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-/**
- * drm_connector_register - register a connector
- * @connector: the connector to register
- *
- * Register userspace interfaces for a connector
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register(struct drm_connector *connector)
-{
-       int ret;
-
-       if (connector->registered)
-               return 0;
-
-       ret = drm_sysfs_connector_add(connector);
+       ret = drm_connector_create_standard_properties(dev);
        if (ret)
                return ret;
 
-       ret = drm_debugfs_connector_add(connector);
-       if (ret) {
-               goto err_sysfs;
-       }
-
-       if (connector->funcs->late_register) {
-               ret = connector->funcs->late_register(connector);
-               if (ret)
-                       goto err_debugfs;
-       }
+       prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+                                       "type", drm_plane_type_enum_list,
+                                       ARRAY_SIZE(drm_plane_type_enum_list));
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.plane_type_property = prop;
 
-       drm_mode_object_register(connector->dev, &connector->base);
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "SRC_X", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_src_x = prop;
 
-       connector->registered = true;
-       return 0;
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "SRC_Y", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_src_y = prop;
 
-err_debugfs:
-       drm_debugfs_connector_remove(connector);
-err_sysfs:
-       drm_sysfs_connector_remove(connector);
-       return ret;
-}
-EXPORT_SYMBOL(drm_connector_register);
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "SRC_W", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_src_w = prop;
 
-/**
- * drm_connector_unregister - unregister a connector
- * @connector: the connector to unregister
- *
- * Unregister userspace interfaces for a connector
- */
-void drm_connector_unregister(struct drm_connector *connector)
-{
-       if (!connector->registered)
-               return;
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "SRC_H", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_src_h = prop;
 
-       if (connector->funcs->early_unregister)
-               connector->funcs->early_unregister(connector);
+       prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "CRTC_X", INT_MIN, INT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_crtc_x = prop;
 
-       drm_sysfs_connector_remove(connector);
-       drm_debugfs_connector_remove(connector);
+       prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "CRTC_Y", INT_MIN, INT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_crtc_y = prop;
 
-       connector->registered = false;
-}
-EXPORT_SYMBOL(drm_connector_unregister);
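
The register/unregister pair above is the hook for connectors that appear and disappear at runtime (DP MST being the usual case). A minimal sketch of the pairing, assuming a hypothetical driver whose connector has already been through drm_connector_init(); the foo_* names are invented and not part of this patch:

static int foo_hotplug_connector(struct drm_connector *connector)
{
	/* Adds the sysfs/debugfs entries and runs the optional
	 * late_register hook; the connector->registered flag makes a
	 * repeated call a no-op. */
	return drm_connector_register(connector);
}

static void foo_unplug_connector(struct drm_connector *connector)
{
	/* Runs early_unregister, then removes sysfs/debugfs entries. */
	drm_connector_unregister(connector);
}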
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "CRTC_W", 0, INT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_crtc_w = prop;
 
-static void drm_connector_unregister_all(struct drm_device *dev)
-{
-       struct drm_connector *connector;
+       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                       "CRTC_H", 0, INT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_crtc_h = prop;
 
-       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               drm_connector_unregister(connector);
-}
+       prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+                       "FB_ID", DRM_MODE_OBJECT_FB);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_fb_id = prop;
 
-static int drm_connector_register_all(struct drm_device *dev)
-{
-       struct drm_connector *connector;
-       int ret;
+       prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+                       "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_crtc_id = prop;
 
-       mutex_lock(&dev->mode_config.mutex);
+       prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+                       "ACTIVE");
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_active = prop;
 
-       drm_for_each_connector(connector, dev) {
-               ret = drm_connector_register(connector);
-               if (ret)
-                       goto err;
-       }
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+                       "MODE_ID", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.prop_mode_id = prop;
 
-       mutex_unlock(&dev->mode_config.mutex);
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "DEGAMMA_LUT", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.degamma_lut_property = prop;
 
-       return 0;
+       prop = drm_property_create_range(dev,
+                       DRM_MODE_PROP_IMMUTABLE,
+                       "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.degamma_lut_size_property = prop;
 
-err:
-       mutex_unlock(&dev->mode_config.mutex);
-       drm_connector_unregister_all(dev);
-       return ret;
-}
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "CTM", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.ctm_property = prop;
 
-static int drm_encoder_register_all(struct drm_device *dev)
-{
-       struct drm_encoder *encoder;
-       int ret = 0;
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "GAMMA_LUT", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.gamma_lut_property = prop;
 
-       drm_for_each_encoder(encoder, dev) {
-               if (encoder->funcs->late_register)
-                       ret = encoder->funcs->late_register(encoder);
-               if (ret)
-                       return ret;
-       }
+       prop = drm_property_create_range(dev,
+                       DRM_MODE_PROP_IMMUTABLE,
+                       "GAMMA_LUT_SIZE", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.gamma_lut_size_property = prop;
 
        return 0;
 }
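
Every hunk added above follows the same create-then-store pattern: create the property, fail with -ENOMEM on NULL, stash the pointer in mode_config. A driver-side sketch of that pattern for a custom per-plane property; the "zpos" name and the attach target are purely illustrative, not part of this patch:

	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
	if (!prop)
		return -ENOMEM;
	/* Attach to the plane with an initial value of 0; keep the
	 * pointer around if the property must be looked up later. */
	drm_object_attach_property(&plane->base, prop, 0);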
 
-static void drm_encoder_unregister_all(struct drm_device *dev)
-{
-       struct drm_encoder *encoder;
-
-       drm_for_each_encoder(encoder, dev) {
-               if (encoder->funcs->early_unregister)
-                       encoder->funcs->early_unregister(encoder);
-       }
-}
-
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. The encoder should be
- * subclassed as part of driver encoder objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
-                     struct drm_encoder *encoder,
-                     const struct drm_encoder_funcs *funcs,
-                     int encoder_type, const char *name, ...)
-{
-       int ret;
-
-       drm_modeset_lock_all(dev);
-
-       ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
-       if (ret)
-               goto out_unlock;
-
-       encoder->dev = dev;
-       encoder->encoder_type = encoder_type;
-       encoder->funcs = funcs;
-       if (name) {
-               va_list ap;
-
-               va_start(ap, name);
-               encoder->name = kvasprintf(GFP_KERNEL, name, ap);
-               va_end(ap);
-       } else {
-               encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
-                                         drm_encoder_enum_list[encoder_type].name,
-                                         encoder->base.id);
-       }
-       if (!encoder->name) {
-               ret = -ENOMEM;
-               goto out_put;
-       }
-
-       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
-       encoder->index = dev->mode_config.num_encoder++;
-
-out_put:
-       if (ret)
-               drm_mode_object_unregister(dev, &encoder->base);
-
-out_unlock:
-       drm_modeset_unlock_all(dev);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_encoder_init);
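
A usage sketch for drm_encoder_init() with the encoder embedded in a driver structure, which is the subclassing pattern the kerneldoc above asks for; all foo_* names are hypothetical:

struct foo_output {
	struct drm_encoder encoder;
};

static const struct drm_encoder_funcs foo_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int foo_output_init(struct drm_device *dev, struct foo_output *out)
{
	/* Passing a NULL name picks the default "<type>-<id>" name. */
	return drm_encoder_init(dev, &out->encoder, &foo_encoder_funcs,
				DRM_MODE_ENCODER_TMDS, NULL);
}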
-
-/**
- * drm_encoder_cleanup - cleans up an initialised encoder
- * @encoder: encoder to cleanup
- *
- * Cleans up the encoder but doesn't free the object.
- */
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-
-       /* Note that the encoder_list is considered to be static; should we
-        * remove the drm_encoder at runtime we would have to decrement all
-        * the indices on the drm_encoder after us in the encoder_list.
-        */
-
-       drm_modeset_lock_all(dev);
-       drm_mode_object_unregister(dev, &encoder->base);
-       kfree(encoder->name);
-       list_del(&encoder->head);
-       dev->mode_config.num_encoder--;
-       drm_modeset_unlock_all(dev);
-
-       memset(encoder, 0, sizeof(*encoder));
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-static unsigned int drm_num_planes(struct drm_device *dev)
-{
-       unsigned int num = 0;
-       struct drm_plane *tmp;
-
-       drm_for_each_plane(tmp, dev) {
-               num++;
-       }
-
-       return num;
-}
-
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
-                            unsigned long possible_crtcs,
-                            const struct drm_plane_funcs *funcs,
-                            const uint32_t *formats, unsigned int format_count,
-                            enum drm_plane_type type,
-                            const char *name, ...)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       int ret;
-
-       ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
-       if (ret)
-               return ret;
-
-       drm_modeset_lock_init(&plane->mutex);
-
-       plane->base.properties = &plane->properties;
-       plane->dev = dev;
-       plane->funcs = funcs;
-       plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
-                                           GFP_KERNEL);
-       if (!plane->format_types) {
-               DRM_DEBUG_KMS("out of memory when allocating plane\n");
-               drm_mode_object_unregister(dev, &plane->base);
-               return -ENOMEM;
-       }
-
-       if (name) {
-               va_list ap;
-
-               va_start(ap, name);
-               plane->name = kvasprintf(GFP_KERNEL, name, ap);
-               va_end(ap);
-       } else {
-               plane->name = kasprintf(GFP_KERNEL, "plane-%d",
-                                       drm_num_planes(dev));
-       }
-       if (!plane->name) {
-               kfree(plane->format_types);
-               drm_mode_object_unregister(dev, &plane->base);
-               return -ENOMEM;
-       }
-
-       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
-       plane->format_count = format_count;
-       plane->possible_crtcs = possible_crtcs;
-       plane->type = type;
-
-       list_add_tail(&plane->head, &config->plane_list);
-       plane->index = config->num_total_plane++;
-       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-               config->num_overlay_plane++;
-
-       drm_object_attach_property(&plane->base,
-                                  config->plane_type_property,
-                                  plane->type);
-
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-               drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_x, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_y, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_w, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_h, 0);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_universal_plane_init);
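
Correspondingly, a hypothetical primary-plane setup built on drm_universal_plane_init(); the two-entry format list and the CRTC mask are invented for the example:

static const u32 foo_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static int foo_primary_plane_init(struct drm_device *dev,
				  struct drm_plane *plane,
				  const struct drm_plane_funcs *funcs)
{
	/* Plane usable on CRTC 0 only; a NULL name yields "plane-N". */
	return drm_universal_plane_init(dev, plane, BIT(0), funcs,
					foo_plane_formats,
					ARRAY_SIZE(foo_plane_formats),
					DRM_PLANE_TYPE_PRIMARY, NULL);
}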
-
-static int drm_plane_register_all(struct drm_device *dev)
-{
-       struct drm_plane *plane;
-       int ret = 0;
-
-       drm_for_each_plane(plane, dev) {
-               if (plane->funcs->late_register)
-                       ret = plane->funcs->late_register(plane);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static void drm_plane_unregister_all(struct drm_device *dev)
-{
-       struct drm_plane *plane;
-
-       drm_for_each_plane(plane, dev) {
-               if (plane->funcs->early_unregister)
-                       plane->funcs->early_unregister(plane);
-       }
-}
-
-/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
-                  unsigned long possible_crtcs,
-                  const struct drm_plane_funcs *funcs,
-                  const uint32_t *formats, unsigned int format_count,
-                  bool is_primary)
-{
-       enum drm_plane_type type;
-
-       type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-       return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
-                                       formats, format_count, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
- * drm_plane_cleanup - Clean up the core plane usage
- * @plane: plane to cleanup
- *
- * This function cleans up @plane and removes it from the DRM mode setting
- * core. Note that the function does *not* free the plane structure itself,
- * this is the responsibility of the caller.
- */
-void drm_plane_cleanup(struct drm_plane *plane)
-{
-       struct drm_device *dev = plane->dev;
-
-       drm_modeset_lock_all(dev);
-       kfree(plane->format_types);
-       drm_mode_object_unregister(dev, &plane->base);
-
-       BUG_ON(list_empty(&plane->head));
-
-       /* Note that the plane_list is considered to be static; should we
-        * remove the drm_plane at runtime we would have to decrement all
-        * the indices on the drm_plane after us in the plane_list.
-        */
-
-       list_del(&plane->head);
-       dev->mode_config.num_total_plane--;
-       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-               dev->mode_config.num_overlay_plane--;
-       drm_modeset_unlock_all(dev);
-
-       WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
-       if (plane->state && plane->funcs->atomic_destroy_state)
-               plane->funcs->atomic_destroy_state(plane, plane->state);
-
-       kfree(plane->name);
-
-       memset(plane, 0, sizeof(*plane));
-}
-EXPORT_SYMBOL(drm_plane_cleanup);
-
-/**
- * drm_plane_from_index - find the registered plane at an index
- * @dev: DRM device
- * @idx: index of the registered plane to find
- *
- * Given a plane index, return the registered plane from the DRM device's
- * list of planes with the matching index.
- */
-struct drm_plane *
-drm_plane_from_index(struct drm_device *dev, int idx)
-{
-       struct drm_plane *plane;
-
-       drm_for_each_plane(plane, dev)
-               if (idx == plane->index)
-                       return plane;
-
-       return NULL;
-}
-EXPORT_SYMBOL(drm_plane_from_index);
-
-/**
- * drm_plane_force_disable - Forcibly disable a plane
- * @plane: plane to disable
- *
- * Forces the plane to be disabled.
- *
- * Used when the plane's current framebuffer is destroyed,
- * and when restoring fbdev mode.
- */
-void drm_plane_force_disable(struct drm_plane *plane)
-{
-       int ret;
-
-       if (!plane->fb)
-               return;
-
-       plane->old_fb = plane->fb;
-       ret = plane->funcs->disable_plane(plane);
-       if (ret) {
-               DRM_ERROR("failed to disable plane with busy fb\n");
-               plane->old_fb = NULL;
-               return;
-       }
-       /* disconnect the plane from the fb and crtc: */
-       drm_framebuffer_unreference(plane->old_fb);
-       plane->old_fb = NULL;
-       plane->fb = NULL;
-       plane->crtc = NULL;
-}
-EXPORT_SYMBOL(drm_plane_force_disable);
-
-int drm_modeset_register_all(struct drm_device *dev)
-{
-       int ret;
-
-       ret = drm_plane_register_all(dev);
-       if (ret)
-               goto err_plane;
-
-       ret = drm_crtc_register_all(dev);
-       if (ret)
-               goto err_crtc;
-
-       ret = drm_encoder_register_all(dev);
-       if (ret)
-               goto err_encoder;
-
-       ret = drm_connector_register_all(dev);
-       if (ret)
-               goto err_connector;
-
-       return 0;
-
-err_connector:
-       drm_encoder_unregister_all(dev);
-err_encoder:
-       drm_crtc_unregister_all(dev);
-err_crtc:
-       drm_plane_unregister_all(dev);
-err_plane:
-       return ret;
-}
-
-void drm_modeset_unregister_all(struct drm_device *dev)
-{
-       drm_connector_unregister_all(dev);
-       drm_encoder_unregister_all(dev);
-       drm_crtc_unregister_all(dev);
-       drm_plane_unregister_all(dev);
-}
-
-static int drm_mode_create_standard_properties(struct drm_device *dev)
-{
-       struct drm_property *prop;
-
-       /*
-        * Standard properties (apply to all connectors)
-        */
-       prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
-                                  DRM_MODE_PROP_IMMUTABLE,
-                                  "EDID", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.edid_property = prop;
-
-       prop = drm_property_create_enum(dev, 0,
-                                  "DPMS", drm_dpms_enum_list,
-                                  ARRAY_SIZE(drm_dpms_enum_list));
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.dpms_property = prop;
-
-       prop = drm_property_create(dev,
-                                  DRM_MODE_PROP_BLOB |
-                                  DRM_MODE_PROP_IMMUTABLE,
-                                  "PATH", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.path_property = prop;
-
-       prop = drm_property_create(dev,
-                                  DRM_MODE_PROP_BLOB |
-                                  DRM_MODE_PROP_IMMUTABLE,
-                                  "TILE", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.tile_property = prop;
-
-       prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-                                       "type", drm_plane_type_enum_list,
-                                       ARRAY_SIZE(drm_plane_type_enum_list));
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.plane_type_property = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "SRC_X", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_src_x = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "SRC_Y", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_src_y = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "SRC_W", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_src_w = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "SRC_H", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_src_h = prop;
-
-       prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "CRTC_X", INT_MIN, INT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_crtc_x = prop;
-
-       prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "CRTC_Y", INT_MIN, INT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_crtc_y = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "CRTC_W", 0, INT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_crtc_w = prop;
-
-       prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
-                       "CRTC_H", 0, INT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_crtc_h = prop;
-
-       prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
-                       "FB_ID", DRM_MODE_OBJECT_FB);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_fb_id = prop;
-
-       prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
-                       "CRTC_ID", DRM_MODE_OBJECT_CRTC);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_crtc_id = prop;
-
-       prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
-                       "ACTIVE");
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_active = prop;
-
-       prop = drm_property_create(dev,
-                       DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
-                       "MODE_ID", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.prop_mode_id = prop;
-
-       prop = drm_property_create(dev,
-                       DRM_MODE_PROP_BLOB,
-                       "DEGAMMA_LUT", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.degamma_lut_property = prop;
-
-       prop = drm_property_create_range(dev,
-                       DRM_MODE_PROP_IMMUTABLE,
-                       "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.degamma_lut_size_property = prop;
-
-       prop = drm_property_create(dev,
-                       DRM_MODE_PROP_BLOB,
-                       "CTM", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.ctm_property = prop;
-
-       prop = drm_property_create(dev,
-                       DRM_MODE_PROP_BLOB,
-                       "GAMMA_LUT", 0);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.gamma_lut_property = prop;
-
-       prop = drm_property_create_range(dev,
-                       DRM_MODE_PROP_IMMUTABLE,
-                       "GAMMA_LUT_SIZE", 0, UINT_MAX);
-       if (!prop)
-               return -ENOMEM;
-       dev->mode_config.gamma_lut_size_property = prop;
-
-       return 0;
-}
-
-/**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
-       struct drm_property *dvi_i_selector;
-       struct drm_property *dvi_i_subconnector;
-
-       if (dev->mode_config.dvi_i_select_subconnector_property)
-               return 0;
-
-       dvi_i_selector =
-               drm_property_create_enum(dev, 0,
-                                   "select subconnector",
-                                   drm_dvi_i_select_enum_list,
-                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
-       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
-       dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-                                   "subconnector",
-                                   drm_dvi_i_subconnector_enum_list,
-                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
-       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
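
A sketch of pairing the helper above with drm_object_attach_property() on a DVI-I connector; the initial values are illustrative defaults and the function name is invented:

static void foo_attach_dvi_i_props(struct drm_connector *connector)
{
	struct drm_mode_config *conf = &connector->dev->mode_config;

	drm_mode_create_dvi_i_properties(connector->dev);
	drm_object_attach_property(&connector->base,
				   conf->dvi_i_select_subconnector_property,
				   DRM_MODE_SUBCONNECTOR_Automatic);
	drm_object_attach_property(&connector->base,
				   conf->dvi_i_subconnector_property,
				   DRM_MODE_SUBCONNECTOR_Unknown);
}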
-
-/**
- * drm_mode_create_tv_properties - create TV-specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing the name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV-specific connector properties for a given device.  The caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev,
-                                 unsigned int num_modes,
-                                 const char * const modes[])
-{
-       struct drm_property *tv_selector;
-       struct drm_property *tv_subconnector;
-       unsigned int i;
-
-       if (dev->mode_config.tv_select_subconnector_property)
-               return 0;
-
-       /*
-        * Basic connector properties
-        */
-       tv_selector = drm_property_create_enum(dev, 0,
-                                         "select subconnector",
-                                         drm_tv_select_enum_list,
-                                         ARRAY_SIZE(drm_tv_select_enum_list));
-       if (!tv_selector)
-               goto nomem;
-
-       dev->mode_config.tv_select_subconnector_property = tv_selector;
-
-       tv_subconnector =
-               drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-                                   "subconnector",
-                                   drm_tv_subconnector_enum_list,
-                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
-       if (!tv_subconnector)
-               goto nomem;
-       dev->mode_config.tv_subconnector_property = tv_subconnector;
-
-       /*
-        * Other, TV specific properties: margins & TV modes.
-        */
-       dev->mode_config.tv_left_margin_property =
-               drm_property_create_range(dev, 0, "left margin", 0, 100);
-       if (!dev->mode_config.tv_left_margin_property)
-               goto nomem;
-
-       dev->mode_config.tv_right_margin_property =
-               drm_property_create_range(dev, 0, "right margin", 0, 100);
-       if (!dev->mode_config.tv_right_margin_property)
-               goto nomem;
-
-       dev->mode_config.tv_top_margin_property =
-               drm_property_create_range(dev, 0, "top margin", 0, 100);
-       if (!dev->mode_config.tv_top_margin_property)
-               goto nomem;
-
-       dev->mode_config.tv_bottom_margin_property =
-               drm_property_create_range(dev, 0, "bottom margin", 0, 100);
-       if (!dev->mode_config.tv_bottom_margin_property)
-               goto nomem;
-
-       dev->mode_config.tv_mode_property =
-               drm_property_create(dev, DRM_MODE_PROP_ENUM,
-                                   "mode", num_modes);
-       if (!dev->mode_config.tv_mode_property)
-               goto nomem;
-
-       for (i = 0; i < num_modes; i++)
-               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
-                                     i, modes[i]);
-
-       dev->mode_config.tv_brightness_property =
-               drm_property_create_range(dev, 0, "brightness", 0, 100);
-       if (!dev->mode_config.tv_brightness_property)
-               goto nomem;
-
-       dev->mode_config.tv_contrast_property =
-               drm_property_create_range(dev, 0, "contrast", 0, 100);
-       if (!dev->mode_config.tv_contrast_property)
-               goto nomem;
-
-       dev->mode_config.tv_flicker_reduction_property =
-               drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
-       if (!dev->mode_config.tv_flicker_reduction_property)
-               goto nomem;
-
-       dev->mode_config.tv_overscan_property =
-               drm_property_create_range(dev, 0, "overscan", 0, 100);
-       if (!dev->mode_config.tv_overscan_property)
-               goto nomem;
-
-       dev->mode_config.tv_saturation_property =
-               drm_property_create_range(dev, 0, "saturation", 0, 100);
-       if (!dev->mode_config.tv_saturation_property)
-               goto nomem;
-
-       dev->mode_config.tv_hue_property =
-               drm_property_create_range(dev, 0, "hue", 0, 100);
-       if (!dev->mode_config.tv_hue_property)
-               goto nomem;
-
-       return 0;
-nomem:
-       return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
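
A minimal sketch of a TV-out driver's call into the helper above, assuming a hypothetical two-entry format list with static storage:

static const char * const foo_tv_modes[] = {
	"NTSC",
	"PAL",
};

static int foo_tv_init_properties(struct drm_device *dev)
{
	return drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_modes),
					     foo_tv_modes);
}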
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; it must be attached to the
- * desired connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
-       struct drm_property *scaling_mode;
-
-       if (dev->mode_config.scaling_mode_property)
-               return 0;
-
-       scaling_mode =
-               drm_property_create_enum(dev, 0, "scaling mode",
-                               drm_scaling_mode_enum_list,
-                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
-
-       dev->mode_config.scaling_mode_property = scaling_mode;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_aspect_ratio_property - create aspect ratio property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; it must be attached to the
- * desired connectors.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
-{
-       if (dev->mode_config.aspect_ratio_property)
-               return 0;
-
-       dev->mode_config.aspect_ratio_property =
-               drm_property_create_enum(dev, 0, "aspect ratio",
-                               drm_aspect_ratio_enum_list,
-                               ARRAY_SIZE(drm_aspect_ratio_enum_list));
-
-       if (dev->mode_config.aspect_ratio_property == NULL)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed; it must be attached to the
- * desired connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
-       struct drm_property *dirty_info;
-
-       if (dev->mode_config.dirty_info_property)
-               return 0;
-
-       dirty_info =
-               drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-                                   "dirty",
-                                   drm_dirty_info_enum_list,
-                                   ARRAY_SIZE(drm_dirty_info_enum_list));
-       dev->mode_config.dirty_info_property = dirty_info;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_create_suggested_offset_properties - create suggested offset properties
- * @dev: DRM device
- *
- * Create the suggested x/y offset properties for connectors.
- */
-int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
-{
-       if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
-               return 0;
-
-       dev->mode_config.suggested_x_property =
-               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
-
-       dev->mode_config.suggested_y_property =
-               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
-
-       if (dev->mode_config.suggested_x_property == NULL ||
-           dev->mode_config.suggested_y_property == NULL)
-               return -ENOMEM;
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       struct drm_mode_card_res *card_res = data;
-       struct list_head *lh;
-       struct drm_framebuffer *fb;
-       struct drm_connector *connector;
-       struct drm_crtc *crtc;
-       struct drm_encoder *encoder;
-       int ret = 0;
-       int connector_count = 0;
-       int crtc_count = 0;
-       int fb_count = 0;
-       int encoder_count = 0;
-       int copied = 0;
-       uint32_t __user *fb_id;
-       uint32_t __user *crtc_id;
-       uint32_t __user *connector_id;
-       uint32_t __user *encoder_id;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-
-       mutex_lock(&file_priv->fbs_lock);
-       /*
-        * For the non-control nodes we need to limit the list of resources
-        * by IDs in the group list for this node
-        */
-       list_for_each(lh, &file_priv->fbs)
-               fb_count++;
-
-       /* handle this in 4 parts */
-       /* FBs */
-       if (card_res->count_fbs >= fb_count) {
-               copied = 0;
-               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
-               list_for_each_entry(fb, &file_priv->fbs, filp_head) {
-                       if (put_user(fb->base.id, fb_id + copied)) {
-                               mutex_unlock(&file_priv->fbs_lock);
-                               return -EFAULT;
-                       }
-                       copied++;
-               }
-       }
-       card_res->count_fbs = fb_count;
-       mutex_unlock(&file_priv->fbs_lock);
-
-       /* mode_config.mutex protects the connector list against e.g. DP MST
-        * connector hot-adding. CRTC/Plane lists are invariant. */
-       mutex_lock(&dev->mode_config.mutex);
-       drm_for_each_crtc(crtc, dev)
-               crtc_count++;
-
-       drm_for_each_connector(connector, dev)
-               connector_count++;
-
-       drm_for_each_encoder(encoder, dev)
-               encoder_count++;
-
-       card_res->max_height = dev->mode_config.max_height;
-       card_res->min_height = dev->mode_config.min_height;
-       card_res->max_width = dev->mode_config.max_width;
-       card_res->min_width = dev->mode_config.min_width;
-
-       /* CRTCs */
-       if (card_res->count_crtcs >= crtc_count) {
-               copied = 0;
-               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-               drm_for_each_crtc(crtc, dev) {
-                       if (put_user(crtc->base.id, crtc_id + copied)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       copied++;
-               }
-       }
-       card_res->count_crtcs = crtc_count;
-
-       /* Encoders */
-       if (card_res->count_encoders >= encoder_count) {
-               copied = 0;
-               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-               drm_for_each_encoder(encoder, dev) {
-                       if (put_user(encoder->base.id, encoder_id +
-                                    copied)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       copied++;
-               }
-       }
-       card_res->count_encoders = encoder_count;
-
-       /* Connectors */
-       if (card_res->count_connectors >= connector_count) {
-               copied = 0;
-               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-               drm_for_each_connector(connector, dev) {
-                       if (put_user(connector->base.id,
-                                    connector_id + copied)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       copied++;
-               }
-       }
-       card_res->count_connectors = connector_count;
-
-out:
-       mutex_unlock(&dev->mode_config.mutex);
-       return ret;
-}
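
Userspace drives this ioctl with exactly the two-call pattern the code implements: one call with zeroed pointers to learn the counts, a second with buffers to fill. A hedged userspace-side sketch that fetches only the CRTC ids via libdrm's drmIoctl() wrapper; real clients would normally just use drmModeGetResources(), and should retry if counts change under hotplug:

static int foo_get_crtc_ids(int fd, uint32_t **ids_out, uint32_t *count_out)
{
	struct drm_mode_card_res res;
	uint32_t count, *ids;

	memset(&res, 0, sizeof(res));
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
		return -errno;

	count = res.count_crtcs;
	ids = calloc(count, sizeof(*ids));
	if (!ids)
		return -ENOMEM;

	/* Ask only for CRTCs; the other counts stay zero so the kernel
	 * skips copying fbs/encoders/connectors on the second call. */
	memset(&res, 0, sizeof(res));
	res.count_crtcs = count;
	res.crtc_id_ptr = (uintptr_t)ids;
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
		free(ids);
		return -errno;
	}

	*ids_out = ids;
	*count_out = count;
	return 0;
}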
-
-/**
- * drm_mode_getcrtc - get CRTC configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a CRTC configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getcrtc(struct drm_device *dev,
-                    void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_crtc *crtc_resp = data;
-       struct drm_crtc *crtc;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
-       if (!crtc)
-               return -ENOENT;
-
-       drm_modeset_lock_crtc(crtc, crtc->primary);
-       crtc_resp->gamma_size = crtc->gamma_size;
-       if (crtc->primary->fb)
-               crtc_resp->fb_id = crtc->primary->fb->base.id;
-       else
-               crtc_resp->fb_id = 0;
-
-       if (crtc->state) {
-               crtc_resp->x = crtc->primary->state->src_x >> 16;
-               crtc_resp->y = crtc->primary->state->src_y >> 16;
-               if (crtc->state->enable) {
-                       drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
-                       crtc_resp->mode_valid = 1;
-
-               } else {
-                       crtc_resp->mode_valid = 0;
-               }
-       } else {
-               crtc_resp->x = crtc->x;
-               crtc_resp->y = crtc->y;
-               if (crtc->enabled) {
-                       drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
-                       crtc_resp->mode_valid = 1;
-
-               } else {
-                       crtc_resp->mode_valid = 0;
-               }
-       }
-       drm_modeset_unlock_crtc(crtc);
-
-       return 0;
-}
-
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-                                        const struct drm_file *file_priv)
-{
-       /*
-        * If user-space hasn't configured the driver to expose the stereo 3D
-        * modes, don't expose them.
-        */
-       if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
-               return false;
-
-       return true;
-}
-
-static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
-{
-       /* For atomic drivers only state objects are synchronously updated and
-        * protected by modeset locks, so check those first. */
-       if (connector->state)
-               return connector->state->best_encoder;
-       return connector->encoder;
-}
-
-/* helper for getconnector and getproperties ioctls */
-static int get_properties(struct drm_mode_object *obj, bool atomic,
-               uint32_t __user *prop_ptr, uint64_t __user *prop_values,
-               uint32_t *arg_count_props)
-{
-       int props_count;
-       int i, ret, copied;
-
-       props_count = obj->properties->count;
-       if (!atomic)
-               props_count -= obj->properties->atomic_count;
-
-       if ((*arg_count_props >= props_count) && props_count) {
-               for (i = 0, copied = 0; copied < props_count; i++) {
-                       struct drm_property *prop = obj->properties->properties[i];
-                       uint64_t val;
-
-                       if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
-                               continue;
-
-                       ret = drm_object_property_get_value(obj, prop, &val);
-                       if (ret)
-                               return ret;
-
-                       if (put_user(prop->base.id, prop_ptr + copied))
-                               return -EFAULT;
-
-                       if (put_user(val, prop_values + copied))
-                               return -EFAULT;
-
-                       copied++;
-               }
-       }
-       *arg_count_props = props_count;
-
-       return 0;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       struct drm_mode_get_connector *out_resp = data;
-       struct drm_connector *connector;
-       struct drm_encoder *encoder;
-       struct drm_display_mode *mode;
-       int mode_count = 0;
-       int encoders_count = 0;
-       int ret = 0;
-       int copied = 0;
-       int i;
-       struct drm_mode_modeinfo u_mode;
-       struct drm_mode_modeinfo __user *mode_ptr;
-       uint32_t __user *encoder_ptr;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
-       mutex_lock(&dev->mode_config.mutex);
-
-       connector = drm_connector_lookup(dev, out_resp->connector_id);
-       if (!connector) {
-               ret = -ENOENT;
-               goto out_unlock;
-       }
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-               if (connector->encoder_ids[i] != 0)
-                       encoders_count++;
-
-       if (out_resp->count_modes == 0) {
-               connector->funcs->fill_modes(connector,
-                                            dev->mode_config.max_width,
-                                            dev->mode_config.max_height);
-       }
-
-       /* delayed so we get modes regardless of pre-fill_modes state */
-       list_for_each_entry(mode, &connector->modes, head)
-               if (drm_mode_expose_to_userspace(mode, file_priv))
-                       mode_count++;
-
-       out_resp->connector_id = connector->base.id;
-       out_resp->connector_type = connector->connector_type;
-       out_resp->connector_type_id = connector->connector_type_id;
-       out_resp->mm_width = connector->display_info.width_mm;
-       out_resp->mm_height = connector->display_info.height_mm;
-       out_resp->subpixel = connector->display_info.subpixel_order;
-       out_resp->connection = connector->status;
-
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       encoder = drm_connector_get_encoder(connector);
-       if (encoder)
-               out_resp->encoder_id = encoder->base.id;
-       else
-               out_resp->encoder_id = 0;
-
-       /*
-        * This ioctl is called twice, once to determine how much space is
-        * needed, and the 2nd time to fill it.
-        */
-       if ((out_resp->count_modes >= mode_count) && mode_count) {
-               copied = 0;
-               mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-               list_for_each_entry(mode, &connector->modes, head) {
-                       if (!drm_mode_expose_to_userspace(mode, file_priv))
-                               continue;
-
-                       drm_mode_convert_to_umode(&u_mode, mode);
-                       if (copy_to_user(mode_ptr + copied,
-                                        &u_mode, sizeof(u_mode))) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       copied++;
-               }
-       }
-       out_resp->count_modes = mode_count;
-
-       ret = get_properties(&connector->base, file_priv->atomic,
-                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-                       &out_resp->count_props);
-       if (ret)
-               goto out;
-
-       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
-               copied = 0;
-               encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (connector->encoder_ids[i] != 0) {
-                               if (put_user(connector->encoder_ids[i],
-                                            encoder_ptr + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
-                       }
-               }
-       }
-       out_resp->count_encoders = encoders_count;
-
-out:
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-       drm_connector_unreference(connector);
-out_unlock:
-       mutex_unlock(&dev->mode_config.mutex);
-
-       return ret;
-}
-
-static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-       struct drm_connector *connector;
-       struct drm_device *dev = encoder->dev;
-       bool uses_atomic = false;
-
-       /* For atomic drivers only state objects are synchronously updated and
-        * protected by modeset locks, so check those first. */
-       drm_for_each_connector(connector, dev) {
-               if (!connector->state)
-                       continue;
-
-               uses_atomic = true;
-
-               if (connector->state->best_encoder != encoder)
-                       continue;
-
-               return connector->state->crtc;
-       }
-
-       /* Don't return stale data (e.g. pending async disable). */
-       if (uses_atomic)
-               return NULL;
-
-       return encoder->crtc;
-}
-
-/**
- * drm_mode_getencoder - get encoder configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct an encoder configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getencoder(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
-{
-       struct drm_mode_get_encoder *enc_resp = data;
-       struct drm_encoder *encoder;
-       struct drm_crtc *crtc;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       encoder = drm_encoder_find(dev, enc_resp->encoder_id);
-       if (!encoder)
-               return -ENOENT;
-
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       crtc = drm_encoder_get_crtc(encoder);
-       if (crtc)
-               enc_resp->crtc_id = crtc->base.id;
-       else
-               enc_resp->crtc_id = 0;
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-       enc_resp->encoder_type = encoder->encoder_type;
-       enc_resp->encoder_id = encoder->base.id;
-       enc_resp->possible_crtcs = encoder->possible_crtcs;
-       enc_resp->possible_clones = encoder->possible_clones;
-
-       return 0;
-}
-
-/**
- * drm_mode_getplane_res - enumerate all plane resources
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a list of plane ids to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       struct drm_mode_get_plane_res *plane_resp = data;
-       struct drm_mode_config *config;
-       struct drm_plane *plane;
-       uint32_t __user *plane_ptr;
-       int copied = 0;
-       unsigned num_planes;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       config = &dev->mode_config;
-
-       if (file_priv->universal_planes)
-               num_planes = config->num_total_plane;
-       else
-               num_planes = config->num_overlay_plane;
-
-       /*
-        * This ioctl is called twice, once to determine how much space is
-        * needed, and the 2nd time to fill it.
-        */
-       if (num_planes &&
-           (plane_resp->count_planes >= num_planes)) {
-               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
-               /* Plane lists are invariant, no locking needed. */
-               drm_for_each_plane(plane, dev) {
-                       /*
-                        * Unless userspace set the 'universal planes'
-                        * capability bit, only advertise overlays.
-                        */
-                       if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
-                           !file_priv->universal_planes)
-                               continue;
-
-                       if (put_user(plane->base.id, plane_ptr + copied))
-                               return -EFAULT;
-                       copied++;
-               }
-       }
-       plane_resp->count_planes = num_planes;
-
-       return 0;
-}
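
On the userspace side the gate is the universal-planes client cap; until it is set, only overlay planes are enumerated. A two-line libdrm sketch:

	/* Opt in to seeing primary and cursor planes as well. */
	drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
	drmModePlaneResPtr planes = drmModeGetPlaneResources(fd);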
-
-/**
- * drm_mode_getplane - get plane configuration
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a plane configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
-{
-       struct drm_mode_get_plane *plane_resp = data;
-       struct drm_plane *plane;
-       uint32_t __user *format_ptr;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       plane = drm_plane_find(dev, plane_resp->plane_id);
-       if (!plane)
-               return -ENOENT;
-
-       drm_modeset_lock(&plane->mutex, NULL);
-       if (plane->crtc)
-               plane_resp->crtc_id = plane->crtc->base.id;
-       else
-               plane_resp->crtc_id = 0;
-
-       if (plane->fb)
-               plane_resp->fb_id = plane->fb->base.id;
-       else
-               plane_resp->fb_id = 0;
-       drm_modeset_unlock(&plane->mutex);
-
-       plane_resp->plane_id = plane->base.id;
-       plane_resp->possible_crtcs = plane->possible_crtcs;
-       plane_resp->gamma_size = 0;
-
-       /*
-        * This ioctl is called twice, once to determine how much space is
-        * needed, and the 2nd time to fill it.
-        */
-       if (plane->format_count &&
-           (plane_resp->count_format_types >= plane->format_count)) {
-               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
-               if (copy_to_user(format_ptr,
-                                plane->format_types,
-                                sizeof(uint32_t) * plane->format_count)) {
-                       return -EFAULT;
-               }
-       }
-       plane_resp->count_format_types = plane->format_count;
-
-       return 0;
-}
-
-/**
- * drm_plane_check_pixel_format - Check if the plane supports the pixel format
- * @plane: plane to check for format support
- * @format: the pixel format
- *
- * Returns:
- * Zero if @plane has @format in its list of supported pixel formats, -EINVAL
- * otherwise.
- */
-int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
-{
-       unsigned int i;
-
-       for (i = 0; i < plane->format_count; i++) {
-               if (format == plane->format_types[i])
-                       return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int check_src_coords(uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h,
-                           const struct drm_framebuffer *fb)
-{
-       unsigned int fb_width, fb_height;
-
-       fb_width = fb->width << 16;
-       fb_height = fb->height << 16;
-
-       /* Make sure source coordinates are inside the fb. */
-       if (src_w > fb_width ||
-           src_x > fb_width - src_w ||
-           src_h > fb_height ||
-           src_y > fb_height - src_h) {
-               DRM_DEBUG_KMS("Invalid source coordinates "
-                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-                             src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
-                             src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
-                             src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
-                             src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
-               return -ENOSPC;
-       }
-
-       return 0;
-}
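
The src_* values are 16.16 fixed point, and the "* 15625) >> 10" dance in the debug message above is (x * 10^6) / 2^16 with the common factor of 64 cancelled, i.e. the fractional part scaled to millionths. A few illustrative helpers (names invented):

static inline uint32_t foo_to_fixed16(uint32_t pixels)
{
	return pixels << 16;		/* whole pixels -> 16.16 */
}

static inline uint32_t foo_fixed16_int(uint32_t fixed)
{
	return fixed >> 16;		/* integer part */
}

static inline uint32_t foo_fixed16_micro(uint32_t fixed)
{
	/* fractional part in millionths: (x & 0xffff) * 10^6 / 65536 */
	return ((fixed & 0xffff) * 15625) >> 10;
}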
-
-/*
- * setplane_internal - setplane handler for internal callers
- *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
- *
- * src_{x,y,w,h} are provided in 16.16 fixed point format
- */
-static int __setplane_internal(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int32_t crtc_x, int32_t crtc_y,
-                              uint32_t crtc_w, uint32_t crtc_h,
-                              /* src_{x,y,w,h} values are 16.16 fixed point */
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       int ret = 0;
-
-       /* No fb means shut it down */
-       if (!fb) {
-               plane->old_fb = plane->fb;
-               ret = plane->funcs->disable_plane(plane);
-               if (!ret) {
-                       plane->crtc = NULL;
-                       plane->fb = NULL;
-               } else {
-                       plane->old_fb = NULL;
-               }
-               goto out;
-       }
-
-       /* Check whether this plane is usable on this CRTC */
-       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
-               DRM_DEBUG_KMS("Invalid crtc for plane\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* Check whether this plane supports the fb pixel format. */
-       ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
-       if (ret) {
-               DRM_DEBUG_KMS("Invalid pixel format %s\n",
-                             drm_get_format_name(fb->pixel_format));
-               goto out;
-       }
-
-       /* Give drivers some help against integer overflows */
-       if (crtc_w > INT_MAX ||
-           crtc_x > INT_MAX - (int32_t) crtc_w ||
-           crtc_h > INT_MAX ||
-           crtc_y > INT_MAX - (int32_t) crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             crtc_w, crtc_h, crtc_x, crtc_y);
-               ret = -ERANGE;
-               goto out;
-       }
-
-       ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
-       if (ret)
-               goto out;
-
-       plane->old_fb = plane->fb;
-       ret = plane->funcs->update_plane(plane, crtc, fb,
-                                        crtc_x, crtc_y, crtc_w, crtc_h,
-                                        src_x, src_y, src_w, src_h);
-       if (!ret) {
-               plane->crtc = crtc;
-               plane->fb = fb;
-               fb = NULL;
-       } else {
-               plane->old_fb = NULL;
-       }
-
-out:
-       if (fb)
-               drm_framebuffer_unreference(fb);
-       if (plane->old_fb)
-               drm_framebuffer_unreference(plane->old_fb);
-       plane->old_fb = NULL;
-
-       return ret;
-}
-
-static int setplane_internal(struct drm_plane *plane,
-                            struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb,
-                            int32_t crtc_x, int32_t crtc_y,
-                            uint32_t crtc_w, uint32_t crtc_h,
-                            /* src_{x,y,w,h} values are 16.16 fixed point */
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
-{
-       int ret;
-
-       drm_modeset_lock_all(plane->dev);
-       ret = __setplane_internal(plane, crtc, fb,
-                                 crtc_x, crtc_y, crtc_w, crtc_h,
-                                 src_x, src_y, src_w, src_h);
-       drm_modeset_unlock_all(plane->dev);
-
-       return ret;
-}
-
-/**
- * drm_mode_setplane - configure a plane
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set the plane configuration, including placement, fb, scaling, and other
- * factors, or pass a NULL fb to disable the plane (planes may be disabled
- * without providing a valid crtc).
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
-{
-       struct drm_mode_set_plane *plane_req = data;
-       struct drm_plane *plane;
-       struct drm_crtc *crtc = NULL;
-       struct drm_framebuffer *fb = NULL;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       /*
-        * First, find the plane, crtc, and fb objects.  If not available,
-        * we don't bother to call the driver.
-        */
-       plane = drm_plane_find(dev, plane_req->plane_id);
-       if (!plane) {
-               DRM_DEBUG_KMS("Unknown plane ID %d\n",
-                             plane_req->plane_id);
-               return -ENOENT;
-       }
-
-       if (plane_req->fb_id) {
-               fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-               if (!fb) {
-                       DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-                                     plane_req->fb_id);
-                       return -ENOENT;
-               }
-
-               crtc = drm_crtc_find(dev, plane_req->crtc_id);
-               if (!crtc) {
-                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-                                     plane_req->crtc_id);
-                       return -ENOENT;
-               }
-       }
-
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       return setplane_internal(plane, crtc, fb,
-                                plane_req->crtc_x, plane_req->crtc_y,
-                                plane_req->crtc_w, plane_req->crtc_h,
-                                plane_req->src_x, plane_req->src_y,
-                                plane_req->src_w, plane_req->src_h);
-}
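
/*
 * Editorial sketch (not part of this diff): userspace typically reaches
 * drm_mode_setplane() through libdrm's drmModeSetPlane().  The src_*
 * coordinates are 16.16 fixed point, so whole pixels are shifted left by
 * 16.  All object IDs below are hypothetical.
 */
#if 0
#include <xf86drmMode.h>

static int show_full_fb(int fd, uint32_t plane_id, uint32_t crtc_id,
			uint32_t fb_id, uint32_t w, uint32_t h)
{
	/* Scan out the whole fb at the top-left corner of the CRTC. */
	return drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0 /* flags */,
			       0, 0, w, h,              /* crtc_{x,y,w,h} */
			       0, 0, w << 16, h << 16); /* src in 16.16 */
}
#endif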
-
-/**
- * drm_mode_set_config_internal - helper to call ->set_config
- * @set: modeset config to set
- *
- * This is a little helper to wrap internal calls to the ->set_config driver
- * interface. The only thing it adds is the correct refcounting dance.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_set_config_internal(struct drm_mode_set *set)
-{
-       struct drm_crtc *crtc = set->crtc;
-       struct drm_framebuffer *fb;
-       struct drm_crtc *tmp;
-       int ret;
-
-       /*
-        * NOTE: ->set_config can also disable other crtcs (if we steal all
-        * connectors from it), hence we need to refcount the fbs across all
-        * crtcs. Atomic modeset will have saner semantics ...
-        */
-       drm_for_each_crtc(tmp, crtc->dev)
-               tmp->primary->old_fb = tmp->primary->fb;
-
-       fb = set->fb;
-
-       ret = crtc->funcs->set_config(set);
-       if (ret == 0) {
-               crtc->primary->crtc = crtc;
-               crtc->primary->fb = fb;
-       }
-
-       drm_for_each_crtc(tmp, crtc->dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_reference(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_unreference(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_set_config_internal);
-
-/**
- * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
- * @mode: mode to query
- * @hdisplay: hdisplay value to fill in
- * @vdisplay: vdisplay value to fill in
- *
- * The vdisplay value will be doubled if the specified mode is a stereo mode of
- * the appropriate layout.
- */
-void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
-                           int *hdisplay, int *vdisplay)
-{
-       struct drm_display_mode adjusted;
-
-       drm_mode_copy(&adjusted, mode);
-       drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
-       *hdisplay = adjusted.crtc_hdisplay;
-       *vdisplay = adjusted.crtc_vdisplay;
-}
-EXPORT_SYMBOL(drm_crtc_get_hv_timing);
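
/*
 * Editorial note: a minimal use of drm_crtc_get_hv_timing(), mirroring
 * drm_crtc_check_viewport() below (the mode pointer is hypothetical).
 * For ordinary modes the results equal hdisplay/vdisplay; for a
 * frame-packed stereo 1920x1080 mode vdisplay comes back stereo-doubled
 * plus the vblank (2205 active lines).
 */
#if 0
int hdisplay, vdisplay;

drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
/* hdisplay/vdisplay now describe the full stereo-aware viewport. */
#endif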
-
-/**
- * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
- *     CRTC viewport
- * @crtc: CRTC that framebuffer will be displayed on
- * @x: x panning
- * @y: y panning
- * @mode: mode that framebuffer will be displayed under
- * @fb: framebuffer to check size of
- */
-int drm_crtc_check_viewport(const struct drm_crtc *crtc,
-                           int x, int y,
-                           const struct drm_display_mode *mode,
-                           const struct drm_framebuffer *fb)
-{
-       int hdisplay, vdisplay;
-
-       drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
-
-       if (crtc->state &&
-           crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
-                                             BIT(DRM_ROTATE_270)))
-               swap(hdisplay, vdisplay);
-
-       return check_src_coords(x << 16, y << 16,
-                               hdisplay << 16, vdisplay << 16, fb);
-}
-EXPORT_SYMBOL(drm_crtc_check_viewport);
-
-/**
- * drm_mode_setcrtc - set CRTC configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Build a new CRTC configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setcrtc(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_mode_crtc *crtc_req = data;
-       struct drm_crtc *crtc;
-       struct drm_connector **connector_set = NULL, *connector;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_display_mode *mode = NULL;
-       struct drm_mode_set set;
-       uint32_t __user *set_connectors_ptr;
-       int ret;
-       int i;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       /*
-        * Universal plane src offsets are only 16.16, prevent havoc for
-        * drivers using universal plane code internally.
-        */
-       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
-               return -ERANGE;
-
-       drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_req->crtc_id);
-       if (!crtc) {
-               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-               ret = -ENOENT;
-               goto out;
-       }
-       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
-
-       if (crtc_req->mode_valid) {
-               /* If we have a mode we need a framebuffer. */
-               /* If we pass -1, set the mode with the currently bound fb */
-               if (crtc_req->fb_id == -1) {
-                       if (!crtc->primary->fb) {
-                               DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       fb = crtc->primary->fb;
-                       /* Make refcounting symmetric with the lookup path. */
-                       drm_framebuffer_reference(fb);
-               } else {
-                       fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
-                       if (!fb) {
-                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
-                                               crtc_req->fb_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-               }
-
-               mode = drm_mode_create(dev);
-               if (!mode) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               ret = drm_mode_convert_umode(mode, &crtc_req->mode);
-               if (ret) {
-                       DRM_DEBUG_KMS("Invalid mode\n");
-                       goto out;
-               }
-
-               /*
-                * Check whether the primary plane supports the fb pixel format.
-                * Drivers not implementing the universal planes API use a
-                * default formats list provided by the DRM core which doesn't
-                * match real hardware capabilities. Skip the check in that
-                * case.
-                */
-               if (!crtc->primary->format_default) {
-                       ret = drm_plane_check_pixel_format(crtc->primary,
-                                                          fb->pixel_format);
-                       if (ret) {
-                               DRM_DEBUG_KMS("Invalid pixel format %s\n",
-                                       drm_get_format_name(fb->pixel_format));
-                               goto out;
-                       }
-               }
-
-               ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
-                                             mode, fb);
-               if (ret)
-                       goto out;
-
-       }
-
-       if (crtc_req->count_connectors == 0 && mode) {
-               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
-               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
-                         crtc_req->count_connectors);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (crtc_req->count_connectors > 0) {
-               u32 out_id;
-
-               /* Avoid unbounded kernel memory allocation */
-               if (crtc_req->count_connectors > config->num_connector) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               connector_set = kmalloc_array(crtc_req->count_connectors,
-                                             sizeof(struct drm_connector *),
-                                             GFP_KERNEL);
-               if (!connector_set) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               for (i = 0; i < crtc_req->count_connectors; i++) {
-                       connector_set[i] = NULL;
-                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
-                       if (get_user(out_id, &set_connectors_ptr[i])) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-
-                       connector = drm_connector_lookup(dev, out_id);
-                       if (!connector) {
-                               DRM_DEBUG_KMS("Connector id %d unknown\n",
-                                               out_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                                       connector->base.id,
-                                       connector->name);
-
-                       connector_set[i] = connector;
-               }
-       }
-
-       set.crtc = crtc;
-       set.x = crtc_req->x;
-       set.y = crtc_req->y;
-       set.mode = mode;
-       set.connectors = connector_set;
-       set.num_connectors = crtc_req->count_connectors;
-       set.fb = fb;
-       ret = drm_mode_set_config_internal(&set);
-
-out:
-       if (fb)
-               drm_framebuffer_unreference(fb);
-
-       if (connector_set) {
-               for (i = 0; i < crtc_req->count_connectors; i++) {
-                       if (connector_set[i])
-                               drm_connector_unreference(connector_set[i]);
-               }
-       }
-       kfree(connector_set);
-       drm_mode_destroy(dev, mode);
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
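
/*
 * Editorial sketch: the usual userspace entry point for this ioctl is
 * libdrm's drmModeSetCrtc().  Passing fb_id == -1 instead keeps the
 * currently bound fb, as handled above.  All IDs are hypothetical.
 */
#if 0
#include <xf86drmMode.h>

static int modeset_one(int fd, uint32_t crtc_id, uint32_t fb_id,
		       uint32_t conn_id, drmModeModeInfo *mode)
{
	/* One connector, fb scanned out from its (0,0) origin. */
	return drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0, &conn_id, 1, mode);
}
#endif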
-
-/**
- * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
- *     universal plane handler call
- * @crtc: crtc to update cursor for
- * @req: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Legacy cursor ioctls work directly with driver buffer handles.  To
- * translate legacy ioctl calls into universal plane handler calls, we need to
- * wrap the native buffer handle in a drm_framebuffer.
- *
- * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
- * buffer with a pitch of 4*width; the universal plane interface should be used
- * directly in cases where the hardware can support other buffer settings and
- * userspace wants to make use of these capabilities.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-static int drm_mode_cursor_universal(struct drm_crtc *crtc,
-                                    struct drm_mode_cursor2 *req,
-                                    struct drm_file *file_priv)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_mode_fb_cmd2 fbreq = {
-               .width = req->width,
-               .height = req->height,
-               .pixel_format = DRM_FORMAT_ARGB8888,
-               .pitches = { req->width * 4 },
-               .handles = { req->handle },
-       };
-       int32_t crtc_x, crtc_y;
-       uint32_t crtc_w = 0, crtc_h = 0;
-       uint32_t src_w = 0, src_h = 0;
-       int ret = 0;
-
-       BUG_ON(!crtc->cursor);
-       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
-
-       /*
-        * Obtain fb we'll be using (either new or existing) and take an extra
-        * reference to it if fb != null.  setplane will take care of dropping
-        * the reference if the plane update fails.
-        */
-       if (req->flags & DRM_MODE_CURSOR_BO) {
-               if (req->handle) {
-                       fb = internal_framebuffer_create(dev, &fbreq, file_priv);
-                       if (IS_ERR(fb)) {
-                               DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
-                               return PTR_ERR(fb);
-                       }
-                       fb->hot_x = req->hot_x;
-                       fb->hot_y = req->hot_y;
-               } else {
-                       fb = NULL;
-               }
-       } else {
-               fb = crtc->cursor->fb;
-               if (fb)
-                       drm_framebuffer_reference(fb);
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_MOVE) {
-               crtc_x = req->x;
-               crtc_y = req->y;
-       } else {
-               crtc_x = crtc->cursor_x;
-               crtc_y = crtc->cursor_y;
-       }
-
-       if (fb) {
-               crtc_w = fb->width;
-               crtc_h = fb->height;
-               src_w = fb->width << 16;
-               src_h = fb->height << 16;
-       }
-
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       ret = __setplane_internal(crtc->cursor, crtc, fb,
-                               crtc_x, crtc_y, crtc_w, crtc_h,
-                               0, 0, src_w, src_h);
-
-       /* Update successful; save new cursor position, if necessary */
-       if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
-               crtc->cursor_x = req->x;
-               crtc->cursor_y = req->y;
-       }
-
-       return ret;
-}
-
-static int drm_mode_cursor_common(struct drm_device *dev,
-                                 struct drm_mode_cursor2 *req,
-                                 struct drm_file *file_priv)
-{
-       struct drm_crtc *crtc;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
-               return -EINVAL;
-
-       crtc = drm_crtc_find(dev, req->crtc_id);
-       if (!crtc) {
-               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-               return -ENOENT;
-       }
-
-       /*
-        * If this crtc has a universal cursor plane, call that plane's update
-        * handler rather than using legacy cursor handlers.
-        */
-       drm_modeset_lock_crtc(crtc, crtc->cursor);
-       if (crtc->cursor) {
-               ret = drm_mode_cursor_universal(crtc, req, file_priv);
-               goto out;
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_BO) {
-               if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
-                       ret = -ENXIO;
-                       goto out;
-               }
-               /* Turns off the cursor if handle is 0 */
-               if (crtc->funcs->cursor_set2)
-                       ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
-                                                     req->width, req->height, req->hot_x, req->hot_y);
-               else
-                       ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
-                                                     req->width, req->height);
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_MOVE) {
-               if (crtc->funcs->cursor_move) {
-                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
-               } else {
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
-out:
-       drm_modeset_unlock_crtc(crtc);
-
-       return ret;
-}
-
-/**
- * drm_mode_cursor_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_cursor *req = data;
-       struct drm_mode_cursor2 new_req;
-
-       memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
-       new_req.hot_x = new_req.hot_y = 0;
-
-       return drm_mode_cursor_common(dev, &new_req, file_priv);
-}
-
-/**
- * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request. This implements the 2nd
- * version of the cursor ioctl, which allows userspace to additionally specify
- * the hotspot of the pointer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_cursor2 *req = data;
-
-       return drm_mode_cursor_common(dev, req, file_priv);
-}
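
/*
 * Editorial sketch: libdrm wraps these two ioctls as drmModeSetCursor()/
 * drmModeSetCursor2() and drmModeMoveCursor().  Per the universal-cursor
 * note above, the buffer is assumed to be ARGB8888 with pitch 4*width
 * (fd and IDs hypothetical).
 */
#if 0
drmModeSetCursor2(fd, crtc_id, bo_handle, 64, 64, 0, 0); /* 64x64, hotspot 0,0 */
drmModeMoveCursor(fd, crtc_id, 100, 100);
drmModeSetCursor2(fd, crtc_id, 0, 0, 0, 0, 0);           /* handle 0 hides it */
#endif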
-
-/**
- * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
- * @bpp: bits per pixel
- * @depth: bit depth per pixel
- *
- * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
- * Useful in fbdev emulation code, since that deals in those values.
- */
-uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
-{
-       uint32_t fmt;
-
-       switch (bpp) {
-       case 8:
-               fmt = DRM_FORMAT_C8;
-               break;
-       case 16:
-               if (depth == 15)
-                       fmt = DRM_FORMAT_XRGB1555;
-               else
-                       fmt = DRM_FORMAT_RGB565;
-               break;
-       case 24:
-               fmt = DRM_FORMAT_RGB888;
-               break;
-       case 32:
-               if (depth == 24)
-                       fmt = DRM_FORMAT_XRGB8888;
-               else if (depth == 30)
-                       fmt = DRM_FORMAT_XRGB2101010;
-               else
-                       fmt = DRM_FORMAT_ARGB8888;
-               break;
-       default:
-               DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
-               fmt = DRM_FORMAT_XRGB8888;
-               break;
-       }
-
-       return fmt;
-}
-EXPORT_SYMBOL(drm_mode_legacy_fb_format);
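
/*
 * Editorial note: the mapping above in concrete terms, taken straight
 * from the switch statement:
 *
 *   drm_mode_legacy_fb_format(8, 8)   -> DRM_FORMAT_C8
 *   drm_mode_legacy_fb_format(16, 15) -> DRM_FORMAT_XRGB1555
 *   drm_mode_legacy_fb_format(16, 16) -> DRM_FORMAT_RGB565
 *   drm_mode_legacy_fb_format(32, 24) -> DRM_FORMAT_XRGB8888
 *   drm_mode_legacy_fb_format(32, 30) -> DRM_FORMAT_XRGB2101010
 *   drm_mode_legacy_fb_format(32, 32) -> DRM_FORMAT_ARGB8888
 */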
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the graphics configuration, given a user request. This is
- * the original addfb ioctl which only supported RGB formats.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_fb_cmd *or = data;
-       struct drm_mode_fb_cmd2 r = {};
-       int ret;
-
-       /* convert to new format and call new ioctl */
-       r.fb_id = or->fb_id;
-       r.width = or->width;
-       r.height = or->height;
-       r.pitches[0] = or->pitch;
-       r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
-       r.handles[0] = or->handle;
-
-       ret = drm_mode_addfb2(dev, &r, file_priv);
-       if (ret)
-               return ret;
-
-       or->fb_id = r.fb_id;
-
-       return 0;
-}
-
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
-       uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
-
-       switch (format) {
-       case DRM_FORMAT_C8:
-       case DRM_FORMAT_RGB332:
-       case DRM_FORMAT_BGR233:
-       case DRM_FORMAT_XRGB4444:
-       case DRM_FORMAT_XBGR4444:
-       case DRM_FORMAT_RGBX4444:
-       case DRM_FORMAT_BGRX4444:
-       case DRM_FORMAT_ARGB4444:
-       case DRM_FORMAT_ABGR4444:
-       case DRM_FORMAT_RGBA4444:
-       case DRM_FORMAT_BGRA4444:
-       case DRM_FORMAT_XRGB1555:
-       case DRM_FORMAT_XBGR1555:
-       case DRM_FORMAT_RGBX5551:
-       case DRM_FORMAT_BGRX5551:
-       case DRM_FORMAT_ARGB1555:
-       case DRM_FORMAT_ABGR1555:
-       case DRM_FORMAT_RGBA5551:
-       case DRM_FORMAT_BGRA5551:
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_BGR565:
-       case DRM_FORMAT_RGB888:
-       case DRM_FORMAT_BGR888:
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_RGBX8888:
-       case DRM_FORMAT_BGRX8888:
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_RGBA8888:
-       case DRM_FORMAT_BGRA8888:
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_RGBX1010102:
-       case DRM_FORMAT_BGRX1010102:
-       case DRM_FORMAT_ARGB2101010:
-       case DRM_FORMAT_ABGR2101010:
-       case DRM_FORMAT_RGBA1010102:
-       case DRM_FORMAT_BGRA1010102:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_AYUV:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_NV24:
-       case DRM_FORMAT_NV42:
-       case DRM_FORMAT_YUV410:
-       case DRM_FORMAT_YVU410:
-       case DRM_FORMAT_YUV411:
-       case DRM_FORMAT_YVU411:
-       case DRM_FORMAT_YUV420:
-       case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_YUV422:
-       case DRM_FORMAT_YVU422:
-       case DRM_FORMAT_YUV444:
-       case DRM_FORMAT_YVU444:
-               return 0;
-       default:
-               DRM_DEBUG_KMS("invalid pixel format %s\n",
-                             drm_get_format_name(r->pixel_format));
-               return -EINVAL;
-       }
-}
-
-static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
-{
-       int ret, hsub, vsub, num_planes, i;
-
-       ret = format_check(r);
-       if (ret) {
-               DRM_DEBUG_KMS("bad framebuffer format %s\n",
-                             drm_get_format_name(r->pixel_format));
-               return ret;
-       }
-
-       hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
-       vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
-       num_planes = drm_format_num_planes(r->pixel_format);
-
-       if (r->width == 0 || r->width % hsub) {
-               DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
-               return -EINVAL;
-       }
-
-       if (r->height == 0 || r->height % vsub) {
-               DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
-               return -EINVAL;
-       }
-
-       for (i = 0; i < num_planes; i++) {
-               unsigned int width = r->width / (i != 0 ? hsub : 1);
-               unsigned int height = r->height / (i != 0 ? vsub : 1);
-               unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
-
-               if (!r->handles[i]) {
-                       DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
-                       return -EINVAL;
-               }
-
-               if ((uint64_t) width * cpp > UINT_MAX)
-                       return -ERANGE;
-
-               if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
-                       return -ERANGE;
-
-               if (r->pitches[i] < width * cpp) {
-                       DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
-                       return -EINVAL;
-               }
-
-               if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
-                       DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
-                                     r->modifier[i], i);
-                       return -EINVAL;
-               }
-
-               /* modifier specific checks: */
-               switch (r->modifier[i]) {
-               case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
-                       /* NOTE: the pitch restriction may be lifted later if it turns
-                        * out that no hw has this restriction:
-                        */
-                       if (r->pixel_format != DRM_FORMAT_NV12 ||
-                                       width % 128 || height % 32 ||
-                                       r->pitches[i] % 128) {
-                               DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
-                               return -EINVAL;
-                       }
-                       break;
-
-               default:
-                       break;
-               }
-       }
-
-       for (i = num_planes; i < 4; i++) {
-               if (r->modifier[i]) {
-                       DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
-                       return -EINVAL;
-               }
-
-               /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
-               if (!(r->flags & DRM_MODE_FB_MODIFIERS))
-                       continue;
-
-               if (r->handles[i]) {
-                       DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
-                       return -EINVAL;
-               }
-
-               if (r->pitches[i]) {
-                       DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
-                       return -EINVAL;
-               }
-
-               if (r->offsets[i]) {
-                       DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
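
/*
 * Editorial worked example for the per-plane loop above: NV12 has two
 * planes with hsub = vsub = 2, cpp 1 for the Y plane and cpp 2 for the
 * interleaved CbCr plane.  For a 64x64 NV12 framebuffer:
 *
 *   plane 0: 64x64, minimum pitch 64 * 1 = 64 bytes
 *   plane 1: 32x32, minimum pitch 32 * 2 = 64 bytes
 *
 * and, when DRM_MODE_FB_MODIFIERS is set, handles[2..3], pitches[2..3]
 * and offsets[2..3] must all be zero.
 */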
-
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-                           const struct drm_mode_fb_cmd2 *r,
-                           struct drm_file *file_priv)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_framebuffer *fb;
-       int ret;
-
-       if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
-               DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if ((config->min_width > r->width) || (r->width > config->max_width)) {
-               DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
-                         r->width, config->min_width, config->max_width);
-               return ERR_PTR(-EINVAL);
-       }
-       if ((config->min_height > r->height) || (r->height > config->max_height)) {
-               DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
-                         r->height, config->min_height, config->max_height);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (r->flags & DRM_MODE_FB_MODIFIERS &&
-           !dev->mode_config.allow_fb_modifiers) {
-               DRM_DEBUG_KMS("driver does not support fb modifiers\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       ret = framebuffer_check(r);
-       if (ret)
-               return ERR_PTR(ret);
-
-       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
-       if (IS_ERR(fb)) {
-               DRM_DEBUG_KMS("could not create framebuffer\n");
-               return fb;
-       }
-
-       return fb;
-}
-
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the graphics configuration, given a user request with
- * format. This is the 2nd version of the addfb ioctl, which supports
- * multi-planar framebuffers and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
-                   void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_fb_cmd2 *r = data;
-       struct drm_framebuffer *fb;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       fb = internal_framebuffer_create(dev, r, file_priv);
-       if (IS_ERR(fb))
-               return PTR_ERR(fb);
-
-       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-       r->fb_id = fb->base.id;
-
-       /* Transfer ownership to the filp for reaping on close */
-       mutex_lock(&file_priv->fbs_lock);
-       list_add(&fb->filp_head, &file_priv->fbs);
-       mutex_unlock(&file_priv->fbs_lock);
-
-       return 0;
-}
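
/*
 * Editorial sketch: a minimal userspace ADDFB2 call for a single-plane
 * XRGB8888 buffer.  The GEM handle would come from a driver-specific or
 * dumb-buffer allocation; error handling is omitted.
 */
#if 0
#include <sys/ioctl.h>
#include <drm.h>		/* libdrm uapi headers */
#include <drm_fourcc.h>

static uint32_t addfb2_xrgb8888(int fd, uint32_t gem_handle)
{
	struct drm_mode_fb_cmd2 cmd = {
		.width        = 1920,
		.height       = 1080,
		.pixel_format = DRM_FORMAT_XRGB8888,
		.pitches      = { 1920 * 4 },	/* bytes per scanline */
		.handles      = { gem_handle },
	};

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return 0;	/* 0 is never a valid object id */

	return cmd.fb_id;	/* usable with setcrtc/setplane now */
}
#endif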
-
-struct drm_mode_rmfb_work {
-       struct work_struct work;
-       struct list_head fbs;
-};
-
-static void drm_mode_rmfb_work_fn(struct work_struct *w)
-{
-       struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
-
-       while (!list_empty(&arg->fbs)) {
-               struct drm_framebuffer *fb =
-                       list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
-               list_del_init(&fb->filp_head);
-               drm_framebuffer_remove(fb);
-       }
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
-{
-       struct drm_framebuffer *fb = NULL;
-       struct drm_framebuffer *fbl = NULL;
-       uint32_t *id = data;
-       int found = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       fb = drm_framebuffer_lookup(dev, *id);
-       if (!fb)
-               return -ENOENT;
-
-       mutex_lock(&file_priv->fbs_lock);
-       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
-               if (fb == fbl)
-                       found = 1;
-       if (!found) {
-               mutex_unlock(&file_priv->fbs_lock);
-               goto fail_unref;
-       }
-
-       list_del_init(&fb->filp_head);
-       mutex_unlock(&file_priv->fbs_lock);
-
-       /* drop the reference we picked up in framebuffer lookup */
-       drm_framebuffer_unreference(fb);
-
-       /*
-        * we now own the reference that was stored in the fbs list
-        *
-        * drm_framebuffer_remove may fail with -EINTR on pending signals,
-        * so run this in a separate stack as there's no way to correctly
-        * handle this after the fb is already removed from the lookup table.
-        */
-       if (drm_framebuffer_read_refcount(fb) > 1) {
-               struct drm_mode_rmfb_work arg;
-
-               INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-               INIT_LIST_HEAD(&arg.fbs);
-               list_add_tail(&fb->filp_head, &arg.fbs);
-
-               schedule_work(&arg.work);
-               flush_work(&arg.work);
-               destroy_work_on_stack(&arg.work);
-       } else
-               drm_framebuffer_unreference(fb);
-
-       return 0;
-
-fail_unref:
-       drm_framebuffer_unreference(fb);
-       return -ENOENT;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_fb_cmd *r = data;
-       struct drm_framebuffer *fb;
-       int ret;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       fb = drm_framebuffer_lookup(dev, r->fb_id);
-       if (!fb)
-               return -ENOENT;
-
-       r->height = fb->height;
-       r->width = fb->width;
-       r->depth = fb->depth;
-       r->bpp = fb->bits_per_pixel;
-       r->pitch = fb->pitches[0];
-       if (fb->funcs->create_handle) {
-               if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
-                   drm_is_control_client(file_priv)) {
-                       ret = fb->funcs->create_handle(fb, file_priv,
-                                                      &r->handle);
-               } else {
-                       /* GET_FB() is an unprivileged ioctl so we must not
-                        * return a buffer-handle to non-master processes! For
-                        * backwards-compatibility reasons, we cannot make
-                        * GET_FB() privileged, so just return an invalid handle
-                        * for non-masters. */
-                       r->handle = 0;
-                       ret = 0;
-               }
-       } else {
-               ret = -ENODEV;
-       }
-
-       drm_framebuffer_unreference(fb);
-
-       return ret;
-}
-
-/**
- * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB and flush out the damaged area supplied by userspace as a clip
- * rectangle list. Generic userspace which does frontbuffer rendering must call
- * this ioctl to flush out the changes on manual-update display outputs, e.g.
- * USB DisplayLink devices, MIPI manual-update panels or eDP panel
- * self-refresh modes.
- *
- * Modesetting drivers which always update the frontbuffer do not need to
- * implement the corresponding ->dirty framebuffer callback.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv)
-{
-       struct drm_clip_rect __user *clips_ptr;
-       struct drm_clip_rect *clips = NULL;
-       struct drm_mode_fb_dirty_cmd *r = data;
-       struct drm_framebuffer *fb;
-       unsigned flags;
-       int num_clips;
-       int ret;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       fb = drm_framebuffer_lookup(dev, r->fb_id);
-       if (!fb)
-               return -ENOENT;
-
-       num_clips = r->num_clips;
-       clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
-
-       if (!num_clips != !clips_ptr) {
-               ret = -EINVAL;
-               goto out_err1;
-       }
-
-       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
-       /* If userspace annotates copy, clips must come in pairs */
-       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
-               ret = -EINVAL;
-               goto out_err1;
-       }
-
-       if (num_clips && clips_ptr) {
-               if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
-                       ret = -EINVAL;
-                       goto out_err1;
-               }
-               clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
-               if (!clips) {
-                       ret = -ENOMEM;
-                       goto out_err1;
-               }
-
-               ret = copy_from_user(clips, clips_ptr,
-                                    num_clips * sizeof(*clips));
-               if (ret) {
-                       ret = -EFAULT;
-                       goto out_err2;
-               }
-       }
-
-       if (fb->funcs->dirty) {
-               ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
-                                      clips, num_clips);
-       } else {
-               ret = -ENOSYS;
-       }
-
-out_err2:
-       kfree(clips);
-out_err1:
-       drm_framebuffer_unreference(fb);
-
-       return ret;
-}
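
/*
 * Editorial sketch: frontbuffer-rendering userspace flushes damage with
 * libdrm's drmModeDirtyFB(), which wraps this ioctl (fd and fb_id
 * hypothetical).
 */
#if 0
drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 256, .y2 = 256 };

drmModeDirtyFB(fd, fb_id, &clip, 1);	/* flush one 256x256 region */
#endif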
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: drm file whose framebuffers are released
- *
- * Destroy all the FBs associated with @priv. This is called when the DRM
- * file is closed, not via an ioctl, and has no return value.
- */
-void drm_fb_release(struct drm_file *priv)
-{
-       struct drm_framebuffer *fb, *tfb;
-       struct drm_mode_rmfb_work arg;
-
-       INIT_LIST_HEAD(&arg.fbs);
-
-       /*
-        * When the file gets released that means no one else can access the fb
-        * list any more, so no need to grab fpriv->fbs_lock. And we need to
-        * avoid upsetting lockdep since the universal cursor code adds a
-        * framebuffer while holding mutex locks.
-        *
-        * Note that a real deadlock between fpriv->fbs_lock and the modeset
-        * locks is impossible here since no one else but this function can get
-        * at it any more.
-        */
-       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-               if (drm_framebuffer_read_refcount(fb) > 1) {
-                       list_move_tail(&fb->filp_head, &arg.fbs);
-               } else {
-                       list_del_init(&fb->filp_head);
-
-                       /* This drops the fpriv->fbs reference. */
-                       drm_framebuffer_unreference(fb);
-               }
-       }
-
-       if (!list_empty(&arg.fbs)) {
-               INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-
-               schedule_work(&arg.work);
-               flush_work(&arg.work);
-               destroy_work_on_stack(&arg.work);
-       }
-}
-
-static bool drm_property_type_valid(struct drm_property *property)
-{
-       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-               return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-       return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
-/**
- * drm_property_create - create a new property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Note that the DRM core keeps a per-device list of properties and that, if
- * drm_mode_config_cleanup() is called, it will destroy all properties created
- * by the driver.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-                                        const char *name, int num_values)
-{
-       struct drm_property *property = NULL;
-       int ret;
-
-       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
-       if (!property)
-               return NULL;
-
-       property->dev = dev;
-
-       if (num_values) {
-               property->values = kcalloc(num_values, sizeof(uint64_t),
-                                          GFP_KERNEL);
-               if (!property->values)
-                       goto fail;
-       }
-
-       ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
-       if (ret)
-               goto fail;
-
-       property->flags = flags;
-       property->num_values = num_values;
-       INIT_LIST_HEAD(&property->enum_list);
-
-       if (name) {
-               strncpy(property->name, name, DRM_PROP_NAME_LEN);
-               property->name[DRM_PROP_NAME_LEN-1] = '\0';
-       }
-
-       list_add_tail(&property->head, &dev->mode_config.property_list);
-
-       WARN_ON(!drm_property_type_valid(property));
-
-       return property;
-fail:
-       kfree(property->values);
-       kfree(property);
-       return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-/**
- * drm_property_create_enum - create a new enumeration property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property values
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set one of the predefined values for enumeration
- * properties.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-                                        const char *name,
-                                        const struct drm_prop_enum_list *props,
-                                        int num_values)
-{
-       struct drm_property *property;
-       int i, ret;
-
-       flags |= DRM_MODE_PROP_ENUM;
-
-       property = drm_property_create(dev, flags, name, num_values);
-       if (!property)
-               return NULL;
-
-       for (i = 0; i < num_values; i++) {
-               ret = drm_property_add_enum(property, i,
-                                     props[i].type,
-                                     props[i].name);
-               if (ret) {
-                       drm_property_destroy(dev, property);
-                       return NULL;
-               }
-       }
-
-       return property;
-}
-EXPORT_SYMBOL(drm_property_create_enum);
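
/*
 * Editorial sketch: typical driver-side use of the enum helper; the
 * property name and values here are made up for illustration.
 */
#if 0
static const struct drm_prop_enum_list scaling_list[] = {
	{ 0, "None" },
	{ 1, "Full" },
	{ 2, "Aspect" },
};

prop = drm_property_create_enum(dev, 0, "scaling mode",
				scaling_list, ARRAY_SIZE(scaling_list));
if (prop)
	drm_object_attach_property(&connector->base, prop, 0);
#endif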
-
-/**
- * drm_property_create_bitmask - create a new bitmask property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property bitflags
- * @num_props: size of the @props array
- * @supported_bits: bitmask of all supported enumeration values
- *
- * This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Compared to plain enumeration properties, userspace is allowed to set any
- * or'ed-together combination of the predefined property bitflag values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-                                        int flags, const char *name,
-                                        const struct drm_prop_enum_list *props,
-                                        int num_props,
-                                        uint64_t supported_bits)
-{
-       struct drm_property *property;
-       int i, ret, index = 0;
-       int num_values = hweight64(supported_bits);
-
-       flags |= DRM_MODE_PROP_BITMASK;
-
-       property = drm_property_create(dev, flags, name, num_values);
-       if (!property)
-               return NULL;
-       for (i = 0; i < num_props; i++) {
-               if (!(supported_bits & (1ULL << props[i].type)))
-                       continue;
-
-               if (WARN_ON(index >= num_values)) {
-                       drm_property_destroy(dev, property);
-                       return NULL;
-               }
-
-               ret = drm_property_add_enum(property, index++,
-                                     props[i].type,
-                                     props[i].name);
-               if (ret) {
-                       drm_property_destroy(dev, property);
-                       return NULL;
-               }
-       }
-
-       return property;
-}
-EXPORT_SYMBOL(drm_property_create_bitmask);
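
/*
 * Editorial sketch: for bitmask properties the .type field of each list
 * entry is a bit position, and @supported_bits selects which entries are
 * exposed.  A made-up two-bit reflection property:
 */
#if 0
static const struct drm_prop_enum_list reflect_list[] = {
	{ 0, "reflect-x" },	/* bit 0 */
	{ 1, "reflect-y" },	/* bit 1 */
};

prop = drm_property_create_bitmask(dev, 0, "reflect", reflect_list,
				   ARRAY_SIZE(reflect_list),
				   BIT(0) | BIT(1));
#endif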
-
-static struct drm_property *property_create_range(struct drm_device *dev,
-                                        int flags, const char *name,
-                                        uint64_t min, uint64_t max)
-{
-       struct drm_property *property;
-
-       property = drm_property_create(dev, flags, name, 2);
-       if (!property)
-               return NULL;
-
-       property->values[0] = min;
-       property->values[1] = max;
-
-       return property;
-}
-
-/**
- * drm_property_create_range - create a new unsigned ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any unsigned integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-                                        const char *name,
-                                        uint64_t min, uint64_t max)
-{
-       return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
-                       name, min, max);
-}
-EXPORT_SYMBOL(drm_property_create_range);
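
/*
 * Editorial sketch: a range property stores just its two bounds in
 * values[].  A made-up 0..255 brightness control:
 */
#if 0
prop = drm_property_create_range(dev, 0, "brightness", 0, 255);
if (prop)
	drm_object_attach_property(&connector->base, prop, 128);
#endif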
-
-/**
- * drm_property_create_signed_range - create a new signed ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any signed integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-                                        int flags, const char *name,
-                                        int64_t min, int64_t max)
-{
-       return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
-                       name, I642U64(min), I642U64(max));
-}
-EXPORT_SYMBOL(drm_property_create_signed_range);
-
-/**
- * drm_property_create_object - create a new object property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @type: object type from DRM_MODE_OBJECT_* defines
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set this to object IDs of the given @type.
- * This is only useful for atomic properties, which is enforced.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-                                        int flags, const char *name, uint32_t type)
-{
-       struct drm_property *property;
-
-       flags |= DRM_MODE_PROP_OBJECT;
-
-       if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
-               return NULL;
-
-       property = drm_property_create(dev, flags, name, 1);
-       if (!property)
-               return NULL;
-
-       property->values[0] = type;
-
-       return property;
-}
-EXPORT_SYMBOL(drm_property_create_object);
-
-/**
- * drm_property_create_bool - create a new boolean property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * This is implemented as a ranged property with only {0, 1} as valid values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-                                        const char *name)
-{
-       return drm_property_create_range(dev, flags, name, 0, 1);
-}
-EXPORT_SYMBOL(drm_property_create_bool);
-
-/**
- * drm_property_add_enum - add a possible value to an enumeration property
- * @property: enumeration property to change
- * @index: index of the new enumeration
- * @value: value of the new enumeration
- * @name: symbolic name of the new enumeration
- *
- * This function adds enumerations to a property.
- *
- * Its use is deprecated; drivers should use one of the more specific helpers
- * to directly create the property with all enumerations already attached.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_property_add_enum(struct drm_property *property, int index,
-                         uint64_t value, const char *name)
-{
-       struct drm_property_enum *prop_enum;
-
-       if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
-               return -EINVAL;
-
-       /*
-        * Bitmask enum properties have the additional constraint of values
-        * from 0 to 63
-        */
-       if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
-                       (value > 63))
-               return -EINVAL;
-
-       if (!list_empty(&property->enum_list)) {
-               list_for_each_entry(prop_enum, &property->enum_list, head) {
-                       if (prop_enum->value == value) {
-                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-                               return 0;
-                       }
-               }
-       }
-
-       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
-       if (!prop_enum)
-               return -ENOMEM;
-
-       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-       prop_enum->value = value;
-
-       property->values[index] = value;
-       list_add_tail(&prop_enum->head, &property->enum_list);
-       return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-/**
- * drm_property_destroy - destroy a drm property
- * @dev: drm device
- * @property: property to destroy
- *
- * This function frees a property including any attached resources like
- * enumeration values.
- */
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
-       struct drm_property_enum *prop_enum, *pt;
-
-       list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
-               list_del(&prop_enum->head);
-               kfree(prop_enum);
-       }
-
-       if (property->num_values)
-               kfree(property->values);
-       drm_mode_object_unregister(dev, &property->base);
-       list_del(&property->head);
-       kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-/**
- * drm_object_attach_property - attach a property to a modeset object
- * @obj: drm modeset object
- * @property: property to attach
- * @init_val: initial value of the property
- *
- * This attaches the given property to the modeset object with the given initial
- * value. Currently this function cannot fail since the properties are stored in
- * a statically sized array.
- */
-void drm_object_attach_property(struct drm_mode_object *obj,
-                               struct drm_property *property,
-                               uint64_t init_val)
-{
-       int count = obj->properties->count;
-
-       if (count == DRM_OBJECT_MAX_PROPERTY) {
-               WARN(1, "Failed to attach object property (type: 0x%x). Please "
-                       "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
-                       "you see this message on the same object type.\n",
-                       obj->type);
-               return;
-       }
-
-       obj->properties->properties[count] = property;
-       obj->properties->values[count] = init_val;
-       obj->properties->count++;
-       if (property->flags & DRM_MODE_PROP_ATOMIC)
-               obj->properties->atomic_count++;
-}
-EXPORT_SYMBOL(drm_object_attach_property);
-
-/**
- * drm_object_property_set_value - set the value of a property
- * @obj: drm mode object to set property value for
- * @property: property to set
- * @val: value the property should be set to
- *
- * This function sets a given property on a given object. This function only
- * changes the software state of the property, it does not call into the
- * driver's ->set_property callback.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_set_value(struct drm_mode_object *obj,
-                                 struct drm_property *property, uint64_t val)
-{
-       int i;
-
-       for (i = 0; i < obj->properties->count; i++) {
-               if (obj->properties->properties[i] == property) {
-                       obj->properties->values[i] = val;
-                       return 0;
-               }
-       }
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_set_value);
-
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the software state of the given property for the
- * given object. Since there is no driver callback to retrieve the current
- * property value this might be out of sync with the hardware, depending upon
- * the driver and property.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
-                                 struct drm_property *property, uint64_t *val)
-{
-       int i;
-
-       /* read-only properties bypass atomic mechanism and still store
-        * their value in obj->properties->values[].. mostly to avoid
-        * having to deal w/ EDID and similar props in atomic paths:
-        */
-       if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
-                       !(property->flags & DRM_MODE_PROP_IMMUTABLE))
-               return drm_atomic_get_property(obj, property, val);
-
-       for (i = 0; i < obj->properties->count; i++) {
-               if (obj->properties->properties[i] == property) {
-                       *val = obj->properties->values[i];
-                       return 0;
-               }
-       }
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_get_value);
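
/*
 * Editorial sketch: pairing the two helpers above (the property pointer
 * is hypothetical).  Note that set_value only updates software state; it
 * never calls into the driver.
 */
#if 0
uint64_t val;

if (!drm_object_property_get_value(&crtc->base, prop, &val))
	drm_object_property_set_value(&crtc->base, prop, val + 1);
#endif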
-
-/**
- * drm_mode_getproperty_ioctl - get the property metadata
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the metadata for a given property, like the different
- * possible values for an enum property or the limits for a range property.
- *
- * Blob properties are special: their contents are not returned here.
- * Userspace must use the dedicated getblob ioctl to read a blob's value.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_get_property *out_resp = data;
-       struct drm_property *property;
-       int enum_count = 0;
-       int value_count = 0;
-       int ret = 0, i;
-       int copied;
-       struct drm_property_enum *prop_enum;
-       struct drm_mode_property_enum __user *enum_ptr;
-       uint64_t __user *values_ptr;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       drm_modeset_lock_all(dev);
-       property = drm_property_find(dev, out_resp->prop_id);
-       if (!property) {
-               ret = -ENOENT;
-               goto done;
-       }
-
-       if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-               list_for_each_entry(prop_enum, &property->enum_list, head)
-                       enum_count++;
-       }
-
-       value_count = property->num_values;
-
-       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
-       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
-       out_resp->flags = property->flags;
-
-       if ((out_resp->count_values >= value_count) && value_count) {
-               values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
-               for (i = 0; i < value_count; i++) {
-                       if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
-                               ret = -EFAULT;
-                               goto done;
-                       }
-               }
-       }
-       out_resp->count_values = value_count;
-
-       if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-               if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
-                       copied = 0;
-                       enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
-                       list_for_each_entry(prop_enum, &property->enum_list, head) {
-
-                               if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
-                                       ret = -EFAULT;
-                                       goto done;
-                               }
-
-                               if (copy_to_user(&enum_ptr[copied].name,
-                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
-                                       ret = -EFAULT;
-                                       goto done;
-                               }
-                               copied++;
-                       }
-               }
-               out_resp->count_enum_blobs = enum_count;
-       }
-
-       /*
-        * NOTE: The idea seems to have been to use this to read all the blob
-        * property values. But nothing ever added them to the corresponding
-        * list, userspace always used the special-purpose get_blob ioctl to
-        * read the value for a blob property. It also doesn't make a lot of
-        * sense to return values here when everything else is just metadata for
-        * the property itself.
-        */
-       if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-               out_resp->count_enum_blobs = 0;
-done:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-
-static void drm_property_free_blob(struct kref *kref)
-{
-       struct drm_property_blob *blob =
-               container_of(kref, struct drm_property_blob, base.refcount);
-
-       mutex_lock(&blob->dev->mode_config.blob_lock);
-       list_del(&blob->head_global);
-       mutex_unlock(&blob->dev->mode_config.blob_lock);
-
-       drm_mode_object_unregister(blob->dev, &blob->base);
-
-       kfree(blob);
-}
-
-/**
- * drm_property_create_blob - Create new blob property
- *
- * Creates a new blob property for a specified DRM device, optionally
- * copying data.
- *
- * @dev: DRM device to create property for
- * @length: Length to allocate for blob data
- * @data: If specified, copies data into blob
- *
- * Returns:
- * New blob property with a single reference on success, or an ERR_PTR
- * value on failure.
- */
-struct drm_property_blob *
-drm_property_create_blob(struct drm_device *dev, size_t length,
-                        const void *data)
-{
-       struct drm_property_blob *blob;
-       int ret;
-
-       if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
-               return ERR_PTR(-EINVAL);
-
-       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
-       if (!blob)
-               return ERR_PTR(-ENOMEM);
-
-       /* This must be explicitly initialised, so we can safely call list_del
-        * on it in the removal handler, even if it isn't in a file list. */
-       INIT_LIST_HEAD(&blob->head_file);
-       blob->length = length;
-       blob->dev = dev;
-
-       if (data)
-               memcpy(blob->data, data, length);
-
-       ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
-                                     true, drm_property_free_blob);
-       if (ret) {
-               kfree(blob);
-               return ERR_PTR(-EINVAL);
-       }
-
-       mutex_lock(&dev->mode_config.blob_lock);
-       list_add_tail(&blob->head_global,
-                     &dev->mode_config.property_blob_list);
-       mutex_unlock(&dev->mode_config.blob_lock);
-
-       return blob;
-}
-EXPORT_SYMBOL(drm_property_create_blob);
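A minimal sketch of the create/unreference lifecycle, assuming the payload
has already been validated by the caller:

    static int example_publish_blob(struct drm_device *dev,
                                    const void *payload, size_t len)
    {
            struct drm_property_blob *blob;

            blob = drm_property_create_blob(dev, len, payload);
            if (IS_ERR(blob))
                    return PTR_ERR(blob);

            /* ... expose blob->base.id through a property here ... */

            /* drop the creation reference once nothing else needs it */
            drm_property_unreference_blob(blob);
            return 0;
    }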
-
-/**
- * drm_property_unreference_blob - Unreference a blob property
- *
- * Drop a reference on a blob property. May free the object.
- *
- * @blob: Pointer to blob property
- */
-void drm_property_unreference_blob(struct drm_property_blob *blob)
-{
-       if (!blob)
-               return;
-
-       drm_mode_object_unreference(&blob->base);
-}
-EXPORT_SYMBOL(drm_property_unreference_blob);
-
-/**
- * drm_property_destroy_user_blobs - destroy all blobs created by this client
- * @dev:       DRM device
- * @file_priv: destroy all blobs owned by this file handle
- */
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-                                    struct drm_file *file_priv)
-{
-       struct drm_property_blob *blob, *bt;
-
-       /*
-        * When the file gets released that means no one else can access the
-        * blob list any more, so no need to grab dev->blob_lock.
-        */
-       list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
-               list_del_init(&blob->head_file);
-               drm_property_unreference_blob(blob);
-       }
-}
-
-/**
- * drm_property_reference_blob - Take a reference on an existing blob property
- *
- * Take a new reference on an existing blob property.
- *
- * @blob: Pointer to blob property
- */
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
-{
-       drm_mode_object_reference(&blob->base);
-       return blob;
-}
-EXPORT_SYMBOL(drm_property_reference_blob);
-
-/**
- * drm_property_lookup_blob - look up a blob property and take a reference
- * @dev: drm device
- * @id: id of the blob property
- *
- * If successful, this takes an additional reference to the blob property.
- * Callers need to make sure to eventually unreference the returned property
- * again, using drm_property_unreference_blob().
- */
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-                                                  uint32_t id)
-{
-       struct drm_mode_object *obj;
-       struct drm_property_blob *blob = NULL;
-
-       obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
-       if (obj)
-               blob = obj_to_blob(obj);
-       return blob;
-}
-EXPORT_SYMBOL(drm_property_lookup_blob);
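Each successful lookup must eventually be paired with an unreference once
the data has been consumed, as in this fragment (dev and blob_id assumed
from context):

    blob = drm_property_lookup_blob(dev, blob_id);
    if (!blob)
            return -ENOENT;
    /* ... use blob->data and blob->length ... */
    drm_property_unreference_blob(blob);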
-
-/**
- * drm_property_replace_global_blob - atomically replace existing blob property
- * @dev: drm device
- * @replace: location of blob property pointer to be replaced
- * @length: length of data for new blob, or 0 for no data
- * @data: content for new blob, or NULL for no data
- * @obj_holds_id: optional object for property holding blob ID
- * @prop_holds_id: optional property holding blob ID
- * Returns: 0 on success or error on failure.
- *
- * This function will atomically replace a global property in the blob list,
- * optionally updating a property which holds the ID of that property. It is
- * guaranteed to be atomic: no caller will be allowed to see intermediate
- * results, and either the entire operation will succeed and clean up the
- * previous property, or it will fail and the state will be unchanged.
- *
- * If length is 0 or data is NULL, no new blob will be created, and the holding
- * property, if specified, will be set to 0.
- *
- * Access to the replace pointer is assumed to be protected by the caller, e.g.
- * by holding the relevant modesetting object lock for its parent.
- *
- * For example, a drm_connector has a 'PATH' property, which contains the ID
- * of a blob property with the value of the MST path information. Calling this
- * function with replace pointing to the connector's path_blob_ptr, length and
- * data set for the new path information, obj_holds_id set to the connector's
- * base object, and prop_holds_id set to the path property, will perform
- * a completely atomic update. The access to path_blob_ptr is protected by the
- * caller holding a lock on the connector.
- */
-static int drm_property_replace_global_blob(struct drm_device *dev,
-                                            struct drm_property_blob **replace,
-                                            size_t length,
-                                            const void *data,
-                                            struct drm_mode_object *obj_holds_id,
-                                            struct drm_property *prop_holds_id)
-{
-       struct drm_property_blob *new_blob = NULL;
-       struct drm_property_blob *old_blob = NULL;
-       int ret;
-
-       WARN_ON(replace == NULL);
-
-       old_blob = *replace;
-
-       if (length && data) {
-               new_blob = drm_property_create_blob(dev, length, data);
-               if (IS_ERR(new_blob))
-                       return PTR_ERR(new_blob);
-       }
-
-       /* This does not need to be synchronised with blob_lock, as the
-        * get_properties ioctl locks all modesetting objects, and
-        * obj_holds_id must be locked before calling here, so we cannot
-        * have its value out of sync with the list membership modified
-        * below under blob_lock. */
-       if (obj_holds_id) {
-               ret = drm_object_property_set_value(obj_holds_id,
-                                                   prop_holds_id,
-                                                   new_blob ?
-                                                       new_blob->base.id : 0);
-               if (ret != 0)
-                       goto err_created;
-       }
-
-       drm_property_unreference_blob(old_blob);
-       *replace = new_blob;
-
-       return 0;
-
-err_created:
-       drm_property_unreference_blob(new_blob);
-       return ret;
-}
-
-/**
- * drm_mode_getblob_ioctl - get the contents of a blob property value
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the contents of a blob property. The value stored in
- * an object's blob property is just a normal modeset object id.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getblob_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_get_blob *out_resp = data;
-       struct drm_property_blob *blob;
-       int ret = 0;
-       void __user *blob_ptr;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-       if (!blob)
-               return -ENOENT;
-
-       if (out_resp->length == blob->length) {
-               blob_ptr = (void __user *)(unsigned long)out_resp->data;
-               if (copy_to_user(blob_ptr, blob->data, blob->length)) {
-                       ret = -EFAULT;
-                       goto unref;
-               }
-       }
-       out_resp->length = blob->length;
-unref:
-       drm_property_unreference_blob(blob);
-
-       return ret;
-}
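Because the copy above only happens when the caller's length matches the
blob's, userspace reads a blob in two passes: query the length, then fetch
the data. A hedged userspace fragment against the raw ioctl (libdrm's
drmModeGetPropertyBlob() wraps this; fd and blob_id assumed from context):

    struct drm_mode_get_blob get = { .blob_id = blob_id };
    void *buf;

    if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get))    /* length only */
            return -errno;
    buf = malloc(get.length);
    if (!buf)
            return -ENOMEM;
    get.data = (uintptr_t)buf;
    if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get))    /* copies data */
            return -errno;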
-
-/**
- * drm_mode_createblob_ioctl - create a new blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function creates a new blob property with user-defined values. In order
- * to give us sensible validation and checking when creating, rather than at
- * every potential use, we also require a type to be provided upfront.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_createblob_ioctl(struct drm_device *dev,
-                             void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_create_blob *out_resp = data;
-       struct drm_property_blob *blob;
-       void __user *blob_ptr;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       blob = drm_property_create_blob(dev, out_resp->length, NULL);
-       if (IS_ERR(blob))
-               return PTR_ERR(blob);
-
-       blob_ptr = (void __user *)(unsigned long)out_resp->data;
-       if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
-               ret = -EFAULT;
-               goto out_blob;
-       }
-
-       /* Dropping the lock between create_blob and our access here is safe
-        * as only the same file_priv can remove the blob; at this point, it is
-        * not associated with any file_priv. */
-       mutex_lock(&dev->mode_config.blob_lock);
-       out_resp->blob_id = blob->base.id;
-       list_add_tail(&blob->head_file, &file_priv->blobs);
-       mutex_unlock(&dev->mode_config.blob_lock);
-
-       return 0;
-
-out_blob:
-       drm_property_unreference_blob(blob);
-       return ret;
-}
-
-/**
- * drm_mode_destroyblob_ioctl - destroy a user blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Destroy an existing user-defined blob property.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroyblob_ioctl(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_destroy_blob *out_resp = data;
-       struct drm_property_blob *blob = NULL, *bt;
-       bool found = false;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-       if (!blob)
-               return -ENOENT;
-
-       mutex_lock(&dev->mode_config.blob_lock);
-       /* Ensure the property was actually created by this user. */
-       list_for_each_entry(bt, &file_priv->blobs, head_file) {
-               if (bt == blob) {
-                       found = true;
-                       break;
-               }
-       }
-
-       if (!found) {
-               ret = -EPERM;
-               goto err;
-       }
-
-       /* We must drop head_file here, because we may not be the last
-        * reference on the blob. */
-       list_del_init(&blob->head_file);
-       mutex_unlock(&dev->mode_config.blob_lock);
-
-       /* One reference from lookup, and one from the filp. */
-       drm_property_unreference_blob(blob);
-       drm_property_unreference_blob(blob);
-
-       return 0;
-
-err:
-       mutex_unlock(&dev->mode_config.blob_lock);
-       drm_property_unreference_blob(blob);
-
-       return ret;
-}
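Seen from userspace, the create and destroy ioctls pair up as below; blobs
that are never destroyed explicitly are reclaimed on close by
drm_property_destroy_user_blobs() above. A sketch with error handling
trimmed (fd, payload and len assumed from context):

    struct drm_mode_create_blob create = {
            .length = len,
            .data = (uintptr_t)payload,
    };
    struct drm_mode_destroy_blob destroy;

    drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create);
    /* ... hand create.blob_id to e.g. an atomic property update ... */
    destroy.blob_id = create.blob_id;
    drmIoctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);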
-
-/**
- * drm_mode_connector_set_path_property - set path property on connector
- * @connector: connector to set property on.
- * @path: path to use for property; must not be NULL.
- *
- * This creates a property to expose to userspace to specify a
- * connector path. This is mainly used for DisplayPort MST where
- * connectors have a topology and we want to allow userspace to give
- * them more meaningful names.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                        const char *path)
-{
-       struct drm_device *dev = connector->dev;
-       int ret;
-
-       ret = drm_property_replace_global_blob(dev,
-                                              &connector->path_blob_ptr,
-                                              strlen(path) + 1,
-                                              path,
-                                              &connector->base,
-                                              dev->mode_config.path_property);
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
-
-/**
- * drm_mode_connector_set_tile_property - set tile property on connector
- * @connector: connector to set property on.
- *
- * This looks up the tile information for a connector, and creates a
- * property for userspace to parse if it exists. The property is in
- * the form of 8 integers using ':' as a separator.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       char tile[256];
-       int ret;
-
-       if (!connector->has_tile) {
-               ret  = drm_property_replace_global_blob(dev,
-                                                       &connector->tile_blob_ptr,
-                                                       0,
-                                                       NULL,
-                                                       &connector->base,
-                                                       dev->mode_config.tile_property);
-               return ret;
-       }
-
-       snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
-                connector->tile_group->id, connector->tile_is_single_monitor,
-                connector->num_h_tile, connector->num_v_tile,
-                connector->tile_h_loc, connector->tile_v_loc,
-                connector->tile_h_size, connector->tile_v_size);
-
-       ret = drm_property_replace_global_blob(dev,
-                                              &connector->tile_blob_ptr,
-                                              strlen(tile) + 1,
-                                              tile,
-                                              &connector->base,
-                                              dev->mode_config.tile_property);
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
-
-/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
- * @connector: drm connector
- * @edid: new value of the edid property
- *
- * This function creates a new blob modeset object and assigns its id to the
- * connector's edid property.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                           const struct edid *edid)
-{
-       struct drm_device *dev = connector->dev;
-       size_t size = 0;
-       int ret;
-
-       /* ignore requests to set edid when overridden */
-       if (connector->override_edid)
-               return 0;
-
-       if (edid)
-               size = EDID_LENGTH * (1 + edid->extensions);
-
-       ret = drm_property_replace_global_blob(dev,
-                                              &connector->edid_blob_ptr,
-                                              size,
-                                              edid,
-                                              &connector->base,
-                                              dev->mode_config.edid_property);
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
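Drivers typically call this from their ->get_modes() hook right after
fetching the EDID. A minimal sketch, where example_connector_ddc() is a
hypothetical stand-in for however the driver locates its DDC i2c adapter:

    static int example_get_modes(struct drm_connector *connector)
    {
            struct i2c_adapter *ddc = example_connector_ddc(connector);
            struct edid *edid = drm_get_edid(connector, ddc);
            int count = 0;

            /* passing a NULL edid clears any stale blob */
            drm_mode_connector_update_edid_property(connector, edid);
            if (edid) {
                    count = drm_add_edid_modes(connector, edid);
                    kfree(edid);
            }
            return count;
    }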
-
-/* Some properties could refer to dynamic refcnt'd objects, or things that
- * need special locking to handle lifetime issues (i.e. to ensure the prop
- * value doesn't become invalid part way through the property update due to
- * a race).  The value returned by reference via 'ref' should be passed back
- * to drm_property_change_valid_put() after the property is set (and the
- * object to which the property is attached has a chance to take its own
- * reference).
- */
-bool drm_property_change_valid_get(struct drm_property *property,
-                                        uint64_t value, struct drm_mode_object **ref)
-{
-       int i;
-
-       if (property->flags & DRM_MODE_PROP_IMMUTABLE)
-               return false;
-
-       *ref = NULL;
-
-       if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
-               if (value < property->values[0] || value > property->values[1])
-                       return false;
-               return true;
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
-               int64_t svalue = U642I64(value);
-
-               if (svalue < U642I64(property->values[0]) ||
-                               svalue > U642I64(property->values[1]))
-                       return false;
-               return true;
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-               uint64_t valid_mask = 0;
-
-               for (i = 0; i < property->num_values; i++)
-                       valid_mask |= (1ULL << property->values[i]);
-               return !(value & ~valid_mask);
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
-               struct drm_property_blob *blob;
-
-               if (value == 0)
-                       return true;
-
-               blob = drm_property_lookup_blob(property->dev, value);
-               if (blob) {
-                       *ref = &blob->base;
-                       return true;
-               } else {
-                       return false;
-               }
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-               /* a zero value for an object property translates to null: */
-               if (value == 0)
-                       return true;
-
-               *ref = _object_find(property->dev, value, property->values[0]);
-               return *ref != NULL;
-       }
-
-       for (i = 0; i < property->num_values; i++)
-               if (property->values[i] == value)
-                       return true;
-       return false;
-}
-
-void drm_property_change_valid_put(struct drm_property *property,
-               struct drm_mode_object *ref)
-{
-       if (!ref)
-               return;
-
-       if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-               drm_mode_object_unreference(ref);
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-               drm_property_unreference_blob(obj_to_blob(ref));
-}
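The expected calling pattern brackets the actual property update, so that a
refcounted object referenced by the new value cannot disappear mid-update;
drm_mode_obj_set_property_ioctl() below is the in-tree user. In fragment
form (apply_property() is a hypothetical setter):

    struct drm_mode_object *ref;

    if (!drm_property_change_valid_get(property, value, &ref))
            return -EINVAL;

    ret = apply_property(obj, property, value);

    /* safe now: the object had its chance to take its own reference */
    drm_property_change_valid_put(property, ref);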
-
-/**
- * drm_mode_connector_property_set_ioctl - set the current value of a connector property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for a connector's property. It also
- * calls into a driver's ->set_property callback to update the hardware state.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-                                      void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_connector_set_property *conn_set_prop = data;
-       struct drm_mode_obj_set_property obj_set_prop = {
-               .value = conn_set_prop->value,
-               .prop_id = conn_set_prop->prop_id,
-               .obj_id = conn_set_prop->connector_id,
-               .obj_type = DRM_MODE_OBJECT_CONNECTOR
-       };
-
-       /* It does all the locking and checking we need */
-       return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
-}
-
-static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
-                                          struct drm_property *property,
-                                          uint64_t value)
-{
-       int ret = -EINVAL;
-       struct drm_connector *connector = obj_to_connector(obj);
-
-       /* Do DPMS ourselves */
-       if (property == connector->dev->mode_config.dpms_property) {
-               ret = (*connector->funcs->dpms)(connector, (int)value);
-       } else if (connector->funcs->set_property)
-               ret = connector->funcs->set_property(connector, property, value);
-
-       /* store the property value if successful */
-       if (!ret)
-               drm_object_property_set_value(&connector->base, property, value);
-       return ret;
-}
-
-static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
-                                     struct drm_property *property,
-                                     uint64_t value)
-{
-       int ret = -EINVAL;
-       struct drm_crtc *crtc = obj_to_crtc(obj);
-
-       if (crtc->funcs->set_property)
-               ret = crtc->funcs->set_property(crtc, property, value);
-       if (!ret)
-               drm_object_property_set_value(obj, property, value);
-
-       return ret;
-}
-
-/**
- * drm_mode_plane_set_obj_prop - set the value of a property
- * @plane: drm plane object to set property value for
- * @property: property to set
- * @value: value the property should be set to
- *
- * This function sets a given property on a given plane object. It calls the
- * driver's ->set_property callback and changes the software state of the
- * property if the callback succeeds.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-                               struct drm_property *property,
-                               uint64_t value)
-{
-       int ret = -EINVAL;
-       struct drm_mode_object *obj = &plane->base;
-
-       if (plane->funcs->set_property)
-               ret = plane->funcs->set_property(plane, property, value);
-       if (!ret)
-               drm_object_property_set_value(obj, property, value);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
-
 /**
- * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
  *
- * This function retrieves the current value for an object's property. Compared
- * to the connector-specific ioctl, this one is extended to also work on CRTC
- * and plane objects.
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
  *
  * Called by the user via ioctl.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-                                     struct drm_file *file_priv)
+int drm_mode_getresources(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
 {
-       struct drm_mode_obj_get_properties *arg = data;
-       struct drm_mode_object *obj;
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
        int ret = 0;
+       int connector_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int encoder_count = 0;
+       int copied = 0;
+       uint32_t __user *fb_id;
+       uint32_t __user *crtc_id;
+       uint32_t __user *connector_id;
+       uint32_t __user *encoder_id;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
 
-       obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-       if (!obj) {
-               ret = -ENOENT;
-               goto out;
+       mutex_lock(&file_priv->fbs_lock);
+       /*
+        * For the non-control nodes we need to limit the list of resources
+        * by IDs in the group list for this node
+        */
+       list_for_each(lh, &file_priv->fbs)
+               fb_count++;
+
+       /* handle this in 4 parts */
+       /* FBs */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+               list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+                       if (put_user(fb->base.id, fb_id + copied)) {
+                               mutex_unlock(&file_priv->fbs_lock);
+                               return -EFAULT;
+                       }
+                       copied++;
+               }
        }
-       if (!obj->properties) {
-               ret = -EINVAL;
-               goto out_unref;
+       card_res->count_fbs = fb_count;
+       mutex_unlock(&file_priv->fbs_lock);
+
+       /* mode_config.mutex protects the connector list against e.g. DP MST
+        * connector hot-adding. CRTC/Plane lists are invariant. */
+       mutex_lock(&dev->mode_config.mutex);
+       drm_for_each_crtc(crtc, dev)
+               crtc_count++;
+
+       drm_for_each_connector(connector, dev)
+               connector_count++;
+
+       drm_for_each_encoder(encoder, dev)
+               encoder_count++;
+
+       card_res->max_height = dev->mode_config.max_height;
+       card_res->min_height = dev->mode_config.min_height;
+       card_res->max_width = dev->mode_config.max_width;
+       card_res->min_width = dev->mode_config.min_width;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+               drm_for_each_crtc(crtc, dev) {
+                       if (put_user(crtc->base.id, crtc_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+       /* Encoders */
+       if (card_res->count_encoders >= encoder_count) {
+               copied = 0;
+               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+               drm_for_each_encoder(encoder, dev) {
+                       if (put_user(encoder->base.id, encoder_id +
+                                    copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
        }
+       card_res->count_encoders = encoder_count;
 
-       ret = get_properties(obj, file_priv->atomic,
-                       (uint32_t __user *)(unsigned long)(arg->props_ptr),
-                       (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
-                       &arg->count_props);
+       /* Connectors */
+       if (card_res->count_connectors >= connector_count) {
+               copied = 0;
+               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+               drm_for_each_connector(connector, dev) {
+                       if (put_user(connector->base.id,
+                                    connector_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_connectors = connector_count;
 
-out_unref:
-       drm_mode_object_unreference(obj);
 out:
-       drm_modeset_unlock_all(dev);
+       mutex_unlock(&dev->mode_config.mutex);
        return ret;
 }
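Userspace discovers the array sizes with a first call, then allocates and
calls again. A hedged fragment for the CRTC-only case using the raw ioctl
(drmModeGetResources() in libdrm performs the full dance, and retries
because the counts may grow between the two calls on hotplug):

    struct drm_mode_card_res res;
    uint32_t *crtcs;

    memset(&res, 0, sizeof(res));
    if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))   /* counts only */
            return -errno;

    crtcs = calloc(res.count_crtcs, sizeof(*crtcs));
    if (!crtcs)
            return -ENOMEM;
    res.crtc_id_ptr = (uintptr_t)crtcs;
    res.count_fbs = res.count_connectors = res.count_encoders = 0;
    if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
            return -errno;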
 
 /**
- * drm_mode_obj_set_property_ioctl - set the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
  *
- * This function sets the current value for an object's property. It also calls
- * into a driver's ->set_property callback to update the hardware state.
- * Compared to the connector-specific ioctl, this one is extended to also work
- * on CRTC and plane objects.
+ * Construct a CRTC configuration structure to return to the user.
  *
  * Called by the user via ioctl.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv)
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
 {
-       struct drm_mode_obj_set_property *arg = data;
-       struct drm_mode_object *arg_obj;
-       struct drm_mode_object *prop_obj;
-       struct drm_property *property;
-       int i, ret = -EINVAL;
-       struct drm_mode_object *ref;
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+       if (!crtc)
+               return -ENOENT;
 
-       arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-       if (!arg_obj) {
-               ret = -ENOENT;
-               goto out;
-       }
-       if (!arg_obj->properties)
-               goto out_unref;
+       drm_modeset_lock_crtc(crtc, crtc->primary);
+       crtc_resp->gamma_size = crtc->gamma_size;
+       if (crtc->primary->fb)
+               crtc_resp->fb_id = crtc->primary->fb->base.id;
+       else
+               crtc_resp->fb_id = 0;
 
-       for (i = 0; i < arg_obj->properties->count; i++)
-               if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
-                       break;
+       if (crtc->state) {
+               crtc_resp->x = crtc->primary->state->src_x >> 16;
+               crtc_resp->y = crtc->primary->state->src_y >> 16;
+               if (crtc->state->enable) {
+                       drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
+                       crtc_resp->mode_valid = 1;
 
-       if (i == arg_obj->properties->count)
-               goto out_unref;
+               } else {
+                       crtc_resp->mode_valid = 0;
+               }
+       } else {
+               crtc_resp->x = crtc->x;
+               crtc_resp->y = crtc->y;
+               if (crtc->enabled) {
+                       drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+                       crtc_resp->mode_valid = 1;
 
-       prop_obj = drm_mode_object_find(dev, arg->prop_id,
-                                       DRM_MODE_OBJECT_PROPERTY);
-       if (!prop_obj) {
-               ret = -ENOENT;
-               goto out_unref;
-       }
-       property = obj_to_property(prop_obj);
-
-       if (!drm_property_change_valid_get(property, arg->value, &ref))
-               goto out_unref;
-
-       switch (arg_obj->type) {
-       case DRM_MODE_OBJECT_CONNECTOR:
-               ret = drm_mode_connector_set_obj_prop(arg_obj, property,
-                                                     arg->value);
-               break;
-       case DRM_MODE_OBJECT_CRTC:
-               ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
-               break;
-       case DRM_MODE_OBJECT_PLANE:
-               ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
-                                                 property, arg->value);
-               break;
+               } else {
+                       crtc_resp->mode_valid = 0;
+               }
        }
+       drm_modeset_unlock_crtc(crtc);
 
-       drm_property_change_valid_put(property, ref);
-
-out_unref:
-       drm_mode_object_unreference(arg_obj);
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return 0;
 }
 
 /**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
- * @connector: connector to attach
- * @encoder: encoder to attach @connector to
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
  *
- * This function links up a connector to an encoder. Note that the routing
- * restrictions between encoders and crtcs are exposed to userspace through the
- * possible_clones and possible_crtcs bitmasks.
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-                                     struct drm_encoder *encoder)
+int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
-       int i;
+       struct drm_crtc *crtc = set->crtc;
+       struct drm_framebuffer *fb;
+       struct drm_crtc *tmp;
+       int ret;
 
        /*
-        * In the past, drivers have attempted to model the static association
-        * of connector to encoder in simple connector/encoder devices using a
-        * direct assignment of connector->encoder = encoder. This connection
-        * is a logical one and the responsibility of the core, so drivers are
-        * expected not to mess with this.
-        *
-        * Note that the error return should've been enough here, but a large
-        * majority of drivers ignores the return value, so add in a big WARN
-        * to get people's attention.
+        * NOTE: ->set_config can also disable other crtcs (if we steal all
+        * connectors from it), hence we need to refcount the fbs across all
+        * crtcs. Atomic modeset will have saner semantics ...
         */
-       if (WARN_ON(connector->encoder))
-               return -EINVAL;
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0) {
-                       connector->encoder_ids[i] = encoder->base.id;
-                       return 0;
-               }
-       }
-       return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-
-/**
- * drm_mode_crtc_set_gamma_size - set the gamma table size
- * @crtc: CRTC to set the gamma table size for
- * @gamma_size: size of the gamma table
- *
- * Drivers which support gamma tables should set this to the supported gamma
- * table size when initializing the CRTC. Currently the drm core only supports a
- * fixed gamma table size.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-                                int gamma_size)
-{
-       uint16_t *r_base, *g_base, *b_base;
-       int i;
+       drm_for_each_crtc(tmp, crtc->dev)
+               tmp->primary->old_fb = tmp->primary->fb;
 
-       crtc->gamma_size = gamma_size;
+       fb = set->fb;
 
-       crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
-                                   GFP_KERNEL);
-       if (!crtc->gamma_store) {
-               crtc->gamma_size = 0;
-               return -ENOMEM;
+       ret = crtc->funcs->set_config(set);
+       if (ret == 0) {
+               crtc->primary->crtc = crtc;
+               crtc->primary->fb = fb;
        }
 
-       r_base = crtc->gamma_store;
-       g_base = r_base + gamma_size;
-       b_base = g_base + gamma_size;
-       for (i = 0; i < gamma_size; i++) {
-               r_base[i] = i << 8;
-               g_base[i] = i << 8;
-               b_base[i] = i << 8;
+       drm_for_each_crtc(tmp, crtc->dev) {
+               if (tmp->primary->fb)
+                       drm_framebuffer_reference(tmp->primary->fb);
+               if (tmp->primary->old_fb)
+                       drm_framebuffer_unreference(tmp->primary->old_fb);
+               tmp->primary->old_fb = NULL;
        }
 
-
-       return 0;
+       return ret;
 }
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+EXPORT_SYMBOL(drm_mode_set_config_internal);
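An in-kernel caller such as the fbdev helper fills a struct drm_mode_set
and invokes the helper while holding the modeset locks. A minimal sketch,
assuming crtc, fb, mode and connector were already looked up and referenced
by the caller:

    struct drm_mode_set set = {
            .crtc = crtc,
            .fb = fb,
            .mode = mode,
            .connectors = &connector,
            .num_connectors = 1,
    };
    int ret;

    ret = drm_mode_set_config_internal(&set);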
 
 /**
- * drm_mode_gamma_set_ioctl - set the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
- * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
- *
- * Called by the user via ioctl.
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
  *
- * Returns:
- * Zero on success, negative errno on failure.
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
  */
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+                           int *hdisplay, int *vdisplay)
 {
-       struct drm_mode_crtc_lut *crtc_lut = data;
-       struct drm_crtc *crtc;
-       void *r_base, *g_base, *b_base;
-       int size;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
-       if (!crtc) {
-               ret = -ENOENT;
-               goto out;
-       }
-
-       if (crtc->funcs->gamma_set == NULL) {
-               ret = -ENOSYS;
-               goto out;
-       }
-
-       /* memcpy into gamma store */
-       if (crtc_lut->gamma_size != crtc->gamma_size) {
-               ret = -EINVAL;
-               goto out;
-       }
+       struct drm_display_mode adjusted;
 
-       size = crtc_lut->gamma_size * (sizeof(uint16_t));
-       r_base = crtc->gamma_store;
-       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       drm_mode_copy(&adjusted, mode);
+       drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+       *hdisplay = adjusted.crtc_hdisplay;
+       *vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
 
-       g_base = r_base + size;
-       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+/**
+ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
+ *     CRTC viewport
+ * @crtc: CRTC that framebuffer will be displayed on
+ * @x: x panning
+ * @y: y panning
+ * @mode: mode that framebuffer will be displayed under
+ * @fb: framebuffer to check size of
+ */
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+                           int x, int y,
+                           const struct drm_display_mode *mode,
+                           const struct drm_framebuffer *fb)
 
-       b_base = g_base + size;
-       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+{
+       int hdisplay, vdisplay;
 
-       ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+       drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       if (crtc->state &&
+           crtc->primary->state->rotation & (DRM_ROTATE_90 |
+                                             DRM_ROTATE_270))
+               swap(hdisplay, vdisplay);
 
+       return drm_framebuffer_check_src_coords(x << 16, y << 16,
+                                               hdisplay << 16, vdisplay << 16,
+                                               fb);
 }
+EXPORT_SYMBOL(drm_crtc_check_viewport);
 
 /**
- * drm_mode_gamma_get_ioctl - get the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
  *
- * Copy the current gamma table into the storage provided. This also provides
- * the gamma table size the driver expects, which can be used to size the
- * allocated storage.
+ * Build a new CRTC configuration based on user request.
  *
  * Called by the user via ioctl.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
 {
-       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
        struct drm_crtc *crtc;
-       void *r_base, *g_base, *b_base;
-       int size;
-       int ret = 0;
+       struct drm_connector **connector_set = NULL, *connector;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret;
+       int i;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
+       /*
+        * Universal plane src offsets are only 16.16, prevent havoc for
+        * drivers using universal plane code internally.
+        */
+       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
+               return -ERANGE;
+
        drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+       crtc = drm_crtc_find(dev, crtc_req->crtc_id);
        if (!crtc) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
                ret = -ENOENT;
                goto out;
        }
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
-       /* memcpy into gamma store */
-       if (crtc_lut->gamma_size != crtc->gamma_size) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       size = crtc_lut->gamma_size * (sizeof(uint16_t));
-       r_base = crtc->gamma_store;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       g_base = r_base + size;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       b_base = g_base + size;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-
-/**
- * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This schedules an asynchronous update on a given CRTC, called page flip.
- * Optionally a drm event is generated to signal the completion of the event.
- * Generic drivers cannot assume that a pageflip with changed framebuffer
- * properties (including driver specific metadata like tiling layout) will work,
- * but some drivers support e.g. pixel format changes through the pageflip
- * ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_crtc_page_flip *page_flip = data;
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_pending_vblank_event *e = NULL;
-       int ret = -EINVAL;
-
-       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
-           page_flip->reserved != 0)
-               return -EINVAL;
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       if (!crtc->primary->fb) {
+                               DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = crtc->primary->fb;
+                       /* Make refcounting symmetric with the lookup path. */
+                       drm_framebuffer_reference(fb);
+               } else {
+                       fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+                       if (!fb) {
+                               DRM_DEBUG_KMS("Unknown FB ID %d\n",
+                                               crtc_req->fb_id);
+                               ret = -ENOENT;
+                               goto out;
+                       }
+               }
 
-       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
-               return -EINVAL;
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-       crtc = drm_crtc_find(dev, page_flip->crtc_id);
-       if (!crtc)
-               return -ENOENT;
+               ret = drm_mode_convert_umode(mode, &crtc_req->mode);
+               if (ret) {
+                       DRM_DEBUG_KMS("Invalid mode\n");
+                       goto out;
+               }
 
-       drm_modeset_lock_crtc(crtc, crtc->primary);
-       if (crtc->primary->fb == NULL) {
-               /* The framebuffer is currently unbound, presumably
-                * due to a hotplug event, that userspace has not
-                * yet discovered.
+               /*
+                * Check whether the primary plane supports the fb pixel format.
+                * Drivers not implementing the universal planes API use a
+                * default formats list provided by the DRM core which doesn't
+                * match real hardware capabilities. Skip the check in that
+                * case.
                 */
-               ret = -EBUSY;
-               goto out;
-       }
+               if (!crtc->primary->format_default) {
+                       ret = drm_plane_check_pixel_format(crtc->primary,
+                                                          fb->pixel_format);
+                       if (ret) {
+                               char *format_name = drm_get_format_name(fb->pixel_format);
+                               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+                               kfree(format_name);
+                               goto out;
+                       }
+               }
 
-       if (crtc->funcs->page_flip == NULL)
-               goto out;
+               ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+                                             mode, fb);
+               if (ret)
+                       goto out;
 
-       fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-       if (!fb) {
-               ret = -ENOENT;
-               goto out;
        }
 
-       if (crtc->state) {
-               const struct drm_plane_state *state = crtc->primary->state;
-
-               ret = check_src_coords(state->src_x, state->src_y,
-                                      state->src_w, state->src_h, fb);
-       } else {
-               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
-       }
-       if (ret)
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
                goto out;
+       }
 
-       if (crtc->primary->fb->pixel_format != fb->pixel_format) {
-               DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
                ret = -EINVAL;
                goto out;
        }
 
-       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               e = kzalloc(sizeof *e, GFP_KERNEL);
-               if (!e) {
-                       ret = -ENOMEM;
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
                        goto out;
                }
-               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-               e->event.base.length = sizeof(e->event);
-               e->event.user_data = page_flip->user_data;
-               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
-               if (ret) {
-                       kfree(e);
+
+               connector_set = kmalloc_array(crtc_req->count_connectors,
+                                             sizeof(struct drm_connector *),
+                                             GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
                        goto out;
                }
-       }
 
-       crtc->primary->old_fb = crtc->primary->fb;
-       ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
-       if (ret) {
-               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
-                       drm_event_cancel_free(dev, &e->base);
-               /* Keep the old fb, don't unref it. */
-               crtc->primary->old_fb = NULL;
-       } else {
-               crtc->primary->fb = fb;
-               /* Unref only the old framebuffer. */
-               fb = NULL;
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       connector_set[i] = NULL;
+                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       connector = drm_connector_lookup(dev, out_id);
+                       if (!connector) {
+                               DRM_DEBUG_KMS("Connector id %d unknown\n",
+                                               out_id);
+                               ret = -ENOENT;
+                               goto out;
+                       }
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                                       connector->base.id,
+                                       connector->name);
+
+                       connector_set[i] = connector;
+               }
        }
 
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = drm_mode_set_config_internal(&set);
+
 out:
        if (fb)
                drm_framebuffer_unreference(fb);
-       if (crtc->primary->old_fb)
-               drm_framebuffer_unreference(crtc->primary->old_fb);
-       crtc->primary->old_fb = NULL;
-       drm_modeset_unlock_crtc(crtc);
+
+       if (connector_set) {
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       if (connector_set[i])
+                               drm_connector_unreference(connector_set[i]);
+               }
+       }
+       kfree(connector_set);
+       drm_mode_destroy(dev, mode);
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+                              struct drm_property *property,
+                              uint64_t value)
+{
+       int ret = -EINVAL;
+       struct drm_crtc *crtc = obj_to_crtc(obj);
+
+       if (crtc->funcs->set_property)
+               ret = crtc->funcs->set_property(crtc, property, value);
+       if (!ret)
+               drm_object_property_set_value(obj, property, value);
 
        return ret;
 }
@@ -5637,37 +1047,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
        return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
 
-/**
- * drm_rotation_simplify() - Try to simplify the rotation
- * @rotation: Rotation to be simplified
- * @supported_rotations: Supported rotations
- *
- * Attempt to simplify the rotation to a form that is supported.
- * Eg. if the hardware supports everything except DRM_REFLECT_X
- * one could call this function like this:
- *
- * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
- *                       BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
- *                       BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
- *
- * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
- * transforms the hardware supports, this function may not
- * be able to produce a supported transform, so the caller should
- * check the result afterwards.
- */
-unsigned int drm_rotation_simplify(unsigned int rotation,
-                                  unsigned int supported_rotations)
-{
-       if (rotation & ~supported_rotations) {
-               rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
-               rotation = (rotation & DRM_REFLECT_MASK) |
-                          BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
-       }
-
-       return rotation;
-}
-EXPORT_SYMBOL(drm_rotation_simplify);
-
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
@@ -5784,24 +1163,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-                                                      unsigned int supported_rotations)
-{
-       static const struct drm_prop_enum_list props[] = {
-               { DRM_ROTATE_0,   "rotate-0" },
-               { DRM_ROTATE_90,  "rotate-90" },
-               { DRM_ROTATE_180, "rotate-180" },
-               { DRM_ROTATE_270, "rotate-270" },
-               { DRM_REFLECT_X,  "reflect-x" },
-               { DRM_REFLECT_Y,  "reflect-y" },
-       };
-
-       return drm_property_create_bitmask(dev, 0, "rotation",
-                                          props, ARRAY_SIZE(props),
-                                          supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
 /**
  * DOC: Tile group
  *
@@ -5900,48 +1261,3 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
        return tg;
 }
 EXPORT_SYMBOL(drm_mode_create_tile_group);
-
-/**
- * drm_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @has_ctm: whether to attach ctm_property for CSC matrix
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction
- * properties on a CRTC. This includes the three degamma, CSC and gamma
- * properties that userspace can set and the two size properties that
- * inform userspace of the LUT sizes. Each of the properties is
- * optional. The gamma and degamma properties are only attached if
- * their size is non-zero, and ctm_property is only attached if has_ctm is
- * true.
- */
-void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                               uint degamma_lut_size,
-                               bool has_ctm,
-                               uint gamma_lut_size)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_mode_config *config = &dev->mode_config;
-
-       if (degamma_lut_size) {
-               drm_object_attach_property(&crtc->base,
-                                          config->degamma_lut_property, 0);
-               drm_object_attach_property(&crtc->base,
-                                          config->degamma_lut_size_property,
-                                          degamma_lut_size);
-       }
-
-       if (has_ctm)
-               drm_object_attach_property(&crtc->base,
-                                          config->ctm_property, 0);
-
-       if (gamma_lut_size) {
-               drm_object_attach_property(&crtc->base,
-                                          config->gamma_lut_property, 0);
-               drm_object_attach_property(&crtc->base,
-                                          config->gamma_lut_size_property,
-                                          gamma_lut_size);
-       }
-}
-EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
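
The color management helper removed above moves elsewhere in this reorganisation rather than disappearing. A sketch of the usual call site, a driver's CRTC init path, with illustrative (not mandated) 256-entry LUT sizes:

	/* attach degamma LUT, CSC matrix and gamma LUT properties;
	 * the sizes are hypothetical hardware limits */
	drm_crtc_enable_color_mgmt(crtc, 256, true, 256);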
index 604d3ef72ffab53552918c3cf6055ed7712f1b93..5d2cb138eba64b562a880d8a9f77a2a6054e79eb 100644 (file)
  * &drm_connector_helper_funcs.
  */
 
-/**
- * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
- *                                             connector list
- * @dev: drm device to operate on
- *
- * Some userspace presumes that the first connected connector is the main
- * display, where it's supposed to display e.g. the login screen. For
- * laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
- * painstakingly trying to initialize them in the right order.
- */
-void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
-{
-       struct drm_connector *connector, *tmp;
-       struct list_head panel_list;
-
-       INIT_LIST_HEAD(&panel_list);
-
-       list_for_each_entry_safe(connector, tmp,
-                                &dev->mode_config.connector_list, head) {
-               if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
-                   connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-                       list_move_tail(&connector->head, &panel_list);
-       }
-
-       list_splice(&panel_list, &dev->mode_config.connector_list);
-}
-EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
-
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
@@ -912,33 +883,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
-/**
- * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
- * @fb: drm_framebuffer object to fill out
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * This helper can be used in a driver's fb_create callback to pre-fill the fb's
- * metadata fields.
- */
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                   const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       int i;
-
-       fb->width = mode_cmd->width;
-       fb->height = mode_cmd->height;
-       for (i = 0; i < 4; i++) {
-               fb->pitches[i] = mode_cmd->pitches[i];
-               fb->offsets[i] = mode_cmd->offsets[i];
-               fb->modifier[i] = mode_cmd->modifier[i];
-       }
-       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
-                                   &fb->bits_per_pixel);
-       fb->pixel_format = mode_cmd->pixel_format;
-       fb->flags = mode_cmd->flags;
-}
-EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
-
 /**
  * drm_helper_resume_force_mode - force-restore mode setting configuration
  * @dev: drm_device which should be restored
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
new file mode 100644 (file)
index 0000000..28295e5
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm kms helper module as internal
+ * implementation details and are not exported to drivers.
+ */
+
+#include <drm/drm_dp_helper.h>
+
+/* drm_fb_helper.c */
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fb_helper_modinit(void);
+#else
+static inline int drm_fb_helper_modinit(void)
+{
+       return 0;
+}
+#endif
+
+/* drm_dp_aux_dev.c */
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+#else
+static inline int drm_dp_aux_dev_init(void)
+{
+       return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+       return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+#endif
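
These CONFIG-guarded stubs let the helper module's entry point call both initialisers unconditionally. A sketch of such an entry point (the function name is illustrative):

	static int __init drm_kms_helper_init(void)
	{
		int ret;

		/* compiles to "return 0" when fbdev emulation is off */
		ret = drm_fb_helper_modinit();
		if (ret < 0)
			return ret;

		/* likewise a no-op without CONFIG_DRM_DP_AUX_CHARDEV */
		return drm_dp_aux_dev_init();
	}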
index 0c34e6d906d1cab39b3793588176140abffd978b..c48ba02c5365d6a49e8d0915d35b1d23cdd00c90 100644 (file)
 
 
 /* drm_crtc.c */
-void drm_connector_ida_init(void);
-void drm_connector_ida_destroy(void);
-int drm_mode_object_get(struct drm_device *dev,
-                       struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_unregister(struct drm_device *dev,
-                               struct drm_mode_object *object);
-bool drm_property_change_valid_get(struct drm_property *property,
-                                  uint64_t value,
-                                  struct drm_mode_object **ref);
-void drm_property_change_valid_put(struct drm_property *property,
-                                  struct drm_mode_object *ref);
-
-int drm_plane_check_pixel_format(const struct drm_plane *plane,
-                                u32 format);
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+                              struct drm_property *property,
+                              uint64_t value);
 int drm_crtc_check_viewport(const struct drm_crtc *crtc,
                            int x, int y,
                            const struct drm_display_mode *mode,
                            const struct drm_framebuffer *fb);
 
 void drm_fb_release(struct drm_file *file_priv);
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-                                    struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
@@ -64,42 +51,32 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
 int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file_priv);
 
-/* framebuffer IOCTLs */
-extern int drm_mode_addfb(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
-                        void *data, struct drm_file *file_priv);
-int drm_mode_getfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv);
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv);
-
 /* IOCTLs */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-                                     struct drm_file *file_priv);
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv);
-
 int drm_mode_getresources(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv);
 int drm_mode_getcrtc(struct drm_device *dev,
                     void *data, struct drm_file *file_priv);
-int drm_mode_getconnector(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
 int drm_mode_setcrtc(struct drm_device *dev,
                     void *data, struct drm_file *file_priv);
-int drm_mode_getplane(struct drm_device *dev,
-                     void *data, struct drm_file *file_priv);
-int drm_mode_setplane(struct drm_device *dev,
-                     void *data, struct drm_file *file_priv);
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv);
+
+/* drm_color_mgmt.c */
+
+/* IOCTLs */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
+
+/* drm_property.c */
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+                                    struct drm_file *file_priv);
+bool drm_property_change_valid_get(struct drm_property *property,
+                                  uint64_t value,
+                                  struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+                                  struct drm_mode_object *ref);
+
+/* IOCTL */
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv);
 int drm_mode_getblob_ioctl(struct drm_device *dev,
@@ -108,17 +85,80 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
                              void *data, struct drm_file *file_priv);
 int drm_mode_destroyblob_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv);
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-                                         void *data, struct drm_file *file_priv);
+
+/* drm_mode_object.c */
+int drm_mode_object_get_reg(struct drm_device *dev,
+                           struct drm_mode_object *obj,
+                           uint32_t obj_type,
+                           bool register_obj,
+                           void (*obj_free_cb)(struct kref *kref));
+void drm_mode_object_register(struct drm_device *dev,
+                             struct drm_mode_object *obj);
+int drm_mode_object_get(struct drm_device *dev,
+                       struct drm_mode_object *obj, uint32_t obj_type);
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+                                              uint32_t id, uint32_t type);
+void drm_mode_object_unregister(struct drm_device *dev,
+                               struct drm_mode_object *object);
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+                                  uint32_t __user *prop_ptr,
+                                  uint64_t __user *prop_values,
+                                  uint32_t *arg_count_props);
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+                                              uint32_t prop_id);
+
+/* IOCTL */
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+
+/* drm_encoder.c */
+int drm_encoder_register_all(struct drm_device *dev);
+void drm_encoder_unregister_all(struct drm_device *dev);
+
+/* IOCTL */
 int drm_mode_getencoder(struct drm_device *dev,
                        void *data, struct drm_file *file_priv);
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv);
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv);
 
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv);
+/* drm_connector.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
+void drm_connector_unregister_all(struct drm_device *dev);
+int drm_connector_register_all(struct drm_device *dev);
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+                                   struct drm_property *property,
+                                   uint64_t value);
+int drm_connector_create_standard_properties(struct drm_device *dev);
+
+/* IOCTL */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                         void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+
+/* drm_framebuffer.c */
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+                               const struct drm_mode_fb_cmd2 *r,
+                               struct drm_file *file_priv);
+void drm_framebuffer_free(struct kref *kref);
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+                                    uint32_t src_w, uint32_t src_h,
+                                    const struct drm_framebuffer *fb);
+
+/* IOCTL */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv);
+int drm_mode_addfb2(struct drm_device *dev,
+                   void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+                 void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
 
 /* drm_atomic.c */
 int drm_atomic_get_property(struct drm_mode_object *obj,
@@ -129,6 +169,23 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
 
-/* drm_blend.c */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-                                    struct drm_atomic_state *state);
+
+/* drm_plane.c */
+int drm_plane_register_all(struct drm_device *dev);
+void drm_plane_unregister_all(struct drm_device *dev);
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+                                u32 format);
+
+/* IOCTL */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
index ea481800ef565dfa3bd4852456f0f05efe60ac32..3f83e2ca80ad860d7e3956ec8a1a61a543bfe0a9 100644 (file)
@@ -50,9 +50,8 @@ int drm_legacy_dma_setup(struct drm_device *dev)
        int i;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-           drm_core_check_feature(dev, DRIVER_MODESET)) {
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return 0;
-       }
 
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);
@@ -81,9 +80,8 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
        int i, j;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-           drm_core_check_feature(dev, DRIVER_MODESET)) {
+           !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;
-       }
 
        if (!dma)
                return;
index 734f86a345f628edab95c7ab3c8652a4d9927e7b..ec1ed94b239020f0a55425df9781f61003d15e3c 100644 (file)
@@ -36,6 +36,8 @@
 #include <drm/drm_crtc.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 struct drm_dp_aux_dev {
        unsigned index;
        struct drm_dp_aux *aux;
@@ -283,12 +285,7 @@ static int auxdev_wait_atomic_t(atomic_t *p)
        schedule();
        return 0;
 }
-/**
- * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
+
 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 {
        struct drm_dp_aux_dev *aux_dev;
@@ -314,14 +311,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
        DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
        kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
 }
-EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
 
-/**
- * drm_dp_aux_register_devnode() - register a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
 int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
 {
        struct drm_dp_aux_dev *aux_dev;
@@ -347,7 +337,6 @@ error:
        drm_dp_aux_unregister_devnode(aux);
        return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_register_devnode);
 
 int drm_dp_aux_dev_init(void)
 {
@@ -369,11 +358,9 @@ out:
        class_destroy(drm_dp_aux_dev_class);
        return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_init);
 
 void drm_dp_aux_dev_exit(void)
 {
        unregister_chrdev(drm_dev_major, "aux");
        class_destroy(drm_dp_aux_dev_class);
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_exit);
index eae5ef963cb790d261a5093aa8b8d72671b25e86..ac3924a877e0fe03bd58e9f75395074096802898 100644 (file)
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
+#include <linux/seq_file.h>
 #include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_aux_dev.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 /**
  * DOC: dp helpers
  *
@@ -223,7 +225,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
                        err = ret;
        }
 
-       DRM_DEBUG_KMS("too many retries, giving up\n");
+       DRM_DEBUG_KMS("Too many retries, giving up. First error: %d\n", err);
        ret = err;
 
 unlock:
@@ -438,6 +440,179 @@ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
 }
 EXPORT_SYMBOL(drm_dp_link_configure);
 
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max
+ *                                 pixel rate for legacy VGA
+ *                                 converter or max TMDS clock
+ *                                 rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                               const u8 port_cap[4])
+{
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DETAILED_CAP_INFO_AVAILABLE;
+
+       if (!detailed_cap_info)
+               return 0;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_VGA:
+               return port_cap[1] * 8 * 1000;
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               return port_cap[1] * 2500;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
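
A typical consumer clamps mode clocks against this limit during mode validation. A hedged sketch, assuming cached dpcd/port_cap arrays and a mode_valid-style context:

	/* reject modes above the branch device's dot/TMDS clock, in kHz */
	int max_clock = drm_dp_downstream_max_clock(dpcd, port_cap);

	if (max_clock && mode->clock > max_clock)
		return MODE_CLOCK_HIGH;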
+
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                             const u8 port_cap[4])
+{
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DETAILED_CAP_INFO_AVAILABLE;
+       int bpc;
+
+       if (!detailed_cap_info)
+               return 0;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_VGA:
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+               switch (bpc) {
+               case DP_DS_8BPC:
+                       return 8;
+               case DP_DS_10BPC:
+                       return 10;
+               case DP_DS_12BPC:
+                       return 12;
+               case DP_DS_16BPC:
+                       return 16;
+               }
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
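
Likewise, a driver can cap its output depth to what the branch device advertises. A sketch with a hypothetical requested bpc value:

	/* never ask for more bits per component than the port handles */
	int ds_max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);

	if (ds_max_bpc && bpc > ds_max_bpc)
		bpc = ds_max_bpc;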
+
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns the number of bytes read on success, or a negative error code
+ * on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+       return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
+
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+                            const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                            const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                                DP_DETAILED_CAP_INFO_AVAILABLE;
+       int clk;
+       int bpc;
+       char id[6];
+       int len;
+       uint8_t rev[2];
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                            DP_DWN_STRM_PORT_PRESENT;
+
+       seq_printf(m, "\tDP branch device present: %s\n",
+                  branch_device ? "yes" : "no");
+
+       if (!branch_device)
+               return;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_DP:
+               seq_puts(m, "\t\tType: DisplayPort\n");
+               break;
+       case DP_DS_PORT_TYPE_VGA:
+               seq_puts(m, "\t\tType: VGA\n");
+               break;
+       case DP_DS_PORT_TYPE_DVI:
+               seq_puts(m, "\t\tType: DVI\n");
+               break;
+       case DP_DS_PORT_TYPE_HDMI:
+               seq_puts(m, "\t\tType: HDMI\n");
+               break;
+       case DP_DS_PORT_TYPE_NON_EDID:
+               seq_puts(m, "\t\tType: others without EDID support\n");
+               break;
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               seq_puts(m, "\t\tType: DP++\n");
+               break;
+       case DP_DS_PORT_TYPE_WIRELESS:
+               seq_puts(m, "\t\tType: Wireless\n");
+               break;
+       default:
+               seq_puts(m, "\t\tType: N/A\n");
+       }
+
+       drm_dp_downstream_id(aux, id);
+       seq_printf(m, "\t\tID: %s\n", id);
+
+       len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+       if (len > 0)
+               seq_printf(m, "\t\tHW: %d.%d\n",
+                          (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+       len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+       if (len > 0)
+               seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+       if (detailed_cap_info) {
+               clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+               if (clk > 0) {
+                       if (type == DP_DS_PORT_TYPE_VGA)
+                               seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+                       else
+                               seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+               }
+
+               bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+               if (bpc > 0)
+                       seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
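
This helper is meant to be fed from a driver's debugfs. A minimal seq_file show callback might look like this, with struct my_dp and its cached DPCD fields being hypothetical:

	static int my_dp_branch_show(struct seq_file *m, void *unused)
	{
		struct my_dp *dp = m->private;

		drm_dp_downstream_debug(m, dp->dpcd, dp->downstream_ports,
					&dp->aux);
		return 0;
	}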
+
 /*
  * I2C-over-AUX implementation
  */
@@ -574,7 +749,17 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                        if (ret == -EBUSY)
                                continue;
 
-                       DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+                       /*
+                        * While timeouts can be errors, they're usually normal
+                        * behavior (for instance, when a driver tries to
+                        * communicate with a non-existent DisplayPort device).
+                        * Avoid spamming the kernel log with timeout errors.
+                        */
+                       if (ret == -ETIMEDOUT)
+                               DRM_DEBUG_KMS_RATELIMITED("transaction timed out\n");
+                       else
+                               DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+
                        return ret;
                }
 
index be27ed36f56e1bc13418b44f07d82531634cf4c4..80c7f25b5b74a32df2e550db0277d0e5754f8523 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/mount.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include "drm_crtc_internal.h"
 #include "drm_legacy.h"
 #include "drm_internal.h"
@@ -46,8 +45,8 @@
 unsigned int drm_debug = 0;
 EXPORT_SYMBOL(drm_debug);
 
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
+MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
+MODULE_DESCRIPTION("DRM shared core routines");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
@@ -63,37 +62,51 @@ static struct idr drm_minors_idr;
 
 static struct dentry *drm_debugfs_root;
 
-void drm_err(const char *format, ...)
+#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
+
+void drm_dev_printk(const struct device *dev, const char *level,
+                   unsigned int category, const char *function_name,
+                   const char *prefix, const char *format, ...)
 {
        struct va_format vaf;
        va_list args;
 
-       va_start(args, format);
+       if (category != DRM_UT_NONE && !(drm_debug & category))
+               return;
 
+       va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;
 
-       printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
-              __builtin_return_address(0), &vaf);
+       if (dev)
+               dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
+                          &vaf);
+       else
+               printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
 
        va_end(args);
 }
-EXPORT_SYMBOL(drm_err);
+EXPORT_SYMBOL(drm_dev_printk);
 
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
+void drm_printk(const char *level, unsigned int category,
+               const char *function_name, const char *prefix,
+               const char *format, ...)
 {
        struct va_format vaf;
        va_list args;
 
+       if (category != DRM_UT_NONE && !(drm_debug & category))
+               return;
+
        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;
 
-       printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+       printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
 
        va_end(args);
 }
-EXPORT_SYMBOL(drm_ut_debug_printk);
+EXPORT_SYMBOL(drm_printk);
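
The debug macros in the drm headers can now route through this single entry point. A hypothetical wrapper in that style, using the signature introduced above:

	/* hypothetical macro mirroring how DRM_DEBUG_* can be built on top */
	#define MY_DEBUG_KMS(fmt, ...)					\
		drm_printk(KERN_DEBUG, DRM_UT_KMS, __func__, "",	\
			   fmt, ##__VA_ARGS__)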
 
 /*
  * DRM Minors
@@ -112,7 +125,7 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
 {
        switch (type) {
-       case DRM_MINOR_LEGACY:
+       case DRM_MINOR_PRIMARY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
@@ -325,6 +338,9 @@ void drm_minor_release(struct drm_minor *minor)
 
 static int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
+       if (!name)
+               return -EINVAL;
+
        kfree(dev->unique);
        dev->unique = kstrdup(name, GFP_KERNEL);
 
@@ -512,7 +528,7 @@ int drm_dev_init(struct drm_device *dev,
                        goto err_minors;
        }
 
-       ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+       ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;
 
@@ -545,7 +561,7 @@ err_ctxbitmap:
        drm_legacy_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
 err_minors:
-       drm_minor_free(dev, DRM_MINOR_LEGACY);
+       drm_minor_free(dev, DRM_MINOR_PRIMARY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);
        drm_fs_inode_free(dev->anon_inode);
@@ -575,7 +591,7 @@ EXPORT_SYMBOL(drm_dev_init);
  * own struct should look at using drm_dev_init() instead.
  *
  * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * Pointer to new DRM device, or ERR_PTR on failure.
  */
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent)
@@ -585,12 +601,12 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ret = drm_dev_init(dev, driver, parent);
        if (ret) {
                kfree(dev);
-               return NULL;
+               return ERR_PTR(ret);
        }
 
        return dev;
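
With drm_dev_alloc() returning ERR_PTR codes instead of NULL, callers have to move from NULL checks to the IS_ERR()/PTR_ERR() pattern, roughly (my_driver and pdev are placeholders):

	ddev = drm_dev_alloc(&my_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);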
@@ -608,7 +624,7 @@ static void drm_dev_release(struct kref *ref)
        drm_ht_remove(&dev->map_hash);
        drm_fs_inode_free(dev->anon_inode);
 
-       drm_minor_free(dev, DRM_MINOR_LEGACY);
+       drm_minor_free(dev, DRM_MINOR_PRIMARY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);
 
@@ -684,7 +700,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto err_minors;
 
-       ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+       ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;
 
@@ -701,7 +717,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        goto out_unlock;
 
 err_minors:
-       drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 out_unlock:
@@ -741,7 +757,7 @@ void drm_dev_unregister(struct drm_device *dev)
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);
 
-       drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 }
@@ -807,53 +823,48 @@ static const struct file_operations drm_stub_fops = {
        .llseek = noop_llseek,
 };
 
+static void drm_core_exit(void)
+{
+       unregister_chrdev(DRM_MAJOR, "drm");
+       debugfs_remove(drm_debugfs_root);
+       drm_sysfs_destroy();
+       idr_destroy(&drm_minors_idr);
+       drm_connector_ida_destroy();
+       drm_global_release();
+}
+
 static int __init drm_core_init(void)
 {
-       int ret = -ENOMEM;
+       int ret;
 
        drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);
 
-       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
-               goto err_p1;
-
        ret = drm_sysfs_init();
        if (ret < 0) {
-               printk(KERN_ERR "DRM: Error creating drm class.\n");
-               goto err_p2;
+               DRM_ERROR("Cannot create DRM class: %d\n", ret);
+               goto error;
        }
 
        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
-               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
-               ret = -1;
-               goto err_p3;
+               ret = -ENOMEM;
+               DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
+               goto error;
        }
 
-       DRM_INFO("Initialized %s %d.%d.%d %s\n",
-                CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
+       if (ret < 0)
+               goto error;
+
+       DRM_INFO("Initialized\n");
        return 0;
-err_p3:
-       drm_sysfs_destroy();
-err_p2:
-       unregister_chrdev(DRM_MAJOR, "drm");
 
-       idr_destroy(&drm_minors_idr);
-err_p1:
+error:
+       drm_core_exit();
        return ret;
 }
 
-static void __exit drm_core_exit(void)
-{
-       debugfs_remove(drm_debugfs_root);
-       drm_sysfs_destroy();
-
-       unregister_chrdev(DRM_MAJOR, "drm");
-
-       drm_connector_ida_destroy();
-       idr_destroy(&drm_minors_idr);
-}
-
 module_init(drm_core_init);
 module_exit(drm_core_exit);
index 7df26d4b7ad8b5775c97df7e5ab3fdab315195e5..50541324a4ab76fa9e39a79010fe16b50fd57ee4 100644 (file)
@@ -74,6 +74,8 @@
 #define EDID_QUIRK_FORCE_8BPC                  (1 << 8)
 /* Force 12bpc */
 #define EDID_QUIRK_FORCE_12BPC                 (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC                  (1 << 10)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -100,6 +102,9 @@ static struct edid_quirk {
        /* Unknown Acer */
        { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
 
+       /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+       { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -986,7 +991,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 64 - 1920x1080@100Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
-                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 };
@@ -3716,14 +3721,7 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
-/**
- * drm_assign_hdmi_deep_color_info - detect whether monitor supports
- * hdmi deep color modes and update drm_display_info if so.
- * @edid: monitor EDID information
- * @info: Updated with maximum supported deep color bpc and color format
- *        if deep color supported.
- * @connector: DRM connector, used only for debug output
- *
+/*
  * Parse the CEA extension according to CEA-861-B.
  * Return true if HDMI deep color supported, false if not or unknown.
  */
@@ -3817,16 +3815,6 @@ static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
        return false;
 }
 
-/**
- * drm_add_display_info - pull display info out if present
- * @edid: EDID data
- * @info: display info (attached to connector)
- * @connector: connector whose edid is used to build display info
- *
- * Grab any available display info and stuff it into the drm_display_info
- * structure that's part of the connector.  Useful for tracking bpp and
- * color spaces.
- */
 static void drm_add_display_info(struct edid *edid,
                                  struct drm_display_info *info,
                                  struct drm_connector *connector)
@@ -3862,6 +3850,20 @@ static void drm_add_display_info(struct edid *edid,
        /* HDMI deep color modes supported? Assign to info, if so */
        drm_assign_hdmi_deep_color_info(edid, info, connector);
 
+       /*
+        * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+        *
+        * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+        * tells us to assume 8 bpc color depth if the EDID doesn't have
+        * extensions which tell otherwise.
+        */
+       if ((info->bpc == 0) && (edid->revision < 4) &&
+           (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+               info->bpc = 8;
+               DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+                         connector->name, info->bpc);
+       }
+
        /* Only defined for 1.4 with digital displays */
        if (edid->revision < 4)
                return;
@@ -4033,7 +4035,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
  * @connector: connector we're probing
  * @edid: EDID data
  *
- * Add the specified modes to the connector's mode list.
+ * Add the specified modes to the connector's mode list. Also fills out the
+ * &drm_display_info structure in @connector with any information which can be
+ * derived from the edid.
  *
  * Return: The number of modes added or 0 if we couldn't find any.
  */
@@ -4082,6 +4086,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
        drm_add_display_info(edid, &connector->display_info, connector);
 
+       if (quirks & EDID_QUIRK_FORCE_6BPC)
+               connector->display_info.bpc = 6;
+
        if (quirks & EDID_QUIRK_FORCE_8BPC)
                connector->display_info.bpc = 8;
 
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
new file mode 100644 (file)
index 0000000..5c06771
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Encoders represent the connecting element between the CRTC (as the overall
+ * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
+ * generic sink entity, represented by struct &drm_connector). An encoder takes
+ * pixel data from a CRTC and converts it to a format suitable for any attached
+ * connector. Encoders are objects exposed to userspace, originally to allow
+ * userspace to infer cloning and connector/CRTC restrictions. Unfortunately
+ * almost all drivers get this wrong, making the uabi pretty much useless. On
+ * top of that the exposed restrictions are too simple for today's hardware, and
+ * the recommended way to infer restrictions is by using the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL.
+ *
+ * Otherwise encoders aren't used in the uapi at all (any modeset request from
+ * userspace directly connects a connector with a CRTC), drivers are therefore
+ * free to use them however they wish. Modeset helper libraries make strong use
+ * of encoders to facilitate code sharing. But for more complex settings it is
+ * usually better to move shared code into a separate &drm_bridge. Compared to
+ * encoders, bridges also have the benefit of being purely an internal
+ * abstraction since they are not exposed to userspace at all.
+ *
+ * Encoders are initialized with drm_encoder_init() and cleaned up using
+ * drm_encoder_cleanup().
+ */
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+       { DRM_MODE_ENCODER_NONE, "None" },
+       { DRM_MODE_ENCODER_DAC, "DAC" },
+       { DRM_MODE_ENCODER_TMDS, "TMDS" },
+       { DRM_MODE_ENCODER_LVDS, "LVDS" },
+       { DRM_MODE_ENCODER_TVDAC, "TV" },
+       { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+       { DRM_MODE_ENCODER_DSI, "DSI" },
+       { DRM_MODE_ENCODER_DPMST, "DP MST" },
+       { DRM_MODE_ENCODER_DPI, "DPI" },
+};
+
+int drm_encoder_register_all(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+       int ret = 0;
+
+       drm_for_each_encoder(encoder, dev) {
+               if (encoder->funcs->late_register)
+                       ret = encoder->funcs->late_register(encoder);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+void drm_encoder_unregister_all(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+
+       drm_for_each_encoder(encoder, dev) {
+               if (encoder->funcs->early_unregister)
+                       encoder->funcs->early_unregister(encoder);
+       }
+}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initialises a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
+ * called from the driver's destroy hook in &drm_encoder_funcs.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+                    struct drm_encoder *encoder,
+                    const struct drm_encoder_funcs *funcs,
+                    int encoder_type, const char *name, ...)
+{
+       int ret;
+
+       drm_modeset_lock_all(dev);
+
+       ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+       if (ret)
+               goto out_unlock;
+
+       encoder->dev = dev;
+       encoder->encoder_type = encoder_type;
+       encoder->funcs = funcs;
+       if (name) {
+               va_list ap;
+
+               va_start(ap, name);
+               encoder->name = kvasprintf(GFP_KERNEL, name, ap);
+               va_end(ap);
+       } else {
+               encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+                                         drm_encoder_enum_list[encoder_type].name,
+                                         encoder->base.id);
+       }
+       if (!encoder->name) {
+               ret = -ENOMEM;
+               goto out_put;
+       }
+
+       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+       encoder->index = dev->mode_config.num_encoder++;
+
+out_put:
+       if (ret)
+               drm_mode_object_unregister(dev, &encoder->base);
+
+out_unlock:
+       drm_modeset_unlock_all(dev);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+
+       /* Note that the encoder_list is considered to be static; should we
+        * remove the drm_encoder at runtime we would have to decrement all
+        * the indices on the drm_encoder after us in the encoder_list.
+        */
+
+       drm_modeset_lock_all(dev);
+       drm_mode_object_unregister(dev, &encoder->base);
+       kfree(encoder->name);
+       list_del(&encoder->head);
+       dev->mode_config.num_encoder--;
+       drm_modeset_unlock_all(dev);
+
+       memset(encoder, 0, sizeof(*encoder));
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
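
A usage sketch for the init/cleanup pair, assuming a subclassed encoder of the recommended form struct my_encoder { struct drm_encoder base; ... }:

	/* NULL name picks the "TMDS-<id>" style default from the enum list */
	ret = drm_encoder_init(dev, &my_enc->base, &my_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;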
+
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+       struct drm_connector *connector;
+       struct drm_device *dev = encoder->dev;
+       bool uses_atomic = false;
+
+       /* For atomic drivers only state objects are synchronously updated and
+        * protected by modeset locks, so check those first. */
+       drm_for_each_connector(connector, dev) {
+               if (!connector->state)
+                       continue;
+
+               uses_atomic = true;
+
+               if (connector->state->best_encoder != encoder)
+                       continue;
+
+               return connector->state->crtc;
+       }
+
+       /* Don't return stale data (e.g. pending async disable). */
+       if (uses_atomic)
+               return NULL;
+
+       return encoder->crtc;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_encoder *enc_resp = data;
+       struct drm_encoder *encoder;
+       struct drm_crtc *crtc;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+       if (!encoder)
+               return -ENOENT;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       crtc = drm_encoder_get_crtc(encoder);
+       if (crtc)
+               enc_resp->crtc_id = crtc->base.id;
+       else
+               enc_resp->crtc_id = 0;
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       enc_resp->encoder_type = encoder->encoder_type;
+       enc_resp->encoder_id = encoder->base.id;
+       enc_resp->possible_crtcs = encoder->possible_crtcs;
+       enc_resp->possible_clones = encoder->possible_clones;
+
+       return 0;
+}
index ce54e985d91ba0b2ee11000c6aa499cffb4a1bba..03414bde1f152637a7ed6002ed8a88e30611fec8 100644 (file)
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/console.h>
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -41,6 +41,8 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 
+#include "drm_crtc_helper_internal.h"
+
 static bool drm_fbdev_emulation = true;
 module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
 MODULE_PARM_DESC(fbdev_emulation,
@@ -335,7 +337,7 @@ retry:
                        goto fail;
                }
 
-               plane_state->rotation = BIT(DRM_ROTATE_0);
+               plane_state->rotation = DRM_ROTATE_0;
 
                plane->old_fb = plane->fb;
                plane_mask |= 1 << drm_plane_index(plane);
@@ -395,7 +397,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                if (dev->mode_config.rotation_property) {
                        drm_mode_plane_set_obj_prop(plane,
                                                    dev->mode_config.rotation_property,
-                                                   BIT(DRM_ROTATE_0));
+                                                   DRM_ROTATE_0);
                }
        }
 
@@ -464,7 +466,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
        /* Sometimes user space wants everything disabled, so don't steal the
         * display if there's a master. */
-       if (lockless_dereference(dev->master))
+       if (READ_ONCE(dev->master))
                return false;
 
        drm_for_each_crtc(crtc, dev) {
@@ -618,6 +620,16 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
        kfree(helper->crtc_info);
 }
 
+static void drm_fb_helper_resume_worker(struct work_struct *work)
+{
+       struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+                                                   resume_work);
+
+       console_lock();
+       fb_set_suspend(helper->fbdev, 0);
+       console_unlock();
+}
+
 static void drm_fb_helper_dirty_work(struct work_struct *work)
 {
        struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -649,6 +661,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 {
        INIT_LIST_HEAD(&helper->kernel_fb_list);
        spin_lock_init(&helper->dirty_lock);
+       INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
        INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
        helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
        helper->funcs = funcs;
@@ -1024,17 +1037,65 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
 /**
  * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
  * @fb_helper: driver-allocated fbdev helper
- * @state: desired state, zero to resume, non-zero to suspend
+ * @suspend: whether to suspend or resume
  *
- * A wrapper around fb_set_suspend implemented by fbdev core
+ * A wrapper around fb_set_suspend implemented by fbdev core.
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
+ * the lock yourself.
  */
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
 {
        if (fb_helper && fb_helper->fbdev)
-               fb_set_suspend(fb_helper->fbdev, state);
+               fb_set_suspend(fb_helper->fbdev, suspend);
 }
 EXPORT_SYMBOL(drm_fb_helper_set_suspend);
 
+/**
+ * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
+ *                                      takes the console lock
+ * @fb_helper: driver-allocated fbdev helper
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend() that takes the console lock. If the lock
+ * isn't available on resume, a worker is tasked with waiting for the lock
+ * to become available. The console lock can be pretty contended on resume
+ * due to all the printk activity.
+ *
+ * This function can be called multiple times with the same state since
+ * &fb_info->state is checked to see if fbdev is running or not before locking.
+ *
+ * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
+ */
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+                                       bool suspend)
+{
+       if (!fb_helper || !fb_helper->fbdev)
+               return;
+
+       /* make sure there's no pending/ongoing resume */
+       flush_work(&fb_helper->resume_work);
+
+       if (suspend) {
+               if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+                       return;
+
+               console_lock();
+
+       } else {
+               if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+                       return;
+
+               if (!console_trylock()) {
+                       schedule_work(&fb_helper->resume_work);
+                       return;
+               }
+       }
+
+       fb_set_suspend(fb_helper->fbdev, suspend);
+       console_unlock();
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
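
A driver's PM hooks would call this variant directly. A hedged sketch with hypothetical driver structures:

	static int my_driver_suspend(struct my_device *mdev)
	{
		/* may sleep on the console lock */
		drm_fb_helper_set_suspend_unlocked(&mdev->fb_helper, true);
		return 0;
	}

	static int my_driver_resume(struct my_device *mdev)
	{
		/* defers to the resume worker if the console lock is busy */
		drm_fb_helper_set_suspend_unlocked(&mdev->fb_helper, false);
		return 0;
	}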
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
                     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -2194,7 +2255,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
  * @fb_helper: the drm_fb_helper
  *
  * Scan the connectors attached to the fb_helper and try to put together a
- * setup after *notification of a change in output configuration.
+ * setup after notification of a change in output configuration.
  *
  * Called at runtime, takes the mode config locks to be able to check/change the
  * modeset configuration. Must be run from process context (which usually means
index 323c238fcac70426f947977649ccd38442693e03..e84faecf52251e49efdb8c53b2ccca345299dd8d 100644 (file)
@@ -92,7 +92,7 @@ static int drm_setup(struct drm_device * dev)
        int ret;
 
        if (dev->driver->firstopen &&
-           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+           drm_core_check_feature(dev, DRIVER_LEGACY)) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
@@ -199,7 +199,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 
        filp->private_data = priv;
        priv->filp = filp;
-       priv->uid = current_euid();
        priv->pid = get_pid(task_pid(current));
        priv->minor = minor;
 
@@ -346,7 +345,7 @@ void drm_lastclose(struct drm_device * dev)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_legacy_dev_reinit(dev);
 }
 
@@ -389,7 +388,7 @@ int drm_release(struct inode *inode, struct file *filp)
                  (long)old_encode_dev(file_priv->minor->kdev->devt),
                  dev->open_count);
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_legacy_lock_release(dev, filp);
 
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
index 0645c85d5f95fdec1cc5f430829f734bece97c5b..29c56b4331e007773e1afa2589e5a24d265da40d 100644 (file)
@@ -35,20 +35,61 @@ static char printable_char(int c)
        return isascii(c) && isprint(c) ? c : '?';
 }
 
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+       uint32_t fmt;
+
+       switch (bpp) {
+       case 8:
+               fmt = DRM_FORMAT_C8;
+               break;
+       case 16:
+               if (depth == 15)
+                       fmt = DRM_FORMAT_XRGB1555;
+               else
+                       fmt = DRM_FORMAT_RGB565;
+               break;
+       case 24:
+               fmt = DRM_FORMAT_RGB888;
+               break;
+       case 32:
+               if (depth == 24)
+                       fmt = DRM_FORMAT_XRGB8888;
+               else if (depth == 30)
+                       fmt = DRM_FORMAT_XRGB2101010;
+               else
+                       fmt = DRM_FORMAT_ARGB8888;
+               break;
+       default:
+               DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+               fmt = DRM_FORMAT_XRGB8888;
+               break;
+       }
+
+       return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
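
For example, the legacy bpp/depth pairs map as follows (values read off the
switch above; illustrative only):

	u32 fmt;

	fmt = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888 */
	fmt = drm_mode_legacy_fb_format(32, 30); /* DRM_FORMAT_XRGB2101010 */
	fmt = drm_mode_legacy_fb_format(16, 15); /* DRM_FORMAT_XRGB1555 */
	fmt = drm_mode_legacy_fb_format(16, 16); /* DRM_FORMAT_RGB565 */
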
 /**
  * drm_get_format_name - return a string for drm fourcc format
  * @format: format to compute name of
  *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
+ * Note that the buffer returned by this function is owned by the caller
+ * and will need to be freed using kfree().
  */
-const char *drm_get_format_name(uint32_t format)
+char *drm_get_format_name(uint32_t format)
 {
-       static char buf[32];
+       char *buf = kmalloc(32, GFP_KERNEL);
 
-       snprintf(buf, sizeof(buf),
+       snprintf(buf, 32,
                 "%c%c%c%c %s-endian (0x%08x)",
                 printable_char(format & 0xff),
                 printable_char((format >> 8) & 0xff),
@@ -73,6 +114,8 @@ EXPORT_SYMBOL(drm_get_format_name);
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
                          int *bpp)
 {
+       char *format_name;
+
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB332:
@@ -127,8 +170,9 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
                *bpp = 32;
                break;
        default:
-               DRM_DEBUG_KMS("unsupported pixel format %s\n",
-                             drm_get_format_name(format));
+               format_name = drm_get_format_name(format);
+               DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
+               kfree(format_name);
                *depth = 0;
                *bpp = 0;
                break;
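
With the reworked drm_get_format_name() the caller owns the returned buffer,
so every call site now follows the get/log/kfree pattern seen in the hunk
above:

	char *format_name = drm_get_format_name(DRM_FORMAT_NV12);

	DRM_DEBUG_KMS("pixel format %s\n", format_name);
	kfree(format_name);
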
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
new file mode 100644 (file)
index 0000000..398efd6
--- /dev/null
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Frame buffers are abstract memory objects that provide a source of pixels to
+ * scan out to a CRTC. Applications explicitly request the creation of frame
+ * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
+ * handle that can be passed to the KMS CRTC control, plane configuration and
+ * page flip functions.
+ *
+ * Frame buffers rely on the underlying memory manager for allocating backing
+ * storage. When creating a frame buffer, applications pass a memory handle
+ * (or a list of memory handles for multi-planar formats) through the
+ * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * buffer management interface this would be a GEM handle.  Drivers are however
+ * free to use their own backing storage object handles, e.g. vmwgfx directly
+ * exposes special TTM handles to userspace and so expects TTM handles in the
+ * create ioctl and not GEM handles.
+ *
+ * Framebuffers are tracked with struct &drm_framebuffer. They are published
+ * using drm_framebuffer_init() - after calling that function userspace can use
+ * and access the framebuffer object. The helper function
+ * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
+ * metadata fields.
+ *
+ * The lifetime of a drm framebuffer is controlled with a reference count,
+ * drivers can grab additional references with drm_framebuffer_reference() and
+ * drop them again with drm_framebuffer_unreference(). For driver-private
+ * framebuffers for which the last reference is never dropped (e.g. for the
+ * fbdev framebuffer when the struct &drm_framebuffer is embedded into
+ * the fbdev helper struct) drivers can manually clean up a framebuffer at
+ * module unload time with drm_framebuffer_unregister_private(). But doing this
+ * is not recommended, and it's better to have a normal free-standing struct
+ * &drm_framebuffer.
+ */
+
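
A sketch of the reference-counting rules described in the overview
(driver-side usage, illustrative only):

	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* grabs a reference */
	if (!fb)
		return -ENOENT;

	/* ... use the framebuffer ... */

	drm_framebuffer_unreference(fb);	/* drop the lookup reference */
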
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+                                    uint32_t src_w, uint32_t src_h,
+                                    const struct drm_framebuffer *fb)
+{
+       unsigned int fb_width, fb_height;
+
+       fb_width = fb->width << 16;
+       fb_height = fb->height << 16;
+
+       /* Make sure source coordinates are inside the fb. */
+       if (src_w > fb_width ||
+           src_x > fb_width - src_w ||
+           src_h > fb_height ||
+           src_y > fb_height - src_h) {
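+               /*
+                * Coordinates are 16.16 fixed point; the fractional part is
+                * printed in millionths: (frac * 15625) >> 10 == frac * 10^6 / 2^16.
+                */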
+               DRM_DEBUG_KMS("Invalid source coordinates "
+                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+                             src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+                             src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+                             src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+                             src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *or = data;
+       struct drm_mode_fb_cmd2 r = {};
+       int ret;
+
+       /* convert to new format and call new ioctl */
+       r.fb_id = or->fb_id;
+       r.width = or->width;
+       r.height = or->height;
+       r.pitches[0] = or->pitch;
+       r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+       r.handles[0] = or->handle;
+
+       ret = drm_mode_addfb2(dev, &r, file_priv);
+       if (ret)
+               return ret;
+
+       or->fb_id = r.fb_id;
+
+       return 0;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+       uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+       char *format_name;
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_XBGR4444:
+       case DRM_FORMAT_RGBX4444:
+       case DRM_FORMAT_BGRX4444:
+       case DRM_FORMAT_ARGB4444:
+       case DRM_FORMAT_ABGR4444:
+       case DRM_FORMAT_RGBA4444:
+       case DRM_FORMAT_BGRA4444:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_AYUV:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_NV24:
+       case DRM_FORMAT_NV42:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 0;
+       default:
+               format_name = drm_get_format_name(r->pixel_format);
+               DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
+               kfree(format_name);
+               return -EINVAL;
+       }
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+       int ret, hsub, vsub, num_planes, i;
+
+       ret = format_check(r);
+       if (ret) {
+               char *format_name = drm_get_format_name(r->pixel_format);
+               DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
+               kfree(format_name);
+               return ret;
+       }
+
+       hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+       vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+       num_planes = drm_format_num_planes(r->pixel_format);
+
+       if (r->width == 0 || r->width % hsub) {
+               DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+               return -EINVAL;
+       }
+
+       if (r->height == 0 || r->height % vsub) {
+               DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_planes; i++) {
+               unsigned int width = r->width / (i != 0 ? hsub : 1);
+               unsigned int height = r->height / (i != 0 ? vsub : 1);
+               unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+               if (!r->handles[i]) {
+                       DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+                       return -EINVAL;
+               }
+
+               if ((uint64_t) width * cpp > UINT_MAX)
+                       return -ERANGE;
+
+               if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+                       return -ERANGE;
+
+               if (r->pitches[i] < width * cpp) {
+                       DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+                       return -EINVAL;
+               }
+
+               if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+                       DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+                                     r->modifier[i], i);
+                       return -EINVAL;
+               }
+
+               /* modifier specific checks: */
+               switch (r->modifier[i]) {
+               case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+                       /* NOTE: the pitch restriction may be lifted later if it turns
+                        * out that no hw has this restriction:
+                        */
+                       if (r->pixel_format != DRM_FORMAT_NV12 ||
+                                       width % 128 || height % 32 ||
+                                       r->pitches[i] % 128) {
+                               DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+                               return -EINVAL;
+                       }
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
+       for (i = num_planes; i < 4; i++) {
+               if (r->modifier[i]) {
+                       DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+                       return -EINVAL;
+               }
+
+               /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
+               if (!(r->flags & DRM_MODE_FB_MODIFIERS))
+                       continue;
+
+               if (r->handles[i]) {
+                       DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+                       return -EINVAL;
+               }
+
+               if (r->pitches[i]) {
+                       DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+                       return -EINVAL;
+               }
+
+               if (r->offsets[i]) {
+                       DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+                               const struct drm_mode_fb_cmd2 *r,
+                               struct drm_file *file_priv)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret;
+
+       if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+               DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+                         r->width, config->min_width, config->max_width);
+               return ERR_PTR(-EINVAL);
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+                         r->height, config->min_height, config->max_height);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (r->flags & DRM_MODE_FB_MODIFIERS &&
+           !dev->mode_config.allow_fb_modifiers) {
+               DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       ret = framebuffer_check(r);
+       if (ret)
+               return ERR_PTR(ret);
+
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+       if (IS_ERR(fb)) {
+               DRM_DEBUG_KMS("could not create framebuffer\n");
+               return fb;
+       }
+
+       return fb;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+                   void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd2 *r = data;
+       struct drm_framebuffer *fb;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       fb = drm_internal_framebuffer_create(dev, r, file_priv);
+       if (IS_ERR(fb))
+               return PTR_ERR(fb);
+
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+       r->fb_id = fb->base.id;
+
+       /* Transfer ownership to the filp for reaping on close */
+       mutex_lock(&file_priv->fbs_lock);
+       list_add(&fb->filp_head, &file_priv->fbs);
+       mutex_unlock(&file_priv->fbs_lock);
+
+       return 0;
+}
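
From userspace an ADDFB2 request looks roughly like this; a sketch assuming
libdrm's drmIoctl(), an open DRM fd, and a previously created GEM handle
bo_handle:

	struct drm_mode_fb_cmd2 cmd = {
		.width        = 1920,
		.height       = 1080,
		.pixel_format = DRM_FORMAT_XRGB8888,
		.handles      = { bo_handle },
		.pitches      = { 1920 * 4 },
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd) == 0)
		fb_id = cmd.fb_id;	/* opaque framebuffer id */
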
+
+struct drm_mode_rmfb_work {
+       struct work_struct work;
+       struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+       struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+       while (!list_empty(&arg->fbs)) {
+               struct drm_framebuffer *fb =
+                       list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+               list_del_init(&fb->filp_head);
+               drm_framebuffer_remove(fb);
+       }
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fbl = NULL;
+       uint32_t *id = data;
+       int found = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       fb = drm_framebuffer_lookup(dev, *id);
+       if (!fb)
+               return -ENOENT;
+
+       mutex_lock(&file_priv->fbs_lock);
+       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+               if (fb == fbl)
+                       found = 1;
+       if (!found) {
+               mutex_unlock(&file_priv->fbs_lock);
+               goto fail_unref;
+       }
+
+       list_del_init(&fb->filp_head);
+       mutex_unlock(&file_priv->fbs_lock);
+
+       /* drop the reference we picked up in framebuffer lookup */
+       drm_framebuffer_unreference(fb);
+
+       /*
+        * we now own the reference that was stored in the fbs list
+        *
+        * drm_framebuffer_remove may fail with -EINTR on pending signals,
+        * so run this in a separate stack as there's no way to correctly
+        * handle this after the fb is already removed from the lookup table.
+        */
+       if (drm_framebuffer_read_refcount(fb) > 1) {
+               struct drm_mode_rmfb_work arg;
+
+               INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+               INIT_LIST_HEAD(&arg.fbs);
+               list_add_tail(&fb->filp_head, &arg.fbs);
+
+               schedule_work(&arg.work);
+               flush_work(&arg.work);
+               destroy_work_on_stack(&arg.work);
+       } else
+               drm_framebuffer_unreference(fb);
+
+       return 0;
+
+fail_unref:
+       drm_framebuffer_unreference(fb);
+       return -ENOENT;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_framebuffer *fb;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       fb = drm_framebuffer_lookup(dev, r->fb_id);
+       if (!fb)
+               return -ENOENT;
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->pitch = fb->pitches[0];
+       if (fb->funcs->create_handle) {
+               if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
+                   drm_is_control_client(file_priv)) {
+                       ret = fb->funcs->create_handle(fb, file_priv,
+                                                      &r->handle);
+               } else {
+                       /* GET_FB() is an unprivileged ioctl so we must not
+                        * return a buffer-handle to non-master processes! For
+                        * backwards-compatibility reasons, we cannot make
+                        * GET_FB() privileged, so just return an invalid handle
+                        * for non-masters. */
+                       r->handle = 0;
+                       ret = 0;
+               }
+       } else {
+               ret = -ENODEV;
+       }
+
+       drm_framebuffer_unreference(fb);
+
+       return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * USB display-link, MIPI manual-update panels or eDP panel self-refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_clip_rect __user *clips_ptr;
+       struct drm_clip_rect *clips = NULL;
+       struct drm_mode_fb_dirty_cmd *r = data;
+       struct drm_framebuffer *fb;
+       unsigned flags;
+       int num_clips;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       fb = drm_framebuffer_lookup(dev, r->fb_id);
+       if (!fb)
+               return -ENOENT;
+
+       num_clips = r->num_clips;
+       clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+       if (!num_clips != !clips_ptr) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+       /* If userspace annotates copy, clips must come in pairs */
+       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+
+       if (num_clips && clips_ptr) {
+               if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+                       ret = -EINVAL;
+                       goto out_err1;
+               }
+               clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+               if (!clips) {
+                       ret = -ENOMEM;
+                       goto out_err1;
+               }
+
+               ret = copy_from_user(clips, clips_ptr,
+                                    num_clips * sizeof(*clips));
+               if (ret) {
+                       ret = -EFAULT;
+                       goto out_err2;
+               }
+       }
+
+       if (fb->funcs->dirty) {
+               ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+                                      clips, num_clips);
+       } else {
+               ret = -ENOSYS;
+       }
+
+out_err2:
+       kfree(clips);
+out_err1:
+       drm_framebuffer_unreference(fb);
+
+       return ret;
+}
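
Userspace supplies the damage as an array of clip rectangles; a sketch,
again assuming libdrm's drmIoctl() and an fb_id obtained from ADDFB2:

	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 256, .y2 = 256 };
	struct drm_mode_fb_dirty_cmd dirty = {
		.fb_id     = fb_id,
		.num_clips = 1,
		.clips_ptr = (__u64)(uintptr_t)&clip,
	};

	drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
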
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file
+ *
+ * Destroy all the FBs associated with @priv.
+ *
+ * Called when the drm file is released, not directly by userspace.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+       struct drm_framebuffer *fb, *tfb;
+       struct drm_mode_rmfb_work arg;
+
+       INIT_LIST_HEAD(&arg.fbs);
+
+       /*
+        * When the file gets released that means no one else can access the fb
+        * list any more, so no need to grab fpriv->fbs_lock. And we need to
+        * avoid upsetting lockdep since the universal cursor code adds a
+        * framebuffer while holding mutex locks.
+        *
+        * Note that a real deadlock between fpriv->fbs_lock and the modeset
+        * locks is impossible here since no one else but this function can get
+        * at it any more.
+        */
+       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+               if (drm_framebuffer_read_refcount(fb) > 1) {
+                       list_move_tail(&fb->filp_head, &arg.fbs);
+               } else {
+                       list_del_init(&fb->filp_head);
+
+                       /* This drops the fpriv->fbs reference. */
+                       drm_framebuffer_unreference(fb);
+               }
+       }
+
+       if (!list_empty(&arg.fbs)) {
+               INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+               schedule_work(&arg.work);
+               flush_work(&arg.work);
+               destroy_work_on_stack(&arg.work);
+       }
+}
+
+void drm_framebuffer_free(struct kref *kref)
+{
+       struct drm_framebuffer *fb =
+                       container_of(kref, struct drm_framebuffer, base.refcount);
+       struct drm_device *dev = fb->dev;
+
+       /*
+        * The lookup idr holds a weak reference, which has not necessarily been
+        * removed at this point. Check for that.
+        */
+       drm_mode_object_unregister(dev, &fb->base);
+
+       fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means that by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs)
+{
+       int ret;
+
+       INIT_LIST_HEAD(&fb->filp_head);
+       fb->dev = dev;
+       fb->funcs = funcs;
+
+       ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+                                     false, drm_framebuffer_free);
+       if (ret)
+               goto out;
+
+       mutex_lock(&dev->mode_config.fb_lock);
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+       mutex_unlock(&dev->mode_config.fb_lock);
+
+       drm_mode_object_register(dev, &fb->base);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
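
To illustrate the publishing rule, a hypothetical driver's ->fb_create()
might look like this (sketch; foo_framebuffer, foo_bo and foo_fb_funcs are
assumed names, not part of this patch):

	struct foo_framebuffer {
		struct drm_framebuffer base;
		struct foo_bo *bo;		/* assumed backing storage */
	};

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct foo_framebuffer *foo_fb;
		int ret;

		foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
		if (!foo_fb)
			return ERR_PTR(-ENOMEM);

		/* fully set up the fb _before_ publishing it */
		drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);

		ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
		if (ret) {
			kfree(foo_fb);
			return ERR_PTR(ret);
		}

		return &foo_fb->base;
	}
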
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using drm_framebuffer_unreference().
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+                                              uint32_t id)
+{
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb = NULL;
+
+       obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+       if (obj)
+               fb = obj_to_fb(obj);
+       return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev;
+
+       if (!fb)
+               return;
+
+       dev = fb->dev;
+
+       /* Mark fb as reaped and drop idr ref. */
+       drm_mode_object_unregister(dev, &fb->base);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * ->destroy callback. It can also be used to clean up driver-private
+ * framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private().
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+
+       mutex_lock(&dev->mode_config.fb_lock);
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+       mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config.  If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev;
+       struct drm_crtc *crtc;
+       struct drm_plane *plane;
+
+       if (!fb)
+               return;
+
+       dev = fb->dev;
+
+       WARN_ON(!list_empty(&fb->filp_head));
+
+       /*
+        * drm ABI mandates that we remove any deleted framebuffers from active
+        * usage. But since most sane clients only remove framebuffers they no
+        * longer need, try to optimize this away.
+        *
+        * Since we're holding a reference ourselves, observing a refcount of 1
+        * means that we're the last holder and can skip it. Also, the refcount
+        * can never increase from 1 again, so we don't need any barriers or
+        * locks.
+        *
+        * Note that userspace could try to race with us and instate a new
+        * usage _after_ we've cleared all current ones. End result will be an
+        * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+        * in this manner.
+        */
+       if (drm_framebuffer_read_refcount(fb) > 1) {
+               drm_modeset_lock_all(dev);
+               /* remove from any CRTC */
+               drm_for_each_crtc(crtc, dev) {
+                       if (crtc->primary->fb == fb) {
+                               /* should turn off the crtc */
+                               if (drm_crtc_force_disable(crtc))
+                                       DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+                       }
+               }
+
+               drm_for_each_plane(plane, dev) {
+                       if (plane->fb == fb)
+                               drm_plane_force_disable(plane);
+               }
+               drm_modeset_unlock_all(dev);
+       }
+
+       drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
index 9134ae134667e2aaa0ebf36a7f0324d965de8af2..465bacd0a630455e72c3127e12a000cad80ca4db 100644 (file)
@@ -257,7 +257,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
-       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);
@@ -372,7 +372,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 
        handle = ret;
 
-       ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+       ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;
 
@@ -386,7 +386,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
        return 0;
 
 err_revoke:
-       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
 err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
@@ -991,7 +991,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        if (!obj)
                return -EINVAL;
 
-       if (!drm_vma_node_is_allowed(node, filp)) {
+       if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_unreference_unlocked(obj);
                return -EACCES;
        }
index 3d2e91c4d78e1c1003ac9521250dc4ca41c9b6ae..b404287abb976745c9a4eb8a927f59c473ddd084 100644 (file)
@@ -65,30 +65,34 @@ void drm_global_release(void)
 
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
-       int ret;
+       int ret = 0;
        struct drm_global_item *item = &glob[ref->global_type];
 
        mutex_lock(&item->mutex);
        if (item->refcount == 0) {
-               item->object = kzalloc(ref->size, GFP_KERNEL);
-               if (unlikely(item->object == NULL)) {
+               ref->object = kzalloc(ref->size, GFP_KERNEL);
+               if (unlikely(ref->object == NULL)) {
                        ret = -ENOMEM;
-                       goto out_err;
+                       goto error_unlock;
                }
-
-               ref->object = item->object;
                ret = ref->init(ref);
                if (unlikely(ret != 0))
-                       goto out_err;
+                       goto error_free;
 
+               item->object = ref->object;
+       } else {
+               ref->object = item->object;
        }
+
        ++item->refcount;
-       ref->object = item->object;
        mutex_unlock(&item->mutex);
        return 0;
-out_err:
+
+error_free:
+       kfree(ref->object);
+       ref->object = NULL;
+error_unlock:
        mutex_unlock(&item->mutex);
-       item->object = NULL;
        return ret;
 }
 EXPORT_SYMBOL(drm_global_item_ref);
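
The reworked error paths leave item->object untouched until init succeeds.
Usage is unchanged; a sketch with assumed TTM-style init/release callbacks:

	static struct drm_global_reference mem_ref = {
		.global_type = DRM_GLOBAL_TTM_MEM,
		.size        = sizeof(struct ttm_mem_global),
		.init        = foo_ttm_mem_global_init,	/* assumed */
		.release     = foo_ttm_mem_global_release,	/* assumed */
	};

	ret = drm_global_item_ref(&mem_ref);
	if (!ret) {
		/* ... use mem_ref.object ... */
		drm_global_item_unref(&mem_ref);
	}
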
index 7b30b307674ba6d1b71065a6923758ea251d05b2..dae18e58e79be6870a7061c07ec6c4f90e0e848b 100644 (file)
@@ -142,7 +142,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
                              unsigned long add)
 {
        int ret;
-       unsigned long mask = (1 << bits) - 1;
+       unsigned long mask = (1UL << bits) - 1;
        unsigned long first, unshifted_key;
 
        unshifted_key = hash_long(seed, bits);
index 9ae353f4dd06a562cd226292e6e53b0b5517c5d5..1df2d33d0b40ed43e0cbd4623e014851b34ce581 100644 (file)
@@ -80,6 +80,7 @@ int drm_clients_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *priv;
+       kuid_t uid;
 
        seq_printf(m,
                   "%20s %5s %3s master a %5s %10s\n",
@@ -98,13 +99,14 @@ int drm_clients_info(struct seq_file *m, void *data)
 
                rcu_read_lock(); /* locks pid_task()->comm */
                task = pid_task(priv->pid, PIDTYPE_PID);
+               uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
                seq_printf(m, "%20s %5d %3d   %c    %c %5d %10u\n",
                           task ? task->comm : "<unknown>",
                           pid_vnr(priv->pid),
                           priv->minor->index,
                           drm_is_current_master(priv) ? 'y' : 'n',
                           priv->authenticated ? 'y' : 'n',
-                          from_kuid_munged(seq_user_ns(m), priv->uid),
+                          from_kuid_munged(seq_user_ns(m), uid),
                           priv->magic);
                rcu_read_unlock();
        }
index b86dc9b921a5e8bfc806d3f1e4a4107125335306..e66af289a016d0e1389201ee180d34a0dea200a5 100644 (file)
@@ -21,6 +21,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
 /* drm_irq.c */
 extern unsigned int drm_timestamp_monotonic;
 
index 57676f8d7ecfe70c54fd79a3d714f196ff1406a5..867ab8c1582bf33f154c6519cd33f5c2f72a9be7 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/export.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 
 #define DRM_IOCTL_VERSION32            DRM_IOWR(0x00, drm_version32_t)
 #define DRM_IOCTL_GET_UNIQUE32         DRM_IOWR(0x01, drm_unique32_t)
@@ -346,6 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
        struct drm_stats __user *stats;
        int i, err;
 
+       memset(&s32, 0, sizeof(drm_stats32_t));
        stats = compat_alloc_user_space(sizeof(*stats));
        if (!stats)
                return -EFAULT;
@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 typedef struct drm_mode_fb_cmd232 {
        u32 fb_id;
        u32 width;
@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 
        return 0;
 }
+#endif
 
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
        [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
 };
 
 /**
index 33af4a5ddca153096067df91ae7400383ddbdadf..0ad2c47f808fc1d732ef530eba96a6dd265a71c6 100644 (file)
@@ -29,7 +29,6 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include <drm/drm_auth.h>
 #include "drm_legacy.h"
 #include "drm_internal.h"
@@ -189,9 +188,8 @@ static int drm_getclient(struct drm_device *dev, void *data,
         */
        if (client->idx == 0) {
                client->auth = file_priv->authenticated;
-               client->pid = pid_vnr(file_priv->pid);
-               client->uid = from_kuid_munged(current_user_ns(),
-                                              file_priv->uid);
+               client->pid = task_pid_vnr(current);
+               client->uid = overflowuid;
                client->magic = 0;
                client->iocs = 0;
 
@@ -228,6 +226,7 @@ static int drm_getstats(struct drm_device *dev, void *data,
 static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_get_cap *req = data;
+       struct drm_crtc *crtc;
 
        req->value = 0;
        switch (req->capability) {
@@ -254,6 +253,13 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
        case DRM_CAP_ASYNC_PAGE_FLIP:
                req->value = dev->mode_config.async_page_flip;
                break;
+       case DRM_CAP_PAGE_FLIP_TARGET:
+               req->value = 1;
+               drm_for_each_crtc(crtc, dev) {
+                       if (!crtc->funcs->page_flip_target)
+                               req->value = 0;
+               }
+               break;
        case DRM_CAP_CURSOR_WIDTH:
                if (dev->mode_config.cursor_width)
                        req->value = dev->mode_config.cursor_width;
@@ -714,9 +720,9 @@ long drm_ioctl(struct file *filp,
        if (ksize > in_size)
                memset(kdata + in_size, 0, ksize - in_size);
 
-       /* Enforce sane locking for kms driver ioctls. Core ioctls are
+       /* Enforce sane locking for modern driver ioctls. Core ioctls are
         * too messy still. */
-       if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
+       if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) ||
            (ioctl->flags & DRM_UNLOCKED))
                retcode = func(dev, kdata, file_priv);
        else {
index 77f357b2c3869a8bb13eea58c8c76819df34e70a..404a1ce7730c30973deb5ddb778e86efc8c5e452 100644 (file)
@@ -482,7 +482,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
                return ret;
        }
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
                vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
 
        /* After installing handler */
@@ -491,7 +491,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
 
        if (ret < 0) {
                dev->irq_enabled = false;
-               if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               if (drm_core_check_feature(dev, DRIVER_LEGACY))
                        vga_client_register(dev->pdev, NULL, NULL, NULL);
                free_irq(irq, dev);
        } else {
@@ -557,7 +557,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 
        DRM_DEBUG("irq=%d\n", dev->irq);
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
                vga_client_register(dev->pdev, NULL, NULL, NULL);
 
        if (dev->driver->irq_uninstall)
@@ -592,7 +592,7 @@ int drm_control(struct drm_device *dev, void *data,
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return 0;
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return 0;
        /* UMS was only ever supported on pci devices. */
        if (WARN_ON(!dev->pdev))
@@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  * Negative value on error, failure or if not supported in current
  * video mode:
  *
- * -EINVAL   - Invalid CRTC.
- * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
- * -ENOTSUPP - Function not supported in current display mode.
- * -EIO      - Failed, e.g., due to failed scanout position query.
+ * -EINVAL    Invalid CRTC.
+ * -EAGAIN    Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP  Function not supported in current display mode.
+ * -EIO       Failed, e.g., due to failed scanout position query.
  *
  * Returns or'ed positive status flags on success:
  *
@@ -1295,7 +1295,7 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
                if (e->pipe != pipe)
                        continue;
                DRM_DEBUG("Sending premature vblank event on disable: "
-                         "wanted %d, current %d\n",
+                         "wanted %u, current %u\n",
                          e->event.sequence, seq);
                list_del(&e->base.link);
                drm_vblank_put(dev, pipe);
@@ -1519,7 +1519,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
                return 0;
 
        /* KMS drivers handle this internally */
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return 0;
 
        pipe = modeset->crtc;
@@ -1585,7 +1585,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 
        seq = drm_vblank_count_and_time(dev, pipe, &now);
 
-       DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
+       DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
                  vblwait->request.sequence, seq, pipe);
 
        trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1693,7 +1693,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
                return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
        }
 
-       DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+       DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
                  vblwait->request.sequence, pipe);
        DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
                    (((drm_vblank_count(dev, pipe) -
@@ -1708,7 +1708,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
                vblwait->reply.tval_sec = now.tv_sec;
                vblwait->reply.tval_usec = now.tv_usec;
 
-               DRM_DEBUG("returning %d to client\n",
+               DRM_DEBUG("returning %u to client\n",
                          vblwait->reply.sequence);
        } else {
                DRM_DEBUG("vblank wait interrupted by signal\n");
@@ -1735,7 +1735,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
                if ((seq - e->event.sequence) > (1<<23))
                        continue;
 
-               DRM_DEBUG("vblank event on %d, current %d\n",
+               DRM_DEBUG("vblank event on %u, current %u\n",
                          e->event.sequence, seq);
 
                list_del(&e->base.link);
@@ -1826,6 +1826,7 @@ EXPORT_SYMBOL(drm_crtc_handle_vblank);
  */
 u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
 {
+       WARN_ON_ONCE(dev->max_vblank_count != 0);
        return 0;
 }
 EXPORT_SYMBOL(drm_vblank_no_hw_counter);
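
The new WARN_ON_ONCE catches drivers that install this stub while also
advertising a hardware counter. A driver without one hooks it up like this
(sketch) and leaves max_vblank_count at its default of zero:

	static struct drm_driver foo_driver = {
		/* no hw counter: dev->max_vblank_count must stay 0 */
		.get_vblank_counter = drm_vblank_no_hw_counter,
		/* ... */
	};
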
index 3187c4bb01cb09a59e4c7d9f5b52389347f217e4..45db36cd3d200fef2d3f0368491cbc1c6e73afbb 100644 (file)
@@ -27,7 +27,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_aux_dev.h>
+
+#include "drm_crtc_helper_internal.h"
 
 MODULE_AUTHOR("David Airlie, Jesse Barnes");
 MODULE_DESCRIPTION("DRM KMS helper");
index 48ac0ebbd6634e947fecbd701d96818152b0e9cc..c901f3c5b2696897fe1a7489422bf2c021d20e89 100644 (file)
@@ -163,7 +163,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
        struct drm_master *master = file_priv->master;
        int ret = 0;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        ++file_priv->lock_count;
@@ -252,7 +252,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
        struct drm_lock *lock = data;
        struct drm_master *master = file_priv->master;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (lock->context == DRM_KERNEL_CONTEXT) {
index af0d471ee246b6b3fad5ad9e91c99885f3010c80..1160a579e0dc0dd18dae853a55844080a5b950c5 100644 (file)
@@ -998,6 +998,27 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 }
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
+/**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ *    data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+                                sizeof(format));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+
 /**
  * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
  *    the Tearing Effect output signal of the display module
@@ -1021,25 +1042,53 @@ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
 
 /**
- * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
- *    data used by the interface
+ * mipi_dsi_dcs_set_display_brightness() - sets the brightness value of the
+ *    display
  * @dsi: DSI peripheral device
- * @format: pixel format
+ * @brightness: brightness value
  *
  * Return: 0 on success or a negative error code on failure.
  */
-int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+                                       u16 brightness)
 {
+       u8 payload[2] = { brightness & 0xff, brightness >> 8 };
        ssize_t err;
 
-       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
-                                sizeof(format));
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+                                payload, sizeof(payload));
        if (err < 0)
                return err;
 
        return 0;
 }
-EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness() - gets the current brightness value
+ *    of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+                                       u16 *brightness)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+                               brightness, sizeof(*brightness));
+       if (err <= 0) {
+               if (err == 0)
+                       err = -ENODATA;
+
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
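
A DSI panel's backlight hook might use the new pair like so (sketch,
assuming a hypothetical panel driver that stored its mipi_dsi_device as the
backlight data):

	static int foo_bl_update_status(struct backlight_device *bl)
	{
		struct mipi_dsi_device *dsi = bl_get_data(bl);

		return mipi_dsi_dcs_set_display_brightness(dsi,
							   bl->props.brightness);
	}
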
 
 static int mipi_dsi_drv_probe(struct device *dev)
 {
index cb39f45d6a16befdc9cfac339d1703a2d9a217e5..11d44a1e0ab355f2b00b7e9fe4fd24c687bb9f59 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/export.h>
+#include <linux/interval_tree_generic.h>
 
 /**
  * DOC: Overview
@@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
                                                u64 end,
                                                enum drm_mm_search_flags flags);
 
+#define START(node) ((node)->start)
+#define LAST(node)  ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+                    u64, __subtree_last,
+                    START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+       return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+                                              start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+       return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
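
Together these entry points allow range queries over allocated nodes; a
sketch iterating every node overlapping [start, last]:

	struct drm_mm_node *node;

	for (node = drm_mm_interval_first(mm, start, last);
	     node;
	     node = drm_mm_interval_next(node, start, last)) {
		/* node overlaps [start, last] */
	}
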
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+                                         struct drm_mm_node *node)
+{
+       struct drm_mm *mm = hole_node->mm;
+       struct rb_node **link, *rb;
+       struct drm_mm_node *parent;
+
+       node->__subtree_last = LAST(node);
+
+       if (hole_node->allocated) {
+               rb = &hole_node->rb;
+               while (rb) {
+                       parent = rb_entry(rb, struct drm_mm_node, rb);
+                       if (parent->__subtree_last >= node->__subtree_last)
+                               break;
+
+                       parent->__subtree_last = node->__subtree_last;
+                       rb = rb_parent(rb);
+               }
+
+               rb = &hole_node->rb;
+               link = &hole_node->rb.rb_right;
+       } else {
+               rb = NULL;
+               link = &mm->interval_tree.rb_node;
+       }
+
+       while (*link) {
+               rb = *link;
+               parent = rb_entry(rb, struct drm_mm_node, rb);
+               if (parent->__subtree_last < node->__subtree_last)
+                       parent->__subtree_last = node->__subtree_last;
+               if (node->start < parent->start)
+                       link = &parent->rb.rb_left;
+               else
+                       link = &parent->rb.rb_right;
+       }
+
+       rb_link_node(&node->rb, rb, link);
+       rb_insert_augmented(&node->rb,
+                           &mm->interval_tree,
+                           &drm_mm_interval_tree_augment);
+}
+
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 u64 size, unsigned alignment,
@@ -150,9 +217,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
        node->color = color;
        node->allocated = 1;
 
-       INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);
 
+       drm_mm_interval_tree_add_node(hole_node, node);
+
        BUG_ON(node->start + node->size > adj_end);
 
        node->hole_follows = 0;
@@ -178,41 +246,54 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
+       u64 end = node->start + node->size;
        struct drm_mm_node *hole;
-       u64 end;
-       u64 hole_start;
-       u64 hole_end;
+       u64 hole_start, hole_end;
 
-       BUG_ON(node == NULL);
+       if (WARN_ON(node->size == 0))
+               return -EINVAL;
 
        end = node->start + node->size;
 
        /* Find the relevant hole to add our node to */
-       drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-               if (hole_start > node->start || hole_end < end)
-                       continue;
+       hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+                                              node->start, ~(u64)0);
+       if (hole) {
+               if (hole->start < end)
+                       return -ENOSPC;
+       } else {
+               hole = list_entry(&mm->head_node.node_list,
+                                 typeof(*hole), node_list);
+       }
 
-               node->mm = mm;
-               node->allocated = 1;
+       hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+       if (!hole->hole_follows)
+               return -ENOSPC;
 
-               INIT_LIST_HEAD(&node->hole_stack);
-               list_add(&node->node_list, &hole->node_list);
+       hole_start = __drm_mm_hole_node_start(hole);
+       hole_end = __drm_mm_hole_node_end(hole);
+       if (hole_start > node->start || hole_end < end)
+               return -ENOSPC;
 
-               if (node->start == hole_start) {
-                       hole->hole_follows = 0;
-                       list_del_init(&hole->hole_stack);
-               }
+       node->mm = mm;
+       node->allocated = 1;
 
-               node->hole_follows = 0;
-               if (end != hole_end) {
-                       list_add(&node->hole_stack, &mm->hole_stack);
-                       node->hole_follows = 1;
-               }
+       list_add(&node->node_list, &hole->node_list);
 
-               return 0;
+       drm_mm_interval_tree_add_node(hole, node);
+
+       if (node->start == hole_start) {
+               hole->hole_follows = 0;
+               list_del(&hole->hole_stack);
        }
 
-       return -ENOSPC;
+       node->hole_follows = 0;
+       if (end != hole_end) {
+               list_add(&node->hole_stack, &mm->hole_stack);
+               node->hole_follows = 1;
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
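
A sketch of the intended calling pattern for drm_mm_reserve_node(), for example claiming a range that firmware has already programmed; reserve_stolen() and its parameters are hypothetical:

/* Hypothetical: claim a pre-programmed range so the allocator skips it. */
static int reserve_stolen(struct drm_mm *mm, struct drm_mm_node *node,
                          u64 start, u64 size)
{
        node->start = start;
        node->size = size;
        node->color = 0;

        /* Returns -EINVAL for an empty node (the new WARN above) and
         * -ENOSPC if the range is already occupied.
         */
        return drm_mm_reserve_node(mm, node);
}
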
 
@@ -239,6 +320,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 {
        struct drm_mm_node *hole_node;
 
+       if (WARN_ON(size == 0))
+               return -EINVAL;
+
        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, sflags);
        if (!hole_node)
@@ -299,9 +383,10 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
        node->color = color;
        node->allocated = 1;
 
-       INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);
 
+       drm_mm_interval_tree_add_node(hole_node, node);
+
        BUG_ON(node->start < start);
        BUG_ON(node->start < adj_start);
        BUG_ON(node->start + node->size > adj_end);
@@ -340,6 +425,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
 {
        struct drm_mm_node *hole_node;
 
+       if (WARN_ON(size == 0))
+               return -EINVAL;
+
        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, sflags);
@@ -390,6 +478,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+       drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);
        node->allocated = 0;
 }
@@ -516,11 +605,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
+       rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;
+       new->__subtree_last = old->__subtree_last;
 
        old->allocated = 0;
        new->allocated = 1;
@@ -748,7 +839,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 
        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
-       INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
@@ -758,6 +848,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+       mm->interval_tree = RB_ROOT;
+
        mm->color_adjust = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init);
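
For context, a minimal lifecycle sketch assuming the drm_mm API of this series; the sizes, flags and the example_vram_setup() helper are illustrative only:

#include <linux/sizes.h>

/* Hypothetical: manage a 256MiB aperture and carve out one block. */
static int example_vram_setup(struct drm_mm *mm, struct drm_mm_node *node)
{
        drm_mm_init(mm, 0 /* start */, SZ_256M /* size */);

        /* First-fit allocation of a 64KiB block with 4KiB alignment. */
        return drm_mm_insert_node_generic(mm, node, SZ_64K, SZ_4K,
                                          0 /* color */,
                                          DRM_MM_SEARCH_DEFAULT,
                                          DRM_MM_CREATE_DEFAULT);
}
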
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
new file mode 100644 (file)
index 0000000..9f17085
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_mode_object.h>
+
+#include "drm_crtc_internal.h"
+
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+int drm_mode_object_get_reg(struct drm_device *dev,
+                           struct drm_mode_object *obj,
+                           uint32_t obj_type,
+                           bool register_obj,
+                           void (*obj_free_cb)(struct kref *kref))
+{
+       int ret;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+       if (ret >= 0) {
+               /*
+                * Set up the object linking under the protection of the idr
+                * lock so that other users can't see inconsistent state.
+                */
+               obj->id = ret;
+               obj->type = obj_type;
+               if (obj_free_cb) {
+                       obj->free_cb = obj_free_cb;
+                       kref_init(&obj->refcount);
+               }
+       }
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix,
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_object_get(struct drm_device *dev,
+                       struct drm_mode_object *obj, uint32_t obj_type)
+{
+       return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
+}
+
+void drm_mode_object_register(struct drm_device *dev,
+                             struct drm_mode_object *obj)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_unregister - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free @object's identifier from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
+ */
+void drm_mode_object_unregister(struct drm_device *dev,
+                               struct drm_mode_object *object)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       if (object->id) {
+               idr_remove(&dev->mode_config.crtc_idr, object->id);
+               object->id = 0;
+       }
+       mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+                                              uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj = NULL;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       obj = idr_find(&dev->mode_config.crtc_idr, id);
+       if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+               obj = NULL;
+       if (obj && obj->id != id)
+               obj = NULL;
+
+       if (obj && obj->free_cb) {
+               if (!kref_get_unless_zero(&obj->refcount))
+                       obj = NULL;
+       }
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       return obj;
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+               uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj = NULL;
+
+       obj = __drm_mode_object_find(dev, id, type);
+       return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
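
The lookup/unreference pairing described above, as a hedged sketch; example_probe_object() is hypothetical:

static int example_probe_object(struct drm_device *dev, u32 id)
{
        struct drm_mode_object *obj;

        obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ANY);
        if (!obj)
                return -ENOENT;

        DRM_DEBUG("object %u has type 0x%x\n", obj->id, obj->type);

        /* Drop the reference the lookup took on refcounted objects. */
        drm_mode_object_unreference(obj);
        return 0;
}
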
+
+/**
+ * drm_mode_object_unreference - decr the object refcnt
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+       if (obj->free_cb) {
+               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+               kref_put(&obj->refcount, obj->free_cb);
+       }
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - incr the object refcnt
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+       if (obj->free_cb) {
+               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+               kref_get(&obj->refcount);
+       }
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
+
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
+void drm_object_attach_property(struct drm_mode_object *obj,
+                               struct drm_property *property,
+                               uint64_t init_val)
+{
+       int count = obj->properties->count;
+
+       if (count == DRM_OBJECT_MAX_PROPERTY) {
+               WARN(1, "Failed to attach object property (type: 0x%x). Please "
+                       "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+                       "you see this message on the same object type.\n",
+                       obj->type);
+               return;
+       }
+
+       obj->properties->properties[count] = property;
+       obj->properties->values[count] = init_val;
+       obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
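
A sketch of the usual call site, attaching a driver-created property during initialization; the "example-zpos" name and its range are made up for illustration:

static int example_attach_zpos(struct drm_device *dev,
                               struct drm_plane *plane)
{
        struct drm_property *prop;

        prop = drm_property_create_range(dev, 0, "example-zpos", 0, 255);
        if (!prop)
                return -ENOMEM;

        drm_object_attach_property(&plane->base, prop, 0 /* init_val */);
        return 0;
}
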
+
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Note that atomic drivers should not have any need to call this, the core will
+ * ensure consistency of values reported back to userspace through the
+ * appropriate ->atomic_get_property callback. Only legacy drivers should call
+ * this function to update the tracked value (after clamping and other
+ * restrictions have been applied).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_set_value(struct drm_mode_object *obj,
+                                 struct drm_property *property, uint64_t val)
+{
+       int i;
+
+       for (i = 0; i < obj->properties->count; i++) {
+               if (obj->properties->properties[i] == property) {
+                       obj->properties->values[i] = val;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current property
+ * value this might be out of sync with the hardware, depending upon the driver
+ * and property.
+ *
+ * Atomic drivers should never call this function directly, the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+                                 struct drm_property *property, uint64_t *val)
+{
+       int i;
+
+       /* Read-only properties bypass the atomic mechanism and still store
+        * their value in obj->properties->values[], mostly to avoid
+        * having to deal with EDID and similar properties in atomic paths.
+        */
+       if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+                       !(property->flags & DRM_MODE_PROP_IMMUTABLE))
+               return drm_atomic_get_property(obj, property, val);
+
+       for (i = 0; i < obj->properties->count; i++) {
+               if (obj->properties->properties[i] == property) {
+                       *val = obj->properties->values[i];
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
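
A small sketch of the legacy set/readback pairing described above; example_sync_property() is hypothetical, and atomic drivers would instead be routed through drm_atomic_get_property():

static void example_sync_property(struct drm_mode_object *obj,
                                  struct drm_property *prop, u64 new_val)
{
        u64 readback = 0;

        if (drm_object_property_set_value(obj, prop, new_val))
                return;

        /* For a legacy driver the tracked value reads straight back. */
        if (!drm_object_property_get_value(obj, prop, &readback))
                WARN_ON(readback != new_val);
}
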
+
+/* helper for getconnector and getproperties ioctls */
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+                                  uint32_t __user *prop_ptr,
+                                  uint64_t __user *prop_values,
+                                  uint32_t *arg_count_props)
+{
+       int i, ret, count;
+
+       for (i = 0, count = 0; i < obj->properties->count; i++) {
+               struct drm_property *prop = obj->properties->properties[i];
+               uint64_t val;
+
+               if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+                       continue;
+
+               if (*arg_count_props > count) {
+                       ret = drm_object_property_get_value(obj, prop, &val);
+                       if (ret)
+                               return ret;
+
+                       if (put_user(prop->base.id, prop_ptr + count))
+                               return -EFAULT;
+
+                       if (put_user(val, prop_values + count))
+                               return -EFAULT;
+               }
+
+               count++;
+       }
+       *arg_count_props = count;
+
+       return 0;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value of an object's property. Compared
+ * to the connector-specific ioctl, this one is extended to also work on CRTC and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv)
+{
+       struct drm_mode_obj_get_properties *arg = data;
+       struct drm_mode_object *obj;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+
+       obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+       if (!obj) {
+               ret = -ENOENT;
+               goto out;
+       }
+       if (!obj->properties) {
+               ret = -EINVAL;
+               goto out_unref;
+       }
+
+       ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+                       (uint32_t __user *)(unsigned long)(arg->props_ptr),
+                       (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+                       &arg->count_props);
+
+out_unref:
+       drm_mode_object_unreference(obj);
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+                                              uint32_t prop_id)
+{
+       int i;
+
+       for (i = 0; i < obj->properties->count; i++)
+               if (obj->properties->properties[i]->base.id == prop_id)
+                       return obj->properties->properties[i];
+
+       return NULL;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv)
+{
+       struct drm_mode_obj_set_property *arg = data;
+       struct drm_mode_object *arg_obj;
+       struct drm_property *property;
+       int ret = -EINVAL;
+       struct drm_mode_object *ref;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+
+       arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+       if (!arg_obj) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       if (!arg_obj->properties)
+               goto out_unref;
+
+       property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+       if (!property)
+               goto out_unref;
+
+       if (!drm_property_change_valid_get(property, arg->value, &ref))
+               goto out_unref;
+
+       switch (arg_obj->type) {
+       case DRM_MODE_OBJECT_CONNECTOR:
+               ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+                                                     arg->value);
+               break;
+       case DRM_MODE_OBJECT_CRTC:
+               ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+               break;
+       case DRM_MODE_OBJECT_PLANE:
+               ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+                                                 property, arg->value);
+               break;
+       }
+
+       drm_property_change_valid_put(property, ref);
+
+out_unref:
+       drm_mode_object_unreference(arg_obj);
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
index fc5040ae5f256ef2987fba0ce19ad1a8736ff5ef..53f07ac7c1749d20e7aa904eaabc33adbc4abbc8 100644 (file)
@@ -657,11 +657,36 @@ void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
 }
 EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
 
+/**
+ * drm_bus_flags_from_videomode - extract pixel clock and DE polarity
+ * information from a videomode
+ * @vm: videomode structure to use
+ * @bus_flags: destination for the extracted pixel clock and DE polarity flags
+ *
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
+ * in @bus_flags according to the DISPLAY_FLAGS found in @vm.
+ */
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
+{
+       *bus_flags = 0;
+       if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
+               *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+       if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+               *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+       if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+               *bus_flags |= DRM_BUS_FLAG_DE_LOW;
+       if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
+               *bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+}
+EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
+
 #ifdef CONFIG_OF
 /**
  * of_get_drm_display_mode - get a drm_display_mode from devicetree
  * @np: device_node with the timing specification
  * @dmode: will be set to the return value
+ * @bus_flags: information about pixelclk and DE polarity
  * @index: index into the list of display timings in devicetree
  *
 * This function is expensive and should only be used if only one mode is to be
@@ -672,7 +697,8 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
 * 0 on success, a negative errno code when no OF videomode node was found.
  */
 int of_get_drm_display_mode(struct device_node *np,
-                           struct drm_display_mode *dmode, int index)
+                           struct drm_display_mode *dmode, u32 *bus_flags,
+                           int index)
 {
        struct videomode vm;
        int ret;
@@ -682,6 +708,8 @@ int of_get_drm_display_mode(struct device_node *np,
                return ret;
 
        drm_display_mode_from_videomode(&vm, dmode);
+       if (bus_flags)
+               drm_bus_flags_from_videomode(&vm, bus_flags);
 
        pr_debug("%s: got %dx%d display mode from %s\n",
                of_node_full_name(np), vm.hactive, vm.vactive, np->name);
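
A hypothetical panel-driver sketch of the extended signature, passing the new optional @bus_flags argument:

static int example_get_panel_mode(struct device_node *np,
                                  struct drm_display_mode *mode)
{
        u32 bus_flags = 0;
        int ret;

        ret = of_get_drm_display_mode(np, mode, &bus_flags, 0 /* index */);
        if (ret)
                return ret;

        /* bus_flags now carries the DRM_BUS_FLAG_DE_* and
         * DRM_BUS_FLAG_PIXDATA_*EDGE bits decoded from the DT timing.
         */
        return 0;
}
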
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
new file mode 100644 (file)
index 0000000..1d45738
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_plane_helper.h>
+
+/**
+ * DOC: aux kms helpers
+ *
+ * This helper library contains various one-off functions which don't really fit
+ * anywhere else in the DRM modeset helper library.
+ */
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of the
+ *                                             connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+       struct drm_connector *connector, *tmp;
+       struct list_head panel_list;
+
+       INIT_LIST_HEAD(&panel_list);
+
+       list_for_each_entry_safe(connector, tmp,
+                                &dev->mode_config.connector_list, head) {
+               if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       list_move_tail(&connector->head, &panel_list);
+       }
+
+       list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a driver's fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+                                   const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       int i;
+
+       fb->width = mode_cmd->width;
+       fb->height = mode_cmd->height;
+       for (i = 0; i < 4; i++) {
+               fb->pitches[i] = mode_cmd->pitches[i];
+               fb->offsets[i] = mode_cmd->offsets[i];
+               fb->modifier[i] = mode_cmd->modifier[i];
+       }
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+                                   &fb->bits_per_pixel);
+       fb->pixel_format = mode_cmd->pixel_format;
+       fb->flags = mode_cmd->flags;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
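
A sketch of the intended call site in a driver's ->fb_create() implementation; example_fb_create() and its error handling are illustrative:

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file,
                  const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_framebuffer *fb;

        fb = kzalloc(sizeof(*fb), GFP_KERNEL);
        if (!fb)
                return ERR_PTR(-ENOMEM);

        /* Copy width/height/pitches/format from the userspace request. */
        drm_helper_mode_fill_fb_struct(fb, mode_cmd);

        /* A real driver would look up GEM handles and call
         * drm_framebuffer_init() here.
         */
        return fb;
}
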
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers.  Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane.  However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+};
+
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
+{
+       struct drm_plane *primary;
+       int ret;
+
+       primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+       if (primary == NULL) {
+               DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+               return NULL;
+       }
+
+       /*
+        * Remove the format_default field from drm_plane when dropping
+        * this helper.
+        */
+       primary->format_default = true;
+
+       /* possible_crtc's will be filled in later by crtc_init */
+       ret = drm_universal_plane_init(dev, primary, 0,
+                                      &drm_primary_helper_funcs,
+                                      safe_modeset_formats,
+                                      ARRAY_SIZE(safe_modeset_formats),
+                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (ret) {
+               kfree(primary);
+               primary = NULL;
+       }
+
+       return primary;
+}
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                 const struct drm_crtc_funcs *funcs)
+{
+       struct drm_plane *primary;
+
+       primary = create_primary_plane(dev);
+       return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
+                                        NULL);
+}
+EXPORT_SYMBOL(drm_crtc_init);
index b2f8f1062d5fc85daa624fcd3e0a11383b83f6b2..3ceea9cb9d3e42a36f2811b2bea31d63445e4e42 100644 (file)
@@ -175,7 +175,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 {
        struct drm_irq_busid *p = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        /* UMS was only ever supported on PCI devices. */
@@ -236,8 +236,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        DRM_DEBUG("\n");
 
        dev = drm_dev_alloc(driver, &pdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        ret = pci_enable_device(pdev);
        if (ret)
@@ -263,7 +263,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
        /* No locking needed since shadow-attach is single-threaded since it may
         * only be called from the per-driver module init hook. */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
                list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
 
        return 0;
@@ -299,7 +299,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
 
        DRM_DEBUG("\n");
 
-       if (driver->driver_features & DRIVER_MODESET)
+       if (!(driver->driver_features & DRIVER_LEGACY))
                return pci_register_driver(pdriver);
 
        /* If not using KMS, fall back to stealth mode manual scanning. */
@@ -421,7 +421,7 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
        struct drm_device *dev, *tmp;
        DRM_DEBUG("\n");
 
-       if (driver->driver_features & DRIVER_MODESET) {
+       if (!(driver->driver_features & DRIVER_LEGACY)) {
                pci_unregister_driver(pdriver);
        } else {
                list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
new file mode 100644 (file)
index 0000000..cd0d475
--- /dev/null
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * A plane represents an image source that can be blended with or overlaid on
+ * top of a CRTC during the scanout process. Planes take their input data from a
+ * &drm_framebuffer object. The plane itself specifies the cropping and scaling
+ * of that image, and where it is placed on the visible area of a display
+ * pipeline, represented by &drm_crtc. A plane can also have additional
+ * properties that specify how the pixels are positioned and blended, like
+ * rotation or Z-position. All these properties are stored in &drm_plane_state.
+ *
+ * To create a plane, a KMS driver allocates and zeroes an instance of
+ * struct &drm_plane (possibly as part of a larger structure) and registers it
+ * with a call to drm_universal_plane_init().
+ *
+ * Cursor and overlay planes are optional. All drivers should provide one
+ * primary plane per CRTC to avoid surprising userspace too much. See enum
+ * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * plane types. Special planes are associated with their CRTC by calling
+ * drm_crtc_init_with_planes().
+ *
+ * The type of a plane is exposed in the immutable "type" enumeration property,
+ * which has one of the following values: "Overlay", "Primary", "Cursor".
+ */
+
+static unsigned int drm_num_planes(struct drm_device *dev)
+{
+       unsigned int num = 0;
+       struct drm_plane *tmp;
+
+       drm_for_each_plane(tmp, dev) {
+               num++;
+       }
+
+       return num;
+}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                            unsigned long possible_crtcs,
+                            const struct drm_plane_funcs *funcs,
+                            const uint32_t *formats, unsigned int format_count,
+                            enum drm_plane_type type,
+                            const char *name, ...)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       int ret;
+
+       ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+       if (ret)
+               return ret;
+
+       drm_modeset_lock_init(&plane->mutex);
+
+       plane->base.properties = &plane->properties;
+       plane->dev = dev;
+       plane->funcs = funcs;
+       plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+                                           GFP_KERNEL);
+       if (!plane->format_types) {
+               DRM_DEBUG_KMS("out of memory when allocating plane\n");
+               drm_mode_object_unregister(dev, &plane->base);
+               return -ENOMEM;
+       }
+
+       if (name) {
+               va_list ap;
+
+               va_start(ap, name);
+               plane->name = kvasprintf(GFP_KERNEL, name, ap);
+               va_end(ap);
+       } else {
+               plane->name = kasprintf(GFP_KERNEL, "plane-%d",
+                                       drm_num_planes(dev));
+       }
+       if (!plane->name) {
+               kfree(plane->format_types);
+               drm_mode_object_unregister(dev, &plane->base);
+               return -ENOMEM;
+       }
+
+       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+       plane->format_count = format_count;
+       plane->possible_crtcs = possible_crtcs;
+       plane->type = type;
+
+       list_add_tail(&plane->head, &config->plane_list);
+       plane->index = config->num_total_plane++;
+       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+               config->num_overlay_plane++;
+
+       drm_object_attach_property(&plane->base,
+                                  config->plane_type_property,
+                                  plane->type);
+
+       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+               drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_universal_plane_init);
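
A hypothetical driver sketch registering a primary plane restricted to the first CRTC:

static const uint32_t example_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
};

static int example_create_primary(struct drm_device *dev,
                                  struct drm_plane *plane,
                                  const struct drm_plane_funcs *funcs)
{
        /* BIT(0) limits the plane to the first CRTC. */
        return drm_universal_plane_init(dev, plane, BIT(0), funcs,
                                        example_formats,
                                        ARRAY_SIZE(example_formats),
                                        DRM_PLANE_TYPE_PRIMARY,
                                        "example-primary");
}
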
+
+int drm_plane_register_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+       int ret = 0;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->late_register)
+                       ret = plane->funcs->late_register(plane);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+void drm_plane_unregister_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->early_unregister)
+                       plane->funcs->early_unregister(plane);
+       }
+}
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs,
+                  const struct drm_plane_funcs *funcs,
+                  const uint32_t *formats, unsigned int format_count,
+                  bool is_primary)
+{
+       enum drm_plane_type type;
+
+       type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+       return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+                                       formats, format_count, type, NULL);
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+
+       drm_modeset_lock_all(dev);
+       kfree(plane->format_types);
+       drm_mode_object_unregister(dev, &plane->base);
+
+       BUG_ON(list_empty(&plane->head));
+
+       /* Note that the plane_list is considered to be static; should we
+        * remove the drm_plane at runtime we would have to decrement all
+        * the indices on the drm_plane after us in the plane_list.
+        */
+
+       list_del(&plane->head);
+       dev->mode_config.num_total_plane--;
+       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+               dev->mode_config.num_overlay_plane--;
+       drm_modeset_unlock_all(dev);
+
+       WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+       if (plane->state && plane->funcs->atomic_destroy_state)
+               plane->funcs->atomic_destroy_state(plane, plane->state);
+
+       kfree(plane->name);
+
+       memset(plane, 0, sizeof(*plane));
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_plane_from_index - find the registered plane at an index
+ * @dev: DRM device
+ * @idx: index of the registered plane to find
+ *
+ * Given a plane index, return the registered plane from DRM device's
+ * list of planes with matching index.
+ */
+struct drm_plane *
+drm_plane_from_index(struct drm_device *dev, int idx)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, dev)
+               if (idx == plane->index)
+                       return plane;
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_plane_from_index);
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+       int ret;
+
+       if (!plane->fb)
+               return;
+
+       plane->old_fb = plane->fb;
+       ret = plane->funcs->disable_plane(plane);
+       if (ret) {
+               DRM_ERROR("failed to disable plane with busy fb\n");
+               plane->old_fb = NULL;
+               return;
+       }
+       /* disconnect the plane from the fb and crtc: */
+       drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
+       plane->fb = NULL;
+       plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This function sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                               struct drm_property *property,
+                               uint64_t value)
+{
+       int ret = -EINVAL;
+       struct drm_mode_object *obj = &plane->base;
+
+       if (plane->funcs->set_property)
+               ret = plane->funcs->set_property(plane, property, value);
+       if (!ret)
+               drm_object_property_set_value(obj, property, value);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
+
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane_res *plane_resp = data;
+       struct drm_mode_config *config;
+       struct drm_plane *plane;
+       uint32_t __user *plane_ptr;
+       int copied = 0;
+       unsigned num_planes;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       config = &dev->mode_config;
+
+       if (file_priv->universal_planes)
+               num_planes = config->num_total_plane;
+       else
+               num_planes = config->num_overlay_plane;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (num_planes &&
+           (plane_resp->count_planes >= num_planes)) {
+               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+               /* Plane lists are invariant, no locking needed. */
+               drm_for_each_plane(plane, dev) {
+                       /*
+                        * Unless userspace set the 'universal planes'
+                        * capability bit, only advertise overlays.
+                        */
+                       if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+                           !file_priv->universal_planes)
+                               continue;
+
+                       if (put_user(plane->base.id, plane_ptr + copied))
+                               return -EFAULT;
+                       copied++;
+               }
+       }
+       plane_resp->count_planes = num_planes;
+
+       return 0;
+}
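
From the userspace side, the two-call pattern looks roughly like this, as a hedged sketch using libdrm's drmIoctl() wrapper; example_get_plane_ids() is hypothetical:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static uint32_t *example_get_plane_ids(int fd, uint32_t *count)
{
        struct drm_mode_get_plane_res res = {0};
        uint32_t *ids;

        /* First call: the kernel only reports how many planes exist. */
        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
                return NULL;

        ids = calloc(res.count_planes, sizeof(*ids));
        if (!ids)
                return NULL;

        /* Second call: the kernel fills the user-provided array. */
        res.plane_id_ptr = (uintptr_t)ids;
        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res)) {
                free(ids);
                return NULL;
        }

        *count = res.count_planes;
        return ids;
}
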
+
+int drm_mode_getplane(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane *plane_resp = data;
+       struct drm_plane *plane;
+       uint32_t __user *format_ptr;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       plane = drm_plane_find(dev, plane_resp->plane_id);
+       if (!plane)
+               return -ENOENT;
+
+       drm_modeset_lock(&plane->mutex, NULL);
+       if (plane->crtc)
+               plane_resp->crtc_id = plane->crtc->base.id;
+       else
+               plane_resp->crtc_id = 0;
+
+       if (plane->fb)
+               plane_resp->fb_id = plane->fb->base.id;
+       else
+               plane_resp->fb_id = 0;
+       drm_modeset_unlock(&plane->mutex);
+
+       plane_resp->plane_id = plane->base.id;
+       plane_resp->possible_crtcs = plane->possible_crtcs;
+       plane_resp->gamma_size = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (plane->format_count &&
+           (plane_resp->count_format_types >= plane->format_count)) {
+               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+               if (copy_to_user(format_ptr,
+                                plane->format_types,
+                                sizeof(uint32_t) * plane->format_count)) {
+                       return -EFAULT;
+               }
+       }
+       plane_resp->count_format_types = plane->format_count;
+
+       return 0;
+}
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+       unsigned int i;
+
+       for (i = 0; i < plane->format_count; i++) {
+               if (format == plane->format_types[i])
+                       return 0;
+       }
+
+       return -EINVAL;
+}
+
+/*
+ * setplane_internal - setplane handler for internal callers
+ *
+ * Note that we assume an extra reference has already been taken on fb.  If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
+ *
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
+ */
+static int __setplane_internal(struct drm_plane *plane,
+                              struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              int32_t crtc_x, int32_t crtc_y,
+                              uint32_t crtc_w, uint32_t crtc_h,
+                              /* src_{x,y,w,h} values are 16.16 fixed point */
+                              uint32_t src_x, uint32_t src_y,
+                              uint32_t src_w, uint32_t src_h)
+{
+       int ret = 0;
+
+       /* No fb means shut it down */
+       if (!fb) {
+               plane->old_fb = plane->fb;
+               ret = plane->funcs->disable_plane(plane);
+               if (!ret) {
+                       plane->crtc = NULL;
+                       plane->fb = NULL;
+               } else {
+                       plane->old_fb = NULL;
+               }
+               goto out;
+       }
+
+       /* Check whether this plane is usable on this CRTC */
+       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+               DRM_DEBUG_KMS("Invalid crtc for plane\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Check whether this plane supports the fb pixel format. */
+       ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+       if (ret) {
+               char *format_name = drm_get_format_name(fb->pixel_format);
+               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+               kfree(format_name);
+               goto out;
+       }
+
+       /* Give drivers some help against integer overflows */
+       if (crtc_w > INT_MAX ||
+           crtc_x > INT_MAX - (int32_t) crtc_w ||
+           crtc_h > INT_MAX ||
+           crtc_y > INT_MAX - (int32_t) crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             crtc_w, crtc_h, crtc_x, crtc_y);
+               ret = -ERANGE;
+               goto out;
+       }
+
+       ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+       if (ret)
+               goto out;
+
+       plane->old_fb = plane->fb;
+       ret = plane->funcs->update_plane(plane, crtc, fb,
+                                        crtc_x, crtc_y, crtc_w, crtc_h,
+                                        src_x, src_y, src_w, src_h);
+       if (!ret) {
+               plane->crtc = crtc;
+               plane->fb = fb;
+               fb = NULL;
+       } else {
+               plane->old_fb = NULL;
+       }
+
+out:
+       if (fb)
+               drm_framebuffer_unreference(fb);
+       if (plane->old_fb)
+               drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
+
+       return ret;
+}
+
+static int setplane_internal(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            int32_t crtc_x, int32_t crtc_y,
+                            uint32_t crtc_w, uint32_t crtc_h,
+                            /* src_{x,y,w,h} values are 16.16 fixed point */
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       int ret;
+
+       drm_modeset_lock_all(plane->dev);
+       ret = __setplane_internal(plane, crtc, fb,
+                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                 src_x, src_y, src_w, src_h);
+       drm_modeset_unlock_all(plane->dev);
+
+       return ret;
+}
+
+int drm_mode_setplane(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_mode_set_plane *plane_req = data;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc = NULL;
+       struct drm_framebuffer *fb = NULL;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       /*
+        * First, find the plane, crtc, and fb objects.  If not available,
+        * we don't bother to call the driver.
+        */
+       plane = drm_plane_find(dev, plane_req->plane_id);
+       if (!plane) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             plane_req->plane_id);
+               return -ENOENT;
+       }
+
+       if (plane_req->fb_id) {
+               fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+               if (!fb) {
+                       DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+                                     plane_req->fb_id);
+                       return -ENOENT;
+               }
+
+               crtc = drm_crtc_find(dev, plane_req->crtc_id);
+               if (!crtc) {
+                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                                     plane_req->crtc_id);
+                       return -ENOENT;
+               }
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       return setplane_internal(plane, crtc, fb,
+                                plane_req->crtc_x, plane_req->crtc_y,
+                                plane_req->crtc_w, plane_req->crtc_h,
+                                plane_req->src_x, plane_req->src_y,
+                                plane_req->src_w, plane_req->src_h);
+}
+
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+                                    struct drm_mode_cursor2 *req,
+                                    struct drm_file *file_priv)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd2 fbreq = {
+               .width = req->width,
+               .height = req->height,
+               .pixel_format = DRM_FORMAT_ARGB8888,
+               .pitches = { req->width * 4 },
+               .handles = { req->handle },
+       };
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w = 0, crtc_h = 0;
+       uint32_t src_w = 0, src_h = 0;
+       int ret = 0;
+
+       BUG_ON(!crtc->cursor);
+       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+
+       /*
+        * Obtain fb we'll be using (either new or existing) and take an extra
+        * reference to it if fb != null.  setplane will take care of dropping
+        * the reference if the plane update fails.
+        */
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (req->handle) {
+                       fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
+                       if (IS_ERR(fb)) {
+                               DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+                               return PTR_ERR(fb);
+                       }
+                       fb->hot_x = req->hot_x;
+                       fb->hot_y = req->hot_y;
+               } else {
+                       fb = NULL;
+               }
+       } else {
+               fb = crtc->cursor->fb;
+               if (fb)
+                       drm_framebuffer_reference(fb);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc_x = req->x;
+               crtc_y = req->y;
+       } else {
+               crtc_x = crtc->cursor_x;
+               crtc_y = crtc->cursor_y;
+       }
+
+       if (fb) {
+               crtc_w = fb->width;
+               crtc_h = fb->height;
+               src_w = fb->width << 16;
+               src_h = fb->height << 16;
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       ret = __setplane_internal(crtc->cursor, crtc, fb,
+                               crtc_x, crtc_y, crtc_w, crtc_h,
+                               0, 0, src_w, src_h);
+
+       /* Update successful; save new cursor position, if necessary */
+       if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc->cursor_x = req->x;
+               crtc->cursor_y = req->y;
+       }
+
+       return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+                                 struct drm_mode_cursor2 *req,
+                                 struct drm_file *file_priv)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+               return -EINVAL;
+
+       crtc = drm_crtc_find(dev, req->crtc_id);
+       if (!crtc) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+               return -ENOENT;
+       }
+
+       /*
+        * If this crtc has a universal cursor plane, call that plane's update
+        * handler rather than using legacy cursor handlers.
+        */
+       drm_modeset_lock_crtc(crtc, crtc->cursor);
+       if (crtc->cursor) {
+               ret = drm_mode_cursor_universal(crtc, req, file_priv);
+               goto out;
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               if (crtc->funcs->cursor_set2)
+                       ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+                                                     req->width, req->height, req->hot_x, req->hot_y);
+               else
+                       ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                                     req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       drm_modeset_unlock_crtc(crtc);
+
+       return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_cursor2 new_req;
+
+       memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+       new_req.hot_x = new_req.hot_y = 0;
+
+       return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+/*
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ */
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor2 *req = data;
+
+       return drm_mode_cursor_common(dev, req, file_priv);
+}
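From userspace these two ioctls are usually reached through libdrm's wrappers. A minimal sketch, assuming a 64x64 ARGB8888 buffer object whose handle is bo_handle (the show_cursor helper and the sizes are illustrative, not part of this patch):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Show a cursor with its hotspot at the top-left corner, then move it. */
	static int show_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle)
	{
		/* DRM_IOCTL_MODE_CURSOR2 path: size plus hotspot */
		int ret = drmModeSetCursor2(fd, crtc_id, bo_handle, 64, 64, 0, 0);
		if (ret)
			return ret;

		/* DRM_MODE_CURSOR_MOVE path: position only, contents kept */
		return drmModeMoveCursor(fd, crtc_id, 100, 100);
	}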
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_page_flip_target *page_flip = data;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_pending_vblank_event *e = NULL;
+       u32 target_vblank = page_flip->sequence;
+       bool vblank_ref = false;
+       int ret = -EINVAL;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
+               return -EINVAL;
+
+       if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+               return -EINVAL;
+
+       /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
+        * can be specified
+        */
+       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
+               return -EINVAL;
+
+       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+               return -EINVAL;
+
+       crtc = drm_crtc_find(dev, page_flip->crtc_id);
+       if (!crtc)
+               return -ENOENT;
+
+       drm_modeset_lock_crtc(crtc, crtc->primary);
+       if (crtc->primary->fb == NULL) {
+               /* The framebuffer is currently unbound, presumably
+                * due to a hotplug event, that userspace has not
+                * yet discovered.
+                */
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (crtc->funcs->page_flip == NULL &&
+           crtc->funcs->page_flip_target == NULL)
+               goto out;
+
+       fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+       if (!fb) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       if (crtc->state) {
+               const struct drm_plane_state *state = crtc->primary->state;
+
+               ret = drm_framebuffer_check_src_coords(state->src_x,
+                                                      state->src_y,
+                                                      state->src_w,
+                                                      state->src_h,
+                                                      fb);
+       } else {
+               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+       }
+       if (ret)
+               goto out;
+
+       if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+               DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (!e) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof(e->event);
+               e->event.user_data = page_flip->user_data;
+               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       goto out;
+               }
+       }
+
+       if (crtc->funcs->page_flip_target) {
+               u32 current_vblank;
+               int r;
+
+               r = drm_crtc_vblank_get(crtc);
+               if (r) {
+                       ret = r;
+                       goto out;
+               }
+               vblank_ref = true;
+
+               current_vblank = drm_crtc_vblank_count(crtc);
+
+               switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
+               case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+                       if ((int)(target_vblank - current_vblank) > 1) {
+                               DRM_DEBUG("Invalid absolute flip target %u, "
+                                         "must be <= %u\n", target_vblank,
+                                         current_vblank + 1);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       break;
+               case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
+                       if (target_vblank != 0 && target_vblank != 1) {
+                               DRM_DEBUG("Invalid relative flip target %u, "
+                                         "must be 0 or 1\n", target_vblank);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       target_vblank += current_vblank;
+                       break;
+               default:
+                       target_vblank = current_vblank +
+                               !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
+                       break;
+               }
+       } else if (crtc->funcs->page_flip == NULL ||
+                  (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       crtc->primary->old_fb = crtc->primary->fb;
+       if (crtc->funcs->page_flip_target)
+               ret = crtc->funcs->page_flip_target(crtc, fb, e,
+                                                   page_flip->flags,
+                                                   target_vblank);
+       else
+               ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+       if (ret) {
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+                       drm_event_cancel_free(dev, &e->base);
+               /* Keep the old fb, don't unref it. */
+               crtc->primary->old_fb = NULL;
+       } else {
+               crtc->primary->fb = fb;
+               /* Unref only the old framebuffer. */
+               fb = NULL;
+       }
+
+out:
+       if (ret && vblank_ref)
+               drm_crtc_vblank_put(crtc);
+       if (fb)
+               drm_framebuffer_unreference(fb);
+       if (crtc->primary->old_fb)
+               drm_framebuffer_unreference(crtc->primary->old_fb);
+       crtc->primary->old_fb = NULL;
+       drm_modeset_unlock_crtc(crtc);
+
+       return ret;
+}
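The event-based flow this ioctl implements is easiest to see from the userspace side. A hedged sketch using libdrm (the flip_once helper is illustrative; real code would poll() the fd instead of spinning on drmHandleEvent):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static void flip_done(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec, void *data)
	{
		*(int *)data = 1;	/* DRM_EVENT_FLIP_COMPLETE arrived */
	}

	static int flip_once(int fd, uint32_t crtc_id, uint32_t next_fb_id)
	{
		drmEventContext ev = {
			.version = DRM_EVENT_CONTEXT_VERSION,
			.page_flip_handler = flip_done,
		};
		int done = 0;
		int ret = drmModePageFlip(fd, crtc_id, next_fb_id,
					  DRM_MODE_PAGE_FLIP_EVENT, &done);
		if (ret)
			return ret;

		while (!done)	/* blocks in read(); use poll() in production */
			drmHandleEvent(fd, &ev);
		return 0;
	}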
index 16c4a7bd7465800c5cbcd3435d6823829cb41daf..7899fc1dcdb082a7506558d8dc0f40c1d1d40164 100644 (file)
  * the details.
  */
 
-/*
- * This is the minimal list of formats that seem to be safe for modeset use
- * with all current DRM drivers.  Most hardware can actually support more
- * formats than this and drivers may specify a more accurate list when
- * creating the primary plane.  However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
- */
-static const uint32_t safe_modeset_formats[] = {
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_ARGB8888,
-};
-
 /*
  * Returns the connectors currently associated with a CRTC.  This function
  * should be called twice:  once with a NULL connector list to retrieve
@@ -108,14 +96,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
 }
 
 /**
- * drm_plane_helper_check_update() - Check plane update for validity
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @src: source coordinates in 16.16 fixed point
- * @dest: integer destination coordinates
+ * drm_plane_helper_check_state() - Check plane state for validity
+ * @state: plane state to check
  * @clip: integer clipping coordinates
- * @rotation: plane rotation
  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
  * @can_position: is it legal to position the plane such that it
@@ -123,10 +106,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
  *                only be false for primary planes.
  * @can_update_disabled: can the plane be updated while the crtc
  *                       is disabled?
- * @visible: output parameter indicating whether plane is still visible after
- *           clipping
  *
- * Checks that a desired plane update is valid.  Drivers that provide
+ * Checks that a desired plane update is valid, and updates various
+ * bits of derived state (clipped coordinates etc.). Drivers that provide
  * their own plane handling rather than helper-provided implementations may
  * still wish to call this function to avoid duplication of error checking
  * code.
@@ -134,29 +116,38 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
  * RETURNS:
  * Zero if update appears valid, error code on failure
  */
-int drm_plane_helper_check_update(struct drm_plane *plane,
-                                 struct drm_crtc *crtc,
-                                 struct drm_framebuffer *fb,
-                                 struct drm_rect *src,
-                                 struct drm_rect *dest,
-                                 const struct drm_rect *clip,
-                                 unsigned int rotation,
-                                 int min_scale,
-                                 int max_scale,
-                                 bool can_position,
-                                 bool can_update_disabled,
-                                 bool *visible)
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+                                const struct drm_rect *clip,
+                                int min_scale,
+                                int max_scale,
+                                bool can_position,
+                                bool can_update_disabled)
 {
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *src = &state->src;
+       struct drm_rect *dst = &state->dst;
+       unsigned int rotation = state->rotation;
        int hscale, vscale;
 
+       src->x1 = state->src_x;
+       src->y1 = state->src_y;
+       src->x2 = state->src_x + state->src_w;
+       src->y2 = state->src_y + state->src_h;
+
+       dst->x1 = state->crtc_x;
+       dst->y1 = state->crtc_y;
+       dst->x2 = state->crtc_x + state->crtc_w;
+       dst->y2 = state->crtc_y + state->crtc_h;
+
        if (!fb) {
-               *visible = false;
+               state->visible = false;
                return 0;
        }
 
        /* crtc should only be NULL when disabling (i.e., !fb) */
        if (WARN_ON(!crtc)) {
-               *visible = false;
+               state->visible = false;
                return 0;
        }
 
@@ -168,20 +159,20 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
        drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
 
        /* Check scaling */
-       hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
-       vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
+       hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+       vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
        if (hscale < 0 || vscale < 0) {
                DRM_DEBUG_KMS("Invalid scaling of plane\n");
-               drm_rect_debug_print("src: ", src, true);
-               drm_rect_debug_print("dst: ", dest, false);
+               drm_rect_debug_print("src: ", &state->src, true);
+               drm_rect_debug_print("dst: ", &state->dst, false);
                return -ERANGE;
        }
 
-       *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+       state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
        drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
 
-       if (!*visible)
+       if (!state->visible)
                /*
                 * Plane isn't visible; some drivers can handle this
                 * so we just return success here.  Drivers that can't
@@ -191,15 +182,87 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
                 */
                return 0;
 
-       if (!can_position && !drm_rect_equals(dest, clip)) {
+       if (!can_position && !drm_rect_equals(dst, clip)) {
                DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
-               drm_rect_debug_print("dst: ", dest, false);
+               drm_rect_debug_print("dst: ", dst, false);
                drm_rect_debug_print("clip: ", clip, false);
                return -EINVAL;
        }
 
        return 0;
 }
+EXPORT_SYMBOL(drm_plane_helper_check_state);
+
+/**
+ * drm_plane_helper_check_update() - Check plane update for validity
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @src: source coordinates in 16.16 fixed point
+ * @dst: integer destination coordinates
+ * @clip: integer clipping coordinates
+ * @rotation: plane rotation
+ * @min_scale: minimum @src:@dst scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dst scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ *                doesn't cover the entire crtc?  This will generally
+ *                only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ *                       is disabled?
+ * @visible: output parameter indicating whether plane is still visible after
+ *           clipping
+ *
+ * Checks that a desired plane update is valid.  Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_plane_helper_check_update(struct drm_plane *plane,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_rect *src,
+                                 struct drm_rect *dst,
+                                 const struct drm_rect *clip,
+                                 unsigned int rotation,
+                                 int min_scale,
+                                 int max_scale,
+                                 bool can_position,
+                                 bool can_update_disabled,
+                                 bool *visible)
+{
+       struct drm_plane_state state = {
+               .plane = plane,
+               .crtc = crtc,
+               .fb = fb,
+               .src_x = src->x1,
+               .src_y = src->y1,
+               .src_w = drm_rect_width(src),
+               .src_h = drm_rect_height(src),
+               .crtc_x = dst->x1,
+               .crtc_y = dst->y1,
+               .crtc_w = drm_rect_width(dst),
+               .crtc_h = drm_rect_height(dst),
+               .rotation = rotation,
+               .visible = *visible,
+       };
+       int ret;
+
+       ret = drm_plane_helper_check_state(&state, clip,
+                                          min_scale, max_scale,
+                                          can_position,
+                                          can_update_disabled);
+       if (ret)
+               return ret;
+
+       *src = state.src;
+       *dst = state.dst;
+       *visible = state.visible;
+
+       return 0;
+}
 EXPORT_SYMBOL(drm_plane_helper_check_update);
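For atomic drivers the state-based variant is the natural entry point. A minimal sketch of a plane ->atomic_check hook built on it (the foo_ names are placeholders, not a real driver):

	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		struct drm_crtc_state *crtc_state;
		struct drm_rect clip = { 0 };

		if (!state->crtc)
			return 0;	/* disabling: nothing to clip against */

		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		clip.x2 = crtc_state->adjusted_mode.hdisplay;
		clip.y2 = crtc_state->adjusted_mode.vdisplay;

		return drm_plane_helper_check_state(state, &clip,
						    DRM_PLANE_HELPER_NO_SCALING,
						    DRM_PLANE_HELPER_NO_SCALING,
						    false, true);
	}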
 
 /**
@@ -274,7 +337,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            &src, &dest, &clip,
-                                           BIT(DRM_ROTATE_0),
+                                           DRM_ROTATE_0,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            false, false, &visible);
@@ -363,60 +426,6 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
 };
 EXPORT_SYMBOL(drm_primary_helper_funcs);
 
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
-       struct drm_plane *primary;
-       int ret;
-
-       primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-       if (primary == NULL) {
-               DRM_DEBUG_KMS("Failed to allocate primary plane\n");
-               return NULL;
-       }
-
-       /*
-        * Remove the format_default field from drm_plane when dropping
-        * this helper.
-        */
-       primary->format_default = true;
-
-       /* possible_crtc's will be filled in later by crtc_init */
-       ret = drm_universal_plane_init(dev, primary, 0,
-                                      &drm_primary_helper_funcs,
-                                      safe_modeset_formats,
-                                      ARRAY_SIZE(safe_modeset_formats),
-                                      DRM_PLANE_TYPE_PRIMARY, NULL);
-       if (ret) {
-               kfree(primary);
-               primary = NULL;
-       }
-
-       return primary;
-}
-
-/**
- * drm_crtc_init - Legacy CRTC initialization function
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * Initialize a CRTC object with a default helper-provided primary plane and no
- * cursor plane.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-                 const struct drm_crtc_funcs *funcs)
-{
-       struct drm_plane *primary;
-
-       primary = create_primary_plane(dev);
-       return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
-                                        NULL);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
 int drm_plane_helper_commit(struct drm_plane *plane,
                            struct drm_plane_state *plane_state,
                            struct drm_framebuffer *old_fb)
index 2c819ef90090dee3b8c154d5065fd77041e66f6b..026269851ce9f005e9eca687b2f9146e75bdf4f0 100644 (file)
@@ -48,8 +48,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
        DRM_DEBUG("\n");
 
        dev = drm_dev_alloc(driver, &platdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        dev->platformdev = platdev;
 
index a0df377d7d1c4466a71860db90bced926273eac5..f6b64d7d3528d742a9b36c4551fe7243f38b58d5 100644 (file)
@@ -129,6 +129,7 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
+       unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
@@ -141,8 +142,13 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
                        poll = true;
        }
 
+       if (dev->mode_config.delayed_event) {
+               poll = true;
+               delay = 0;
+       }
+
        if (poll)
-               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
new file mode 100644 (file)
index 0000000..a4d81cf
--- /dev/null
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_property.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Properties as represented by &drm_property are used to extend the modeset
+ * interface exposed to userspace. For the atomic modeset IOCTL properties are
+ * even the only way to transport metadata about the desired new modeset
+ * configuration from userspace to the kernel. Properties have a well-defined
+ * value range, which is enforced by the drm core. See the documentation of the
+ * flags member of struct &drm_property for an overview of the different
+ * property types and ranges.
+ *
+ * Properties don't store the current value directly, but need to be
+ * instantiated by attaching them to a &drm_mode_object with
+ * drm_object_attach_property().
+ *
+ * Property values are only 64 bit. To support bigger piles of data (like gamma
+ * tables, color correction matrices or large structures) a property can instead
+ * point at a &drm_property_blob with that additional data.
+ *
+ * Properties are defined by their symbolic name; userspace must keep a
+ * per-object mapping from those names to the property ID used in the atomic
+ * IOCTL and in the get/set property IOCTL.
+ */
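As a sketch of the instantiation step described above (the "zpos" name, the 0..255 range and the plane object are illustrative):

	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* the property only becomes stateful once attached to an object */
	drm_object_attach_property(&plane->base, prop, 0);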
+
+static bool drm_property_type_valid(struct drm_property *property)
+{
+       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+               return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+       return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+       int ret;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+
+       property->dev = dev;
+
+       if (num_values) {
+               property->values = kcalloc(num_values, sizeof(uint64_t),
+                                          GFP_KERNEL);
+               if (!property->values)
+                       goto fail;
+       }
+
+       ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+       if (ret)
+               goto fail;
+
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_list);
+
+       if (name) {
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+               property->name[DRM_PROP_NAME_LEN-1] = '\0';
+       }
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+
+       WARN_ON(!drm_property_type_valid(property));
+
+       return property;
+fail:
+       kfree(property->values);
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+                                        const char *name,
+                                        const struct drm_prop_enum_list *props,
+                                        int num_values)
+{
+       struct drm_property *property;
+       int i, ret;
+
+       flags |= DRM_MODE_PROP_ENUM;
+
+       property = drm_property_create(dev, flags, name, num_values);
+       if (!property)
+               return NULL;
+
+       for (i = 0; i < num_values; i++) {
+               ret = drm_property_add_enum(property, i,
+                                     props[i].type,
+                                     props[i].name);
+               if (ret) {
+                       drm_property_destroy(dev, property);
+                       return NULL;
+               }
+       }
+
+       return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
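A hedged sketch of building an enum property; the names and values are illustrative, not a standardized connector property:

	static const struct drm_prop_enum_list dither_modes[] = {
		{ 0, "off" },
		{ 1, "on" },
		{ 2, "auto" },
	};
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "dithering mode",
					dither_modes,
					ARRAY_SIZE(dither_modes));
	if (!prop)
		return -ENOMEM;
	drm_object_attach_property(&connector->base, prop, 2 /* "auto" */);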
+
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
+ *
+ * This creates a new bitmask drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Compared to plain enumeration properties, userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+                                        int flags, const char *name,
+                                        const struct drm_prop_enum_list *props,
+                                        int num_props,
+                                        uint64_t supported_bits)
+{
+       struct drm_property *property;
+       int i, ret, index = 0;
+       int num_values = hweight64(supported_bits);
+
+       flags |= DRM_MODE_PROP_BITMASK;
+
+       property = drm_property_create(dev, flags, name, num_values);
+       if (!property)
+               return NULL;
+       for (i = 0; i < num_props; i++) {
+               if (!(supported_bits & (1ULL << props[i].type)))
+                       continue;
+
+               if (WARN_ON(index >= num_values)) {
+                       drm_property_destroy(dev, property);
+                       return NULL;
+               }
+
+               ret = drm_property_add_enum(property, index++,
+                                     props[i].type,
+                                     props[i].name);
+               if (ret) {
+                       drm_property_destroy(dev, property);
+                       return NULL;
+               }
+       }
+
+       return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
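Note that the @props entries give bit positions (0-63), as the 1ULL << props[i].type test above shows. A sketch modeled on the core rotation property (subset of flags shown for brevity):

	static const struct drm_prop_enum_list props[] = {
		{ __builtin_ffs(DRM_ROTATE_0) - 1,  "rotate-0" },
		{ __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" },
		{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
	};
	struct drm_property *prop;

	prop = drm_property_create_bitmask(dev, 0, "rotation",
					   props, ARRAY_SIZE(props),
					   DRM_ROTATE_0 | DRM_ROTATE_90 |
					   DRM_REFLECT_X);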
+
+static struct drm_property *property_create_range(struct drm_device *dev,
+                                        int flags, const char *name,
+                                        uint64_t min, uint64_t max)
+{
+       struct drm_property *property;
+
+       property = drm_property_create(dev, flags, name, 2);
+       if (!property)
+               return NULL;
+
+       property->values[0] = min;
+       property->values[1] = max;
+
+       return property;
+}
+
+/**
+ * drm_property_create_range - create a new unsigned ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any unsigned integer value in the [min, max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+                                        const char *name,
+                                        uint64_t min, uint64_t max)
+{
+       return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+                       name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+/**
+ * drm_property_create_signed_range - create a new signed ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any signed integer value in the [min, max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+                                        int flags, const char *name,
+                                        int64_t min, int64_t max)
+{
+       return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+                       name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+/**
+ * drm_property_create_object - create a new object property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @type: object type from DRM_MODE_OBJECT_* defines
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set this to the object ID of a &drm_mode_object
+ * of the given @type. Only useful for atomic properties, which is enforced by
+ * requiring the DRM_MODE_PROP_ATOMIC flag.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+                                               int flags, const char *name,
+                                               uint32_t type)
+{
+       struct drm_property *property;
+
+       flags |= DRM_MODE_PROP_OBJECT;
+
+       if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
+               return NULL;
+
+       property = drm_property_create(dev, flags, name, 1);
+       if (!property)
+               return NULL;
+
+       property->values[0] = type;
+
+       return property;
+}
+EXPORT_SYMBOL(drm_property_create_object);
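For instance, the core's atomic CRTC link property for planes and connectors is created this way (shown here as a sketch):

	struct drm_property *prop;

	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
					  "CRTC_ID", DRM_MODE_OBJECT_CRTC);
	if (!prop)
		return -ENOMEM;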
+
+/**
+ * drm_property_create_bool - create a new boolean property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * This is implemented as a ranged property with only {0, 1} as valid values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+                                             const char *name)
+{
+       return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
+
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+               return -EINVAL;
+
+       /*
+        * Bitmask enum properties have the additional constraint of values
+        * from 0 to 63
+        */
+       if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+                       (value > 63))
+               return -EINVAL;
+
+       if (!list_empty(&property->enum_list)) {
+               list_for_each_entry(prop_enum, &property->enum_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       if (property->num_values)
+               kfree(property->values);
+       drm_mode_object_unregister(dev, &property->base);
+       list_del(&property->head);
+       kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_get_property *out_resp = data;
+       struct drm_property *property;
+       int enum_count = 0;
+       int value_count = 0;
+       int ret = 0, i;
+       int copied;
+       struct drm_property_enum *prop_enum;
+       struct drm_mode_property_enum __user *enum_ptr;
+       uint64_t __user *values_ptr;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+       property = drm_property_find(dev, out_resp->prop_id);
+       if (!property) {
+               ret = -ENOENT;
+               goto done;
+       }
+
+       if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+               list_for_each_entry(prop_enum, &property->enum_list, head)
+                       enum_count++;
+       }
+
+       value_count = property->num_values;
+
+       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+       out_resp->flags = property->flags;
+
+       if ((out_resp->count_values >= value_count) && value_count) {
+               values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+               for (i = 0; i < value_count; i++) {
+                       if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+       out_resp->count_values = value_count;
+
+       if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+                       drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+               if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+                       copied = 0;
+                       enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+                       list_for_each_entry(prop_enum, &property->enum_list, head) {
+                               if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (copy_to_user(&enum_ptr[copied].name,
+                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = enum_count;
+       }
+
+       /*
+        * NOTE: The idea seems to have been to use this to read all the blob
+        * property values. But nothing ever added them to the corresponding
+        * list, userspace always used the special-purpose get_blob ioctl to
+        * read the value for a blob property. It also doesn't make a lot of
+        * sense to return values here when everything else is just metadata for
+        * the property itself.
+        */
+       if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+               out_resp->count_enum_blobs = 0;
+done:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
+static void drm_property_free_blob(struct kref *kref)
+{
+       struct drm_property_blob *blob =
+               container_of(kref, struct drm_property_blob, base.refcount);
+
+       mutex_lock(&blob->dev->mode_config.blob_lock);
+       list_del(&blob->head_global);
+       mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+       drm_mode_object_unregister(blob->dev, &blob->base);
+
+       kfree(blob);
+}
+
+/**
+ * drm_property_create_blob - Create new blob property
+ * @dev: DRM device to create property for
+ * @length: Length to allocate for blob data
+ * @data: If specified, copies data into blob
+ *
+ * Creates a new blob property for a specified DRM device, optionally
+ * copying data. Note that blob properties are meant to be invariant, hence the
+ * data must be filled out before the blob is used as the value of any property.
+ *
+ * Returns:
+ * New blob property with a single reference on success, or an ERR_PTR
+ * value on failure.
+ */
+struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+                        const void *data)
+{
+       struct drm_property_blob *blob;
+       int ret;
+
+       if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+               return ERR_PTR(-EINVAL);
+
+       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       if (!blob)
+               return ERR_PTR(-ENOMEM);
+
+       /* This must be explicitly initialised, so we can safely call list_del
+        * on it in the removal handler, even if it isn't in a file list. */
+       INIT_LIST_HEAD(&blob->head_file);
+       blob->length = length;
+       blob->dev = dev;
+
+       if (data)
+               memcpy(blob->data, data, length);
+
+       ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+                                     true, drm_property_free_blob);
+       if (ret) {
+               kfree(blob);
+               return ERR_PTR(-EINVAL);
+       }
+
+       mutex_lock(&dev->mode_config.blob_lock);
+       list_add_tail(&blob->head_global,
+                     &dev->mode_config.property_blob_list);
+       mutex_unlock(&dev->mode_config.blob_lock);
+
+       return blob;
+}
+EXPORT_SYMBOL(drm_property_create_blob);
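A sketch of the create/fill/release pattern (the gamma table is illustrative); per the invariant above, the blob must be fully filled before its ID is handed out as a property value:

	struct drm_property_blob *blob;
	u16 lut[256] = { 0 };	/* fill with gamma values before creating */

	blob = drm_property_create_blob(dev, sizeof(lut), lut);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	/* ... expose blob->base.id as the value of a blob property ... */

	drm_property_unreference_blob(blob);	/* drop the creation ref */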
+
+/**
+ * drm_property_unreference_blob - Unreference a blob property
+ * @blob: Pointer to blob property
+ *
+ * Drop a reference on a blob property. May free the object.
+ */
+void drm_property_unreference_blob(struct drm_property_blob *blob)
+{
+       if (!blob)
+               return;
+
+       drm_mode_object_unreference(&blob->base);
+}
+EXPORT_SYMBOL(drm_property_unreference_blob);
+
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+                                    struct drm_file *file_priv)
+{
+       struct drm_property_blob *blob, *bt;
+
+       /*
+        * When the file gets released that means no one else can access the
+        * blob list any more, so no need to grab dev->mode_config.blob_lock.
+        */
+       list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
+               list_del_init(&blob->head_file);
+               drm_property_unreference_blob(blob);
+       }
+}
+
+/**
+ * drm_property_reference_blob - Take a reference on an existing property
+ * @blob: Pointer to blob property
+ *
+ * Take a new reference on an existing blob property. Returns @blob, which
+ * allows this to be used as a shorthand in assignments.
+ */
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
+{
+       drm_mode_object_reference(&blob->base);
+       return blob;
+}
+EXPORT_SYMBOL(drm_property_reference_blob);
+
+/**
+ * drm_property_lookup_blob - look up a blob property and take a reference
+ * @dev: drm device
+ * @id: id of the blob property
+ *
+ * If successful, this takes an additional reference to the blob property.
+ * Callers need to make sure to eventually unreference the returned property
+ * again, using drm_property_unreference_blob().
+ *
+ * Return:
+ * NULL on failure, pointer to the blob on success.
+ */
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+                                                  uint32_t id)
+{
+       struct drm_mode_object *obj;
+       struct drm_property_blob *blob = NULL;
+
+       obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+       if (obj)
+               blob = obj_to_blob(obj);
+       return blob;
+}
+EXPORT_SYMBOL(drm_property_lookup_blob);
+
+/**
+ * drm_property_replace_global_blob - replace existing blob property
+ * @dev: drm device
+ * @replace: location of blob property pointer to be replaced
+ * @length: length of data for new blob, or 0 for no data
+ * @data: content for new blob, or NULL for no data
+ * @obj_holds_id: optional object for property holding blob ID
+ * @prop_holds_id: optional property holding blob ID
+ * @return 0 on success or error on failure
+ *
+ * This function will replace a global property in the blob list, optionally
+ * updating a property which holds the ID of that property.
+ *
+ * If length is 0 or data is NULL, no new blob will be created, and the holding
+ * property, if specified, will be set to 0.
+ *
+ * Access to the replace pointer is assumed to be protected by the caller, e.g.
+ * by holding the relevant modesetting object lock for its parent.
+ *
+ * For example, a drm_connector has a 'PATH' property, which contains the ID
+ * of a blob property with the value of the MST path information. Calling this
+ * function with replace pointing to the connector's path_blob_ptr, length and
+ * data set for the new path information, obj_holds_id set to the connector's
+ * base object, and prop_holds_id set to the path property name, will perform
+ * a completely atomic update. The access to path_blob_ptr is protected by the
+ * caller holding a lock on the connector.
+ */
+int drm_property_replace_global_blob(struct drm_device *dev,
+                                    struct drm_property_blob **replace,
+                                    size_t length,
+                                    const void *data,
+                                    struct drm_mode_object *obj_holds_id,
+                                    struct drm_property *prop_holds_id)
+{
+       struct drm_property_blob *new_blob = NULL;
+       struct drm_property_blob *old_blob = NULL;
+       int ret;
+
+       WARN_ON(replace == NULL);
+
+       old_blob = *replace;
+
+       if (length && data) {
+               new_blob = drm_property_create_blob(dev, length, data);
+               if (IS_ERR(new_blob))
+                       return PTR_ERR(new_blob);
+       }
+
+       if (obj_holds_id) {
+               ret = drm_object_property_set_value(obj_holds_id,
+                                                   prop_holds_id,
+                                                   new_blob ?
+                                                       new_blob->base.id : 0);
+               if (ret != 0)
+                       goto err_created;
+       }
+
+       drm_property_unreference_blob(old_blob);
+       *replace = new_blob;
+
+       return 0;
+
+err_created:
+       drm_property_unreference_blob(new_blob);
+       return ret;
+}
+EXPORT_SYMBOL(drm_property_replace_global_blob);
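The PATH example from the comment above, written out as a sketch (connector and path are assumed to be in scope; this mirrors how a connector's path property is typically updated):

	ret = drm_property_replace_global_blob(dev,
					       &connector->path_blob_ptr,
					       strlen(path) + 1, path,
					       &connector->base,
					       dev->mode_config.path_property);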
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_get_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       int ret = 0;
+       void __user *blob_ptr;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+       if (!blob)
+               return -ENOENT;
+
+       if (out_resp->length == blob->length) {
+               blob_ptr = (void __user *)(unsigned long)out_resp->data;
+               if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+                       ret = -EFAULT;
+                       goto unref;
+               }
+       }
+       out_resp->length = blob->length;
+unref:
+       drm_property_unreference_blob(blob);
+
+       return ret;
+}
+
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_create_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       void __user *blob_ptr;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       blob = drm_property_create_blob(dev, out_resp->length, NULL);
+       if (IS_ERR(blob))
+               return PTR_ERR(blob);
+
+       blob_ptr = (void __user *)(unsigned long)out_resp->data;
+       if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+               ret = -EFAULT;
+               goto out_blob;
+       }
+
+       /* Dropping the lock between create_blob and our access here is safe
+        * as only the same file_priv can remove the blob; at this point, it is
+        * not associated with any file_priv. */
+       mutex_lock(&dev->mode_config.blob_lock);
+       out_resp->blob_id = blob->base.id;
+       list_add_tail(&blob->head_file, &file_priv->blobs);
+       mutex_unlock(&dev->mode_config.blob_lock);
+
+       return 0;
+
+out_blob:
+       drm_property_unreference_blob(blob);
+       return ret;
+}
+
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_destroy_blob *out_resp = data;
+       struct drm_property_blob *blob = NULL, *bt;
+       bool found = false;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+       if (!blob)
+               return -ENOENT;
+
+       mutex_lock(&dev->mode_config.blob_lock);
+       /* Ensure the property was actually created by this user. */
+       list_for_each_entry(bt, &file_priv->blobs, head_file) {
+               if (bt == blob) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               ret = -EPERM;
+               goto err;
+       }
+
+       /* We must drop head_file here, because we may not be the last
+        * reference on the blob. */
+       list_del_init(&blob->head_file);
+       mutex_unlock(&dev->mode_config.blob_lock);
+
+       /* One reference from lookup, and one from the filp. */
+       drm_property_unreference_blob(blob);
+       drm_property_unreference_blob(blob);
+
+       return 0;
+
+err:
+       mutex_unlock(&dev->mode_config.blob_lock);
+       drm_property_unreference_blob(blob);
+
+       return ret;
+}
+
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (ie. to ensure the prop
+ * value doesn't become invalid part way through the property update due to
+ * race).  The value returned by reference via 'ref' should be passed back
+ * to drm_property_change_valid_put() after the property is set (and the
+ * object to which the property is attached has a chance to take its own
+ * reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+                                  uint64_t value, struct drm_mode_object **ref)
+{
+       int i;
+
+       if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+               return false;
+
+       *ref = NULL;
+
+       if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+               if (value < property->values[0] || value > property->values[1])
+                       return false;
+               return true;
+       } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+               int64_t svalue = U642I64(value);
+
+               if (svalue < U642I64(property->values[0]) ||
+                               svalue > U642I64(property->values[1]))
+                       return false;
+               return true;
+       } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+               uint64_t valid_mask = 0;
+
+               for (i = 0; i < property->num_values; i++)
+                       valid_mask |= (1ULL << property->values[i]);
+               return !(value & ~valid_mask);
+       } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+               struct drm_property_blob *blob;
+
+               if (value == 0)
+                       return true;
+
+               blob = drm_property_lookup_blob(property->dev, value);
+               if (blob) {
+                       *ref = &blob->base;
+                       return true;
+               } else {
+                       return false;
+               }
+       } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+               /* a zero value for an object property translates to null: */
+               if (value == 0)
+                       return true;
+
+               *ref = __drm_mode_object_find(property->dev, value,
+                                             property->values[0]);
+               return *ref != NULL;
+       }
+
+       for (i = 0; i < property->num_values; i++)
+               if (property->values[i] == value)
+                       return true;
+       return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+               struct drm_mode_object *ref)
+{
+       if (!ref)
+               return;
+
+       if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+               drm_mode_object_unreference(ref);
+       } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+               drm_property_unreference_blob(obj_to_blob(ref));
+}
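A sketch of the get/put pairing from a caller's point of view (drm_object_property_set_value() stands in here for the real per-object set path, which goes through the object's set_property hooks):

	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(property, value, &ref))
		return -EINVAL;

	/* apply; the target object may take its own reference meanwhile */
	ret = drm_object_property_set_value(obj, property, value);

	drm_property_change_valid_put(property, ref);
	return ret;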
index a8e2c86039451812237c94037a5ab7001de6d886..73e53a8d1b37c08a64ea862dacf51739fe85c15a 100644 (file)
@@ -100,7 +100,7 @@ static int drm_calc_scale(int src, int dst)
 {
        int scale = 0;
 
-       if (src < 0 || dst < 0)
+       if (WARN_ON(src < 0 || dst < 0))
                return -EINVAL;
 
        if (dst == 0)
@@ -317,38 +317,38 @@ void drm_rect_rotate(struct drm_rect *r,
 {
        struct drm_rect tmp;
 
-       if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+       if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
                tmp = *r;
 
-               if (rotation & BIT(DRM_REFLECT_X)) {
+               if (rotation & DRM_REFLECT_X) {
                        r->x1 = width - tmp.x2;
                        r->x2 = width - tmp.x1;
                }
 
-               if (rotation & BIT(DRM_REFLECT_Y)) {
+               if (rotation & DRM_REFLECT_Y) {
                        r->y1 = height - tmp.y2;
                        r->y2 = height - tmp.y1;
                }
        }
 
        switch (rotation & DRM_ROTATE_MASK) {
-       case BIT(DRM_ROTATE_0):
+       case DRM_ROTATE_0:
                break;
-       case BIT(DRM_ROTATE_90):
+       case DRM_ROTATE_90:
                tmp = *r;
                r->x1 = tmp.y1;
                r->x2 = tmp.y2;
                r->y1 = width - tmp.x2;
                r->y2 = width - tmp.x1;
                break;
-       case BIT(DRM_ROTATE_180):
+       case DRM_ROTATE_180:
                tmp = *r;
                r->x1 = width - tmp.x2;
                r->x2 = width - tmp.x1;
                r->y1 = height - tmp.y2;
                r->y2 = height - tmp.y1;
                break;
-       case BIT(DRM_ROTATE_270):
+       case DRM_ROTATE_270:
                tmp = *r;
                r->x1 = height - tmp.y2;
                r->x2 = height - tmp.y1;
@@ -392,23 +392,23 @@ void drm_rect_rotate_inv(struct drm_rect *r,
        struct drm_rect tmp;
 
        switch (rotation & DRM_ROTATE_MASK) {
-       case BIT(DRM_ROTATE_0):
+       case DRM_ROTATE_0:
                break;
-       case BIT(DRM_ROTATE_90):
+       case DRM_ROTATE_90:
                tmp = *r;
                r->x1 = width - tmp.y2;
                r->x2 = width - tmp.y1;
                r->y1 = tmp.x1;
                r->y2 = tmp.x2;
                break;
-       case BIT(DRM_ROTATE_180):
+       case DRM_ROTATE_180:
                tmp = *r;
                r->x1 = width - tmp.x2;
                r->x2 = width - tmp.x1;
                r->y1 = height - tmp.y2;
                r->y2 = height - tmp.y1;
                break;
-       case BIT(DRM_ROTATE_270):
+       case DRM_ROTATE_270:
                tmp = *r;
                r->x1 = tmp.y1;
                r->x2 = tmp.y2;
@@ -419,15 +419,15 @@ void drm_rect_rotate_inv(struct drm_rect *r,
                break;
        }
 
-       if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+       if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
                tmp = *r;
 
-               if (rotation & BIT(DRM_REFLECT_X)) {
+               if (rotation & DRM_REFLECT_X) {
                        r->x1 = width - tmp.x2;
                        r->x2 = width - tmp.x1;
                }
 
-               if (rotation & BIT(DRM_REFLECT_Y)) {
+               if (rotation & DRM_REFLECT_Y) {
                        r->y1 = height - tmp.y2;
                        r->y2 = height - tmp.y1;
                }
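A worked example of the 90-degree case with the flag-valued rotations above: for a 100x50 rect at the origin inside a 200x100 source, the x range takes over the old y extent and the y range is mirrored through the width:

	struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 100, .y2 = 50 };

	drm_rect_rotate(&r, 200, 100, DRM_ROTATE_90);
	/* now r == (0, 100)-(50, 200):
	 *   x1 = old y1 = 0,         x2 = old y2 = 50,
	 *   y1 = 200 - old x2 = 100, y2 = 200 - old x1 = 200
	 * drm_rect_rotate_inv() with the same arguments undoes this. */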
index bf70431073f654a5ec52d4276c8fcb33f306ce04..275bca44f38c2fa118fb2ac78530ef08e0f523f0 100644 (file)
@@ -68,7 +68,7 @@ static void drm_sg_cleanup(struct drm_sg_mem * entry)
 void drm_legacy_sg_cleanup(struct drm_device *dev)
 {
        if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
-           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+           drm_core_check_feature(dev, DRIVER_LEGACY)) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }
@@ -88,7 +88,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
 
        DRM_DEBUG("\n");
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_SG))
@@ -201,7 +201,7 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data,
        struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;
 
        if (!drm_core_check_feature(dev, DRIVER_SG))
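The feature test is inverted throughout this file: legacy scatter/gather paths
are now gated on the new DRIVER_LEGACY flag rather than on the absence of
DRIVER_MODESET. A sketch of the intended equivalence, assuming DRIVER_LEGACY
is set only on non-modesetting drivers:

	/* before: bail out on modesetting drivers */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* after: require an explicitly legacy driver */
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;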
index 0db36d27e90b009292e38d57221d2f173574720a..7b6d26e64977b65473a2348f000a94707db20589 100644 (file)
@@ -34,6 +34,12 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
        .destroy = drm_encoder_cleanup,
 };
 
+static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *state)
+{
+       return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_simple_display_pipe *pipe;
@@ -57,6 +63,7 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+       .atomic_check = drm_simple_kms_crtc_check,
        .disable = drm_simple_kms_crtc_disable,
        .enable = drm_simple_kms_crtc_enable,
 };
@@ -73,22 +80,9 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
 static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
                                        struct drm_plane_state *plane_state)
 {
-       struct drm_rect src = {
-               .x1 = plane_state->src_x,
-               .y1 = plane_state->src_y,
-               .x2 = plane_state->src_x + plane_state->src_w,
-               .y2 = plane_state->src_y + plane_state->src_h,
-       };
-       struct drm_rect dest = {
-               .x1 = plane_state->crtc_x,
-               .y1 = plane_state->crtc_y,
-               .x2 = plane_state->crtc_x + plane_state->crtc_w,
-               .y2 = plane_state->crtc_y + plane_state->crtc_h,
-       };
        struct drm_rect clip = { 0 };
        struct drm_simple_display_pipe *pipe;
        struct drm_crtc_state *crtc_state;
-       bool visible;
        int ret;
 
        pipe = container_of(plane, struct drm_simple_display_pipe, plane);
@@ -102,17 +96,15 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
 
        clip.x2 = crtc_state->adjusted_mode.hdisplay;
        clip.y2 = crtc_state->adjusted_mode.vdisplay;
-       ret = drm_plane_helper_check_update(plane, &pipe->crtc,
-                                           plane_state->fb,
-                                           &src, &dest, &clip,
-                                           plane_state->rotation,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           false, true, &visible);
+
+       ret = drm_plane_helper_check_state(plane_state, &clip,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          false, true);
        if (ret)
                return ret;
 
-       if (!visible)
+       if (!plane_state->visible)
                return -EINVAL;
 
        if (!pipe->funcs || !pipe->funcs->check)
@@ -147,17 +139,62 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
 };
 
+/**
+ * drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe
+ * @pipe: simple display pipe object
+ * @bridge: bridge to attach
+ *
+ * Makes it possible to keep using the drm_simple_display_pipe helpers when
+ * a DRM bridge has to be used.
+ *
+ * Note that you probably want to initialize the pipe by passing a NULL
+ * connector to drm_simple_display_pipe_init().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+                                         struct drm_bridge *bridge)
+{
+       bridge->encoder = &pipe->encoder;
+       pipe->encoder.bridge = bridge;
+       return drm_bridge_attach(pipe->encoder.dev, bridge);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
+
+/**
+ * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe
+ * @pipe: simple display pipe object
+ *
+ * Detaches the drm bridge previously attached with
+ * drm_simple_display_pipe_attach_bridge().
+ */
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe)
+{
+       if (WARN_ON(!pipe->encoder.bridge))
+               return;
+
+       drm_bridge_detach(pipe->encoder.bridge);
+       pipe->encoder.bridge = NULL;
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge);
+
 /**
  * drm_simple_display_pipe_init - Initialize a simple display pipeline
  * @dev: DRM device
  * @pipe: simple display pipe object to initialize
  * @funcs: callbacks for the display pipe (optional)
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
  * @format_count: number of elements in @formats
- * @connector: connector to attach and register
+ * @connector: connector to attach and register (optional)
  *
 * Sets up a display pipeline which consists of a really simple
- * plane-crtc-encoder pipe coupled with the provided connector.
+ * plane-crtc-encoder pipe.
+ *
+ * If a connector is supplied, the pipe will be coupled with the provided
+ * connector. You may supply a NULL connector when using DRM bridges, which
+ * handle connectors themselves (see drm_simple_display_pipe_attach_bridge()).
+ *
  * Teardown of a simple display pipe is all handled automatically by the drm
  * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
  * release the memory for the structure themselves.
@@ -196,7 +233,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
        encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
        ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
                               DRM_MODE_ENCODER_NONE, NULL);
-       if (ret)
+       if (ret || !connector)
                return ret;
 
        return drm_mode_connector_attach_encoder(connector, encoder);
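Taken together, the two new exports let a bridge-based driver keep using the
simple-pipe helpers. A hypothetical probe path (priv, funcs, formats and
bridge are illustrative names, not part of this patch):

	ret = drm_simple_display_pipe_init(drm, &priv->pipe, &funcs, formats,
					   ARRAY_SIZE(formats),
					   NULL /* connector handled by bridge */);
	if (ret)
		return ret;

	ret = drm_simple_display_pipe_attach_bridge(&priv->pipe, bridge);
	if (ret)
		return ret;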
index 32dd821b7202a5492a5c7b184c775c329e7e2fb1..9a37196c1bf1728b7ddbc25f0e66937376cd09bc 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/export.h>
 
 #include <drm/drm_sysfs.h>
-#include <drm/drm_core.h>
 #include <drm/drmP.h>
 #include "drm_internal.h"
 
@@ -37,12 +36,7 @@ static char *drm_devnode(struct device *dev, umode_t *mode)
        return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
 
-static CLASS_ATTR_STRING(version, S_IRUGO,
-               CORE_NAME " "
-               __stringify(CORE_MAJOR) "."
-               __stringify(CORE_MINOR) "."
-               __stringify(CORE_PATCHLEVEL) " "
-               CORE_DATE);
+static CLASS_ATTR_STRING(version, S_IRUGO, "drm 1.1.0 20060810");
 
 /**
  * drm_sysfs_init - initialize sysfs helpers
index f306c885597869051eafd3924fb312bf78c6cf49..20cc33d1bfc1869201593ae78bda3b3f691fd232 100644 (file)
@@ -25,7 +25,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_vma_manager.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
@@ -86,7 +85,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                                 unsigned long page_offset, unsigned long size)
 {
        rwlock_init(&mgr->vm_lock);
-       mgr->vm_addr_space_rb = RB_ROOT;
        drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
 }
 EXPORT_SYMBOL(drm_vma_offset_manager_init);
@@ -145,16 +143,16 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
                                                         unsigned long start,
                                                         unsigned long pages)
 {
-       struct drm_vma_offset_node *node, *best;
+       struct drm_mm_node *node, *best;
        struct rb_node *iter;
        unsigned long offset;
 
-       iter = mgr->vm_addr_space_rb.rb_node;
+       iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
        best = NULL;
 
        while (likely(iter)) {
-               node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
-               offset = node->vm_node.start;
+               node = rb_entry(iter, struct drm_mm_node, rb);
+               offset = node->start;
                if (start >= offset) {
                        iter = iter->rb_right;
                        best = node;
@@ -167,38 +165,17 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
 
        /* verify that the node spans the requested area */
        if (best) {
-               offset = best->vm_node.start + best->vm_node.size;
+               offset = best->start + best->size;
                if (offset < start + pages)
                        best = NULL;
        }
 
-       return best;
-}
-EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
-
-/* internal helper to link @node into the rb-tree */
-static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
-                                  struct drm_vma_offset_node *node)
-{
-       struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
-       struct rb_node *parent = NULL;
-       struct drm_vma_offset_node *iter_node;
-
-       while (likely(*iter)) {
-               parent = *iter;
-               iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+       if (!best)
+               return NULL;
 
-               if (node->vm_node.start < iter_node->vm_node.start)
-                       iter = &(*iter)->rb_left;
-               else if (node->vm_node.start > iter_node->vm_node.start)
-                       iter = &(*iter)->rb_right;
-               else
-                       BUG();
-       }
-
-       rb_link_node(&node->vm_rb, parent, iter);
-       rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+       return container_of(best, struct drm_vma_offset_node, vm_node);
 }
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
 
 /**
  * drm_vma_offset_add() - Add offset node to manager
@@ -240,8 +217,6 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
        if (ret)
                goto out_unlock;
 
-       _drm_vma_offset_add_rb(mgr, node);
-
 out_unlock:
        write_unlock(&mgr->vm_lock);
        return ret;
@@ -265,7 +240,6 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
        write_lock(&mgr->vm_lock);
 
        if (drm_mm_node_allocated(&node->vm_node)) {
-               rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
                drm_mm_remove_node(&node->vm_node);
                memset(&node->vm_node, 0, sizeof(node->vm_node));
        }
@@ -277,9 +251,9 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
 /**
  * drm_vma_node_allow - Add open-file to list of allowed users
  * @node: Node to modify
- * @filp: Open file to add
+ * @tag: Tag of file to add
  *
- * Add @filp to the list of allowed open-files for this node. If @filp is
+ * Add @tag to the list of allowed open-files for this node. If @tag is
  * already on this list, the ref-count is incremented.
  *
  * The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -294,7 +268,7 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
  * RETURNS:
  * 0 on success, negative error code on internal failure (out-of-mem)
  */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
 {
        struct rb_node **iter;
        struct rb_node *parent = NULL;
@@ -315,10 +289,10 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
                parent = *iter;
                entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
-               if (filp == entry->vm_filp) {
+               if (tag == entry->vm_tag) {
                        entry->vm_count++;
                        goto unlock;
-               } else if (filp > entry->vm_filp) {
+               } else if (tag > entry->vm_tag) {
                        iter = &(*iter)->rb_right;
                } else {
                        iter = &(*iter)->rb_left;
@@ -330,7 +304,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
                goto unlock;
        }
 
-       new->vm_filp = filp;
+       new->vm_tag = tag;
        new->vm_count = 1;
        rb_link_node(&new->vm_rb, parent, iter);
        rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -346,17 +320,18 @@ EXPORT_SYMBOL(drm_vma_node_allow);
 /**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify
- * @filp: Open file to remove
+ * @tag: Tag of file to remove
  *
- * Decrement the ref-count of @filp in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @filp from the list. You must call
- * this once for every drm_vma_node_allow() on @filp.
+ * Decrement the ref-count of @tag in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @tag from the list. You must call
+ * this once for every drm_vma_node_allow() on @tag.
  *
  * This is locked against concurrent access internally.
  *
- * If @filp is not on the list, nothing is done.
+ * If @tag is not on the list, nothing is done.
  */
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+                        struct drm_file *tag)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -366,13 +341,13 @@ void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (filp == entry->vm_filp) {
+               if (tag == entry->vm_tag) {
                        if (!--entry->vm_count) {
                                rb_erase(&entry->vm_rb, &node->vm_files);
                                kfree(entry);
                        }
                        break;
-               } else if (filp > entry->vm_filp) {
+               } else if (tag > entry->vm_tag) {
                        iter = iter->rb_right;
                } else {
                        iter = iter->rb_left;
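Since the allow-list is ref-counted per tag, allow and revoke calls must
balance. A usage sketch assuming the common pattern of one tag per open DRM
file (obj and file_priv are illustrative):

	/* on handle creation */
	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		return ret;

	/* on handle destruction: exactly once per successful allow */
	drm_vma_node_revoke(&obj->vma_node, file_priv);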
@@ -386,9 +361,9 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
 /**
  * drm_vma_node_is_allowed - Check whether an open-file is granted access
  * @node: Node to check
- * @filp: Open-file to check for
+ * @tag: Tag of file to check for
  *
- * Search the list in @node whether @filp is currently on the list of allowed
+ * Search the list in @node whether @tag is currently on the list of allowed
  * open-files (see drm_vma_node_allow()).
  *
  * This is locked against concurrent access internally.
@@ -397,7 +372,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
  * true iff @tag is on the list
  */
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct file *filp)
+                            struct drm_file *tag)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -407,9 +382,9 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (filp == entry->vm_filp)
+               if (tag == entry->vm_tag)
                        break;
-               else if (filp > entry->vm_filp)
+               else if (tag > entry->vm_tag)
                        iter = iter->rb_right;
                else
                        iter = iter->rb_left;
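A typical consumer of this check is mmap-offset validation; a minimal sketch
under the new tag-based API (obj and file_priv are again illustrative):

	if (!drm_vma_node_is_allowed(&obj->vma_node, file_priv))
		return -EACCES;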
index d8d5564574276421a21e032d6b85beff32e3f41e..cb86c7e5495c58b5a855cede81f20e387d5d6ec0 100644 (file)
@@ -21,6 +21,7 @@
 
 #include "common.xml.h"
 #include "state.xml.h"
+#include "state_hi.xml.h"
 #include "state_3d.xml.h"
 #include "cmdstream.xml.h"
 
@@ -117,11 +118,6 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
 }
 
-static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
-{
-       return buf->paddr - gpu->memory_base;
-}
-
 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
 {
@@ -129,7 +125,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        u32 *ptr = buf->vaddr + off;
 
        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
-                       ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+                       ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
 
        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
@@ -162,7 +158,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;
 
-       return gpu_va(gpu, buffer) + buffer->user_size;
+       return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
 }
 
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -173,7 +169,41 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
        buffer->user_size = 0;
 
        CMD_WAIT(buffer);
-       CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+       CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+                buffer->user_size - 4);
+
+       return buffer->user_size / 8;
+}
+
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
+{
+       struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+       buffer->user_size = 0;
+
+       if (gpu->identity.features & chipFeatures_PIPE_3D) {
+               CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+                              VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
+               CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+                       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+               CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+               CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+               CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+       }
+
+       if (gpu->identity.features & chipFeatures_PIPE_2D) {
+               CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+                              VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
+               CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+                       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+               CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+               CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+               CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+       }
+
+       CMD_END(buffer);
+
+       buffer->user_size = ALIGN(buffer->user_size, 8);
 
        return buffer->user_size / 8;
 }
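The value returned here is the command-buffer length in 64-bit units, suitable
as the FE prefetch count. The intended pairing, mirrored later in
etnaviv_iommuv2_restore():

	prefetch = etnaviv_buffer_config_mmuv2(gpu, mtlb_addr, safe_addr);
	etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);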
@@ -231,7 +261,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
-       link_target = gpu_va(gpu, cmdbuf);
+       link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
        link_dwords = cmdbuf->size / 8;
 
        /*
@@ -246,8 +276,12 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
                extra_dwords = 1;
 
                /* flush command */
-               if (gpu->mmu->need_flush)
-                       extra_dwords += 1;
+               if (gpu->mmu->need_flush) {
+                       if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+                               extra_dwords += 1;
+                       else
+                               extra_dwords += 3;
+               }
 
                /* pipe switch commands */
                if (gpu->switch_context)
@@ -257,12 +291,23 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
-                       CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
-                                      VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
-                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
-                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
-                                      VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
-                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+                       if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+                               CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+                                              VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+                                              VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+                                              VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+                                              VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+                                              VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+                       } else {
+                               CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+                                       VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+                                       VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
+                                       VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
+                               CMD_SEM(buffer, SYNC_RECIPIENT_FE,
+                                       SYNC_RECIPIENT_PE);
+                               CMD_STALL(buffer, SYNC_RECIPIENT_FE,
+                                       SYNC_RECIPIENT_PE);
+                       }
 
                        gpu->mmu->need_flush = false;
                }
@@ -301,7 +346,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-                       return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+                       return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
 
        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
index ffd1b32caa8d5b1b3a00b1dfbdf065b375e46496..aa687669e22b4b05cbbc98f4af5e72ef62979e81 100644 (file)
@@ -488,8 +488,7 @@ static const struct file_operations fops = {
 };
 
 static struct drm_driver etnaviv_drm_driver = {
-       .driver_features    = DRIVER_HAVE_IRQ |
-                               DRIVER_GEM |
+       .driver_features    = DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER,
        .open               = etnaviv_open,
@@ -530,10 +529,8 @@ static int etnaviv_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
-
-       drm->platformdev = to_platform_device(dev);
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
index 115c5bc6d7c8329312c04e49b06fe2100571b265..65e057639653026363b03c3f740c91f6bb4735d0 100644 (file)
@@ -96,6 +96,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle);
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf);
index 4a29eeadbf1e738da23a6c29a2e00e8f68d81576..2bef501d4a17211069a9c9dfdad233a44d2d0611 100644 (file)
@@ -175,11 +175,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
        etnaviv_core_dump_registers(&iter, gpu);
        etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
        etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
-                             gpu->buffer->size, gpu->buffer->paddr);
+                             gpu->buffer->size,
+                             etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
 
        list_for_each_entry(cmd, &gpu->active_cmd_list, node)
                etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
-                                     cmd->size, cmd->paddr);
+                                     cmd->size,
+                                     etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
 
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
index 87ef34150d466903ea128eddf599718eb5836dcb..b1254f885fed4b6a40c5b08f9352621259b05f4c 100644 (file)
@@ -22,8 +22,6 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
-#include "etnaviv_iommu_v2.h"
 #include "common.xml.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
@@ -329,6 +327,18 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                                gpu->identity.revision = 0x1051;
                        }
                }
+
+               /*
+                * NXP markets the GPU on the i.MX6QP as a GC2000+, but in
+                * reality it's just a re-branded GC3000. We can identify this
+                * core by the upper half of the revision register being all 1s.
+                * Fix model/rev here, so all other places can refer to this
+                * core by its real identity.
+                */
+               if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
+                       gpu->identity.model = chipModel_GC3000;
+                       gpu->identity.revision &= 0xffff;
+               }
        }
 
        dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -528,6 +538,14 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
        gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 }
 
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+{
+       gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
+       gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+                 VIVS_FE_COMMAND_CONTROL_ENABLE |
+                 VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
        u16 prefetch;
@@ -568,33 +586,20 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
                gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
        }
 
-       /* set base addresses */
-       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
-       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
-       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
-       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
-       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
-
-       /* setup the MMU page table pointers */
-       etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
+       /* setup the MMU */
+       etnaviv_iommu_restore(gpu);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
 
        gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
-       gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
-                 gpu->buffer->paddr - gpu->memory_base);
-       gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
-                 VIVS_FE_COMMAND_CONTROL_ENABLE |
-                 VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+       etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+                            prefetch);
 }
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 {
        int ret, i;
-       struct iommu_domain *iommu;
-       enum etnaviv_iommu_version version;
-       bool mmuv2;
 
        ret = pm_runtime_get_sync(gpu->dev);
        if (ret < 0) {
@@ -642,32 +647,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
                goto fail;
        }
 
-       /* Setup IOMMU.. eventually we will (I think) do this once per context
-        * and have separate page tables per context.  For now, to keep things
-        * simple and to get something working, just use a single address space:
-        */
-       mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
-       dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
-
-       if (!mmuv2) {
-               iommu = etnaviv_iommu_domain_alloc(gpu);
-               version = ETNAVIV_IOMMU_V1;
-       } else {
-               iommu = etnaviv_iommu_v2_domain_alloc(gpu);
-               version = ETNAVIV_IOMMU_V2;
-       }
-
-       if (!iommu) {
-               dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
-       if (!gpu->mmu) {
+       gpu->mmu = etnaviv_iommu_new(gpu);
+       if (IS_ERR(gpu->mmu)) {
                dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
-               iommu_domain_free(iommu);
-               ret = -ENOMEM;
+               ret = PTR_ERR(gpu->mmu);
                goto fail;
        }
 
@@ -678,7 +661,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
                dev_err(gpu->dev, "could not create command buffer\n");
                goto destroy_iommu;
        }
-       if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+
+       if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
+           gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
                ret = -EINVAL;
                dev_err(gpu->dev,
                        "command buffer outside valid memory window\n");
@@ -867,45 +852,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 }
 #endif
 
-/*
- * Power Management:
- */
-static int enable_clk(struct etnaviv_gpu *gpu)
-{
-       if (gpu->clk_core)
-               clk_prepare_enable(gpu->clk_core);
-       if (gpu->clk_shader)
-               clk_prepare_enable(gpu->clk_shader);
-
-       return 0;
-}
-
-static int disable_clk(struct etnaviv_gpu *gpu)
-{
-       if (gpu->clk_core)
-               clk_disable_unprepare(gpu->clk_core);
-       if (gpu->clk_shader)
-               clk_disable_unprepare(gpu->clk_shader);
-
-       return 0;
-}
-
-static int enable_axi(struct etnaviv_gpu *gpu)
-{
-       if (gpu->clk_bus)
-               clk_prepare_enable(gpu->clk_bus);
-
-       return 0;
-}
-
-static int disable_axi(struct etnaviv_gpu *gpu)
-{
-       if (gpu->clk_bus)
-               clk_disable_unprepare(gpu->clk_bus);
-
-       return 0;
-}
-
 /*
  * Hangcheck detection for locked gpu:
  */
@@ -945,7 +891,7 @@ static void recover_worker(struct work_struct *work)
        gpu->completed_fence = gpu->active_fence;
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->switch_context = true;
+       gpu->lastctx = NULL;
        gpu->exec_state = -1;
 
        mutex_unlock(&gpu->lock);
@@ -1178,6 +1124,9 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
        if (!cmdbuf)
                return NULL;
 
+       if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
+               size = ALIGN(size, SZ_4K);
+
        cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
                                     GFP_KERNEL);
        if (!cmdbuf->vaddr) {
@@ -1193,6 +1142,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
 
 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
 {
+       etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
        dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
                    cmdbuf->paddr);
        kfree(cmdbuf);
@@ -1333,8 +1283,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        if (ret < 0)
                return ret;
 
-       mutex_lock(&gpu->lock);
-
        /*
         * TODO
         *
@@ -1348,16 +1296,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        if (unlikely(event == ~0U)) {
                DRM_ERROR("no free event\n");
                ret = -EBUSY;
-               goto out_unlock;
+               goto out_pm_put;
        }
 
        fence = etnaviv_gpu_fence_alloc(gpu);
        if (!fence) {
                event_free(gpu, event);
                ret = -ENOMEM;
-               goto out_unlock;
+               goto out_pm_put;
        }
 
+       mutex_lock(&gpu->lock);
+
        gpu->event[event].fence = fence;
        submit->fence = fence->seqno;
        gpu->active_fence = submit->fence;
@@ -1395,9 +1345,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        hangcheck_timer_reset(gpu);
        ret = 0;
 
-out_unlock:
        mutex_unlock(&gpu->lock);
 
+out_pm_put:
        etnaviv_gpu_pm_put(gpu);
 
        return ret;
@@ -1425,6 +1375,21 @@ static irqreturn_t irq_handler(int irq, void *data)
                        intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
                }
 
+               if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
+                       int i;
+
+                       dev_err_ratelimited(gpu->dev,
+                               "MMU fault status 0x%08x\n",
+                               gpu_read(gpu, VIVS_MMUv2_STATUS));
+                       for (i = 0; i < 4; i++) {
+                               dev_err_ratelimited(gpu->dev,
+                                       "MMU %d fault addr 0x%08x\n",
+                                       i, gpu_read(gpu,
+                                       VIVS_MMUv2_EXCEPTION_ADDR(i)));
+                       }
+                       intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
+               }
+
                while ((event = ffs(intr)) != 0) {
                        struct fence *fence;
 
@@ -1466,39 +1431,72 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
 {
        int ret;
 
-       ret = enable_clk(gpu);
-       if (ret)
-               return ret;
+       if (gpu->clk_bus) {
+               ret = clk_prepare_enable(gpu->clk_bus);
+               if (ret)
+                       return ret;
+       }
 
-       ret = enable_axi(gpu);
-       if (ret) {
-               disable_clk(gpu);
-               return ret;
+       if (gpu->clk_core) {
+               ret = clk_prepare_enable(gpu->clk_core);
+               if (ret)
+                       goto disable_clk_bus;
+       }
+
+       if (gpu->clk_shader) {
+               ret = clk_prepare_enable(gpu->clk_shader);
+               if (ret)
+                       goto disable_clk_core;
        }
 
        return 0;
+
+disable_clk_core:
+       if (gpu->clk_core)
+               clk_disable_unprepare(gpu->clk_core);
+disable_clk_bus:
+       if (gpu->clk_bus)
+               clk_disable_unprepare(gpu->clk_bus);
+
+       return ret;
 }
 
 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
 {
-       int ret;
+       if (gpu->clk_shader)
+               clk_disable_unprepare(gpu->clk_shader);
+       if (gpu->clk_core)
+               clk_disable_unprepare(gpu->clk_core);
+       if (gpu->clk_bus)
+               clk_disable_unprepare(gpu->clk_bus);
 
-       ret = disable_axi(gpu);
-       if (ret)
-               return ret;
+       return 0;
+}
 
-       ret = disable_clk(gpu);
-       if (ret)
-               return ret;
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 
-       return 0;
+       do {
+               u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+               if ((idle & gpu->idle_mask) == gpu->idle_mask)
+                       return 0;
+
+               if (time_is_before_jiffies(timeout)) {
+                       dev_warn(gpu->dev,
+                                "timed out waiting for idle: idle=0x%x\n",
+                                idle);
+                       return -ETIMEDOUT;
+               }
+
+               udelay(5);
+       } while (1);
 }
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
        if (gpu->buffer) {
-               unsigned long timeout;
-
                /* Replace the last WAIT with END */
                etnaviv_buffer_end(gpu);
 
@@ -1507,22 +1505,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
                 * happen quickly (as the WAIT is only 200 cycles).  If
                 * we fail, just warn and continue.
                 */
-               timeout = jiffies + msecs_to_jiffies(100);
-               do {
-                       u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
-
-                       if ((idle & gpu->idle_mask) == gpu->idle_mask)
-                               break;
-
-                       if (time_is_before_jiffies(timeout)) {
-                               dev_warn(gpu->dev,
-                                        "timed out waiting for idle: idle=0x%x\n",
-                                        idle);
-                               break;
-                       }
-
-                       udelay(5);
-               } while (1);
+               etnaviv_gpu_wait_idle(gpu, 100);
        }
 
        return etnaviv_gpu_clk_disable(gpu);
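etnaviv_gpu_wait_idle() factors out the polling loop that hw_suspend used to
open-code, as shown above; other callers pass their own millisecond budget. A
usage sketch (the error handling is illustrative):

	if (etnaviv_gpu_wait_idle(gpu, 100) == -ETIMEDOUT)
		dev_err(gpu->dev, "GPU not idle, resetting anyway\n");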
@@ -1634,7 +1617,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct etnaviv_gpu *gpu;
-       int err = 0;
+       int err;
 
        gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
        if (!gpu)
@@ -1651,16 +1634,15 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
        /* Get Interrupt: */
        gpu->irq = platform_get_irq(pdev, 0);
        if (gpu->irq < 0) {
-               err = gpu->irq;
-               dev_err(dev, "failed to get irq: %d\n", err);
-               goto fail;
+               dev_err(dev, "failed to get irq: %d\n", gpu->irq);
+               return gpu->irq;
        }
 
        err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
                               dev_name(gpu->dev), gpu);
        if (err) {
                dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
-               goto fail;
+               return err;
        }
 
        /* Get Clocks: */
@@ -1694,13 +1676,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
        err = component_add(&pdev->dev, &gpu_ops);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register component: %d\n", err);
-               goto fail;
+               return err;
        }
 
        return 0;
-
-fail:
-       return err;
 }
 
 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
index a69cdd526bf83149af1ee486c34d96a6e1913123..73c278dc37061a54a5c23dc24559c85d5b1d8525 100644 (file)
@@ -160,6 +160,8 @@ struct etnaviv_cmdbuf {
        dma_addr_t paddr;
        u32 size;
        u32 user_size;
+       /* vram node used if the cmdbuf is mapped through the MMUv2 */
+       struct drm_mm_node vram_node;
        /* fence after which this buffer is to be disposed */
        struct fence *fence;
        /* target exec state */
@@ -214,6 +216,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
 
 extern struct platform_driver etnaviv_gpu_driver;
 
index 16353ee816516ef33348832ef31a5a089a95829b..81f1583a79463374d2d36c5b4787e0e7bf32a56f 100644 (file)
@@ -196,12 +196,19 @@ static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
        .dump = etnaviv_iommuv1_dump,
 };
 
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
-       struct iommu_domain *domain)
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 {
-       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+       struct etnaviv_iommu_domain *etnaviv_domain =
+                       to_etnaviv_domain(gpu->mmu->domain);
        u32 pgtable;
 
+       /* set base addresses */
+       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
+       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
+       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
+       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
+       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+
        /* set page table address in MC */
        pgtable = (u32)etnaviv_domain->pgtable.paddr;
 
@@ -212,7 +219,7 @@ void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
        gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }
 
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
 {
        struct etnaviv_iommu_domain *etnaviv_domain;
        int ret;
index cf45503f6b6f45e21cfb4ccaf8df0a6a6ce04f86..8b51e7c16feb30eef67c18aabfe1cfc1093d3e7e 100644 (file)
 #ifndef __ETNAVIV_IOMMU_H__
 #define __ETNAVIV_IOMMU_H__
 
-#include <linux/iommu.h>
 struct etnaviv_gpu;
 
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
-       struct iommu_domain *domain);
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
+
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_IOMMU_H__ */
index fbb4aed3dc80574505af3c51bf319be538f003f3..7e9c4d210a8486af9e779cb88b4da2fb88a2309b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ * Copyright (C) 2016 Etnaviv Project
   *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
 #include <linux/bitops.h>
 
 #include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
 #include "etnaviv_iommu.h"
+#include "state.xml.h"
 #include "state_hi.xml.h"
 
+#define MMUv2_PTE_PRESENT              BIT(0)
+#define MMUv2_PTE_EXCEPTION            BIT(1)
+#define MMUv2_PTE_WRITEABLE            BIT(2)
 
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+#define MMUv2_MTLB_MASK                        0xffc00000
+#define MMUv2_MTLB_SHIFT               22
+#define MMUv2_STLB_MASK                        0x003ff000
+#define MMUv2_STLB_SHIFT               12
+
+#define MMUv2_MAX_STLB_ENTRIES         1024
+
+struct etnaviv_iommuv2_domain {
+       struct iommu_domain domain;
+       struct device *dev;
+       void *bad_page_cpu;
+       dma_addr_t bad_page_dma;
+       /* M(aster) TLB aka first level pagetable */
+       u32 *mtlb_cpu;
+       dma_addr_t mtlb_dma;
+       /* S(lave) TLB aka second level pagetable */
+       u32 *stlb_cpu[1024];
+       dma_addr_t stlb_dma[1024];
+};
+
+static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+{
+       return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+}
+
+static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
+          phys_addr_t paddr, size_t size, int prot)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       int mtlb_entry, stlb_entry;
+       u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
+
+       if (size != SZ_4K)
+               return -EINVAL;
+
+       if (prot & IOMMU_WRITE)
+               entry |= MMUv2_PTE_WRITEABLE;
+
+       mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+       stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
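+       /* e.g. iova 0x00c03000 decomposes to mtlb_entry = 3, stlb_entry = 3 */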
+
+       etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+
+       return 0;
+}
+
+static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
+       unsigned long iova, size_t size)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       int mtlb_entry, stlb_entry;
+
+       if (size != SZ_4K)
+               return -EINVAL;
+
+       mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+       stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+       etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;
+
+       return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
+       dma_addr_t iova)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       int mtlb_entry, stlb_entry;
+
+       mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+       stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+       return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
+}
+
+static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+{
+       u32 *p;
+       int ret, i, j;
+
+       /* allocate scratch page */
+       etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+                                                 SZ_4K,
+                                                 &etnaviv_domain->bad_page_dma,
+                                                 GFP_KERNEL);
+       if (!etnaviv_domain->bad_page_cpu) {
+               ret = -ENOMEM;
+               goto fail_mem;
+       }
+       p = etnaviv_domain->bad_page_cpu;
+       for (i = 0; i < SZ_4K / 4; i++)
+               *p++ = 0xdead55aa;
+
+       etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+                                                 SZ_4K,
+                                                 &etnaviv_domain->mtlb_dma,
+                                                 GFP_KERNEL);
+       if (!etnaviv_domain->mtlb_cpu) {
+               ret = -ENOMEM;
+               goto fail_mem;
+       }
+
+       /* pre-populate STLB pages (may want to switch to on-demand later) */
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+               etnaviv_domain->stlb_cpu[i] =
+                               dma_alloc_coherent(etnaviv_domain->dev,
+                                                  SZ_4K,
+                                                  &etnaviv_domain->stlb_dma[i],
+                                                  GFP_KERNEL);
+               if (!etnaviv_domain->stlb_cpu[i]) {
+                       ret = -ENOMEM;
+                       goto fail_mem;
+               }
+               p = etnaviv_domain->stlb_cpu[i];
+               for (j = 0; j < SZ_4K / 4; j++)
+                       *p++ = MMUv2_PTE_EXCEPTION;
+
+               etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
+                                             MMUv2_PTE_PRESENT;
+       }
+
+       return 0;
+
+fail_mem:
+       if (etnaviv_domain->bad_page_cpu)
+               dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                                 etnaviv_domain->bad_page_cpu,
+                                 etnaviv_domain->bad_page_dma);
+
+       if (etnaviv_domain->mtlb_cpu)
+               dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                                 etnaviv_domain->mtlb_cpu,
+                                 etnaviv_domain->mtlb_dma);
+
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+               if (etnaviv_domain->stlb_cpu[i])
+                       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                                         etnaviv_domain->stlb_cpu[i],
+                                         etnaviv_domain->stlb_dma[i]);
+       }
+
+       return ret;
+}
+
+static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       int i;
+
+       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                         etnaviv_domain->bad_page_cpu,
+                         etnaviv_domain->bad_page_dma);
+
+       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                         etnaviv_domain->mtlb_cpu,
+                         etnaviv_domain->mtlb_dma);
+
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+               if (etnaviv_domain->stlb_cpu[i])
+                       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                                         etnaviv_domain->stlb_cpu[i],
+                                         etnaviv_domain->stlb_dma[i]);
+       }
+
+       vfree(etnaviv_domain);
+}
+
+static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       size_t dump_size = SZ_4K;
+       int i;
+
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+               if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+                       dump_size += SZ_4K;
+
+       return dump_size;
+}
+
+static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 {
-       /* TODO */
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(domain);
+       int i;
+
+       memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+       buf += SZ_4K;
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
+               if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+                       memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+       .ops = {
+               .domain_free = etnaviv_iommuv2_domain_free,
+               .map = etnaviv_iommuv2_map,
+               .unmap = etnaviv_iommuv2_unmap,
+               .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
+               .pgsize_bitmap = SZ_4K,
+       },
+       .dump_size = etnaviv_iommuv2_dump_size,
+       .dump = etnaviv_iommuv2_dump,
+};
+
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain =
+                       to_etnaviv_domain(gpu->mmu->domain);
+       u16 prefetch;
+
+       /* If the MMU is already enabled, the state is still there. */
+       if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+               return;
+
+       prefetch = etnaviv_buffer_config_mmuv2(gpu,
+                               (u32)etnaviv_domain->mtlb_dma,
+                               (u32)etnaviv_domain->bad_page_dma);
+       etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+       etnaviv_gpu_wait_idle(gpu, 100);
+
+       gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
+}
+
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+       struct etnaviv_iommuv2_domain *etnaviv_domain;
+       int ret;
+
+       etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
+       if (!etnaviv_domain)
+               return NULL;
+
+       etnaviv_domain->dev = gpu->dev;
+
+       etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+       etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+       etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
+       etnaviv_domain->domain.geometry.aperture_start = 0;
+       etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+
+       ret = etnaviv_iommuv2_init(etnaviv_domain);
+       if (ret)
+               goto out_free;
+
+       return &etnaviv_domain->domain;
+
+out_free:
+       vfree(etnaviv_domain);
        return NULL;
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
deleted file mode 100644 (file)
index 603ea41..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
-  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ETNAVIV_IOMMU_V2_H__
-#define __ETNAVIV_IOMMU_V2_H__
-
-#include <linux/iommu.h>
-struct etnaviv_gpu;
-
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
-
-#endif /* __ETNAVIV_IOMMU_V2_H__ */
index 29a723fabc17a7fc5f81d733501d28780e0dbcc2..d3796ed8d8c5b2808cd9edba22d5d10a57563917 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "common.xml.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
 #include "etnaviv_mmu.h"
 
 static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
@@ -101,40 +103,21 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        drm_mm_remove_node(&mapping->vram_node);
 }
 
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
-       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
-       struct etnaviv_vram_mapping *mapping)
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+                                  struct drm_mm_node *node, size_t size)
 {
        struct etnaviv_vram_mapping *free = NULL;
-       struct sg_table *sgt = etnaviv_obj->sgt;
-       struct drm_mm_node *node;
        int ret;
 
-       lockdep_assert_held(&etnaviv_obj->lock);
-
-       mutex_lock(&mmu->lock);
-
-       /* v1 MMU can optimize single entry (contiguous) scatterlists */
-       if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
-               u32 iova;
-
-               iova = sg_dma_address(sgt->sgl) - memory_base;
-               if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
-                       mapping->iova = iova;
-                       list_add_tail(&mapping->mmu_node, &mmu->mappings);
-                       mutex_unlock(&mmu->lock);
-                       return 0;
-               }
-       }
+       lockdep_assert_held(&mmu->lock);
 
-       node = &mapping->vram_node;
        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct list_head list;
                bool found;
 
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-                       etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+                       size, 0, mmu->last_iova, ~0UL,
                        DRM_MM_SEARCH_DEFAULT);
 
                if (ret != -ENOSPC)
@@ -151,7 +134,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
                }
 
                /* Try to retire some entries */
-               drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+               drm_mm_init_scan(&mmu->mm, size, 0, 0);
 
                found = 0;
                INIT_LIST_HEAD(&list);
@@ -212,6 +195,38 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
                mmu->need_flush = true;
        }
 
+       return ret;
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+       struct etnaviv_vram_mapping *mapping)
+{
+       struct sg_table *sgt = etnaviv_obj->sgt;
+       struct drm_mm_node *node;
+       int ret;
+
+       lockdep_assert_held(&etnaviv_obj->lock);
+
+       mutex_lock(&mmu->lock);
+
+       /* v1 MMU can optimize single entry (contiguous) scatterlists */
+       if (mmu->version == ETNAVIV_IOMMU_V1 &&
+           sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+               u32 iova;
+
+               iova = sg_dma_address(sgt->sgl) - memory_base;
+               if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+                       mapping->iova = iova;
+                       list_add_tail(&mapping->mmu_node, &mmu->mappings);
+                       mutex_unlock(&mmu->lock);
+                       return 0;
+               }
+       }
+
+       node = &mapping->vram_node;
+
+       ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0) {
                mutex_unlock(&mmu->lock);
                return ret;
@@ -256,30 +271,102 @@ void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
        kfree(mmu);
 }
 
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
-       struct iommu_domain *domain, enum etnaviv_iommu_version version)
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
 {
+       enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;
 
        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);
 
-       mmu->domain = domain;
+       if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
+               mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
+               version = ETNAVIV_IOMMU_V1;
+       } else {
+               mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
+               version = ETNAVIV_IOMMU_V2;
+       }
+
+       if (!mmu->domain) {
+               dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
+               kfree(mmu);
+               return ERR_PTR(-ENOMEM);
+       }
+
        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);
 
-       drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
-                   domain->geometry.aperture_end -
-                     domain->geometry.aperture_start + 1);
+       drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
+                   mmu->domain->geometry.aperture_end -
+                   mmu->domain->geometry.aperture_start + 1);
 
-       iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+       iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
 
        return mmu;
 }
 
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+{
+       if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+               etnaviv_iommuv1_restore(gpu);
+       else
+               etnaviv_iommuv2_restore(gpu);
+}
+
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+                               struct etnaviv_cmdbuf *buf)
+{
+       struct etnaviv_iommu *mmu = gpu->mmu;
+
+       if (mmu->version == ETNAVIV_IOMMU_V1) {
+               return buf->paddr - gpu->memory_base;
+       } else {
+               int ret;
+
+               if (buf->vram_node.allocated)
+                       return (u32)buf->vram_node.start;
+
+               mutex_lock(&mmu->lock);
+               ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+               if (ret < 0) {
+                       mutex_unlock(&mmu->lock);
+                       return 0;
+               }
+               ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
+                               buf->size, IOMMU_READ);
+               if (ret < 0) {
+                       drm_mm_remove_node(&buf->vram_node);
+                       mutex_unlock(&mmu->lock);
+                       return 0;
+               }
+               /*
+                * At least on GC3000 the FE MMU doesn't properly flush old TLB
+                * entries. Make sure to space the command buffers out in a way
+                * that the FE MMU prefetch won't load invalid entries.
+                */
+               mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+               gpu->mmu->need_flush = true;
+               mutex_unlock(&mmu->lock);
+
+               return (u32)buf->vram_node.start;
+       }
+}
+
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+                                struct etnaviv_cmdbuf *buf)
+{
+       struct etnaviv_iommu *mmu = gpu->mmu;
+
+       if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+               mutex_lock(&mmu->lock);
+               iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
+               drm_mm_remove_node(&buf->vram_node);
+               mutex_unlock(&mmu->lock);
+       }
+}
+
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
 {
        struct etnaviv_iommu_ops *ops;
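The two helpers above give command buffers a GPU virtual address that depends on the MMU version: on v1 the FE fetches through the linear window, so the address is simply the physical address minus memory_base, while on v2 the buffer is mapped read-only into the IOMMU domain and the allocation cursor is padded by SZ_64K so the FE prefetcher never walks into stale TLB entries. A minimal caller-side sketch of the get/put pairing (queue_to_fe() is a hypothetical stand-in for the FE kick; note the helpers signal failure by returning 0):

	/* Sketch only: queue_to_fe() is illustrative, the two
	 * etnaviv_iommu_*_cmdbuf_va() helpers are from the patch. */
	static int submit_one(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
	{
		u32 va = etnaviv_iommu_get_cmdbuf_va(gpu, buf);

		if (!va)
			return -ENOMEM;		/* v2 IOVA or iommu_map() failed */

		queue_to_fe(gpu, va, buf->size);
		return 0;
		/* on teardown: etnaviv_iommu_put_cmdbuf_va(gpu, buf); */
	}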
index fff215a47630598db849c349b09394fd12eafaf0..e787e49c9693cf74f08177649e1495af8e212f87 100644 (file)
@@ -62,10 +62,15 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+                               struct etnaviv_cmdbuf *buf);
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+                                struct etnaviv_cmdbuf *buf);
+
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
 
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
-       struct iommu_domain *domain, enum etnaviv_iommu_version version);
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_MMU_H__ */
index 807a3d9e0dd5af0a8e0f3944fa748a41ac16c021..43c73e2ed34fba825de434964801f69a807766fd 100644 (file)
@@ -8,10 +8,10 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
-- common.xml   (  18437 bytes, from 2015-12-12 09:02:53)
+- state_hi.xml (  25620 bytes, from 2016-08-19 22:07:37)
+- common.xml   (  20583 bytes, from 2016-06-07 05:22:38)
 
-Copyright (C) 2015
+Copyright (C) 2016
 */
 
 
@@ -78,9 +78,10 @@ Copyright (C) 2015
 #define VIVS_HI_AXI_STATUS_DET_RD_ERR                          0x00000200
 
 #define VIVS_HI_INTR_ACKNOWLEDGE                               0x00000010
-#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK                        0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK                        0x3fffffff
 #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT               0
 #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x)                   (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION                 0x40000000
 #define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR                 0x80000000
 
 #define VIVS_HI_INTR_ENBL                                      0x00000014
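The regenerated header carves bit 30 out of the interrupt vector: it is now a dedicated MMU-exception status bit, which is why the vector mask shrinks from 0x7fffffff to 0x3fffffff. A hedged sketch of how a handler would split the acknowledge value (gpu_read() is the driver's MMIO accessor; report_mmu_fault() is illustrative):

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR)
		dev_err(gpu->dev, "AXI bus error\n");
	if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION)
		report_mmu_fault(gpu);				/* hypothetical */
	intr &= VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK;	/* event bits only */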
index 877d2efa28e2171a1d2e07f4feef9a9fdd1eac7c..486943e70f70bc7137a8e64a156cf8d12b8e9bc3 100644 (file)
@@ -105,7 +105,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
                atomic_inc(&exynos_crtc->pending_update);
        }
 
-       drm_atomic_helper_commit_planes(dev, state, false);
+       drm_atomic_helper_commit_planes(dev, state, 0);
 
        exynos_atomic_wait_for_commit(state);
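drm_atomic_helper_commit_planes() changed its third argument from a bool (active_only) to a flags bitmask, so the former false becomes a literal 0 here. For reference, the flag-based equivalent of the old true, assuming the DRM_PLANE_COMMIT_ACTIVE_ONLY flag introduced by the same helper rework:

	/* only touch planes whose CRTC is active */
	drm_atomic_helper_commit_planes(dev, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);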
 
index e0166403b4bd5849cab6418a4d5de90b1a18e11c..40ce841eb9529b2f8ce3f3be8049780b64bd91a2 100644 (file)
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
        flags = exynos_gem->flags;
 
        /*
-        * without iommu support, not support physically non-continuous memory
-        * for framebuffer.
+        * Physically non-contiguous memory type for framebuffer is not
+        * supported without IOMMU.
         */
        if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_ERROR("cannot use this gem memory type for fb.\n");
+               DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
                return -EINVAL;
        }
 
index 0525c56145db194c81297b6669e31d6e62aa61f3..147ef0d298cb0788ef391932d741ad4edbfedb93 100644 (file)
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int fimc_suspend(struct device *dev)
-{
-       struct fimc_context *ctx = get_fimc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return fimc_clk_ctrl(ctx, false);
-}
-
-static int fimc_resume(struct device *dev)
-{
-       struct fimc_context *ctx = get_fimc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (!pm_runtime_suspended(dev))
-               return fimc_clk_ctrl(ctx, true);
-
-       return 0;
-}
-#endif
-
 static int fimc_runtime_suspend(struct device *dev)
 {
        struct fimc_context *ctx = get_fimc_context(dev);
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops fimc_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
 };
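This and the g2d/gsc/rotator hunks below drop the hand-rolled system-sleep callbacks in favour of pm_runtime_force_suspend()/pm_runtime_force_resume(), which reuse the driver's runtime-PM callbacks at system suspend time. A rough paraphrase of the suspend side, for orientation only (the real helper lives in drivers/base/power/runtime.c and resolves the callback through the PM domain/bus/class layers):

	/* paraphrased sketch, not the exact kernel implementation */
	int pm_runtime_force_suspend(struct device *dev)
	{
		int (*cb)(struct device *) = dev->driver->pm->runtime_suspend;
		int ret;

		pm_runtime_disable(dev);		/* block racing runtime PM */
		if (pm_runtime_status_suspended(dev))
			return 0;			/* already suspended */

		ret = cb ? cb(dev) : 0;
		if (ret == 0)
			pm_runtime_set_suspended(dev);	/* keep status coherent */
		else
			pm_runtime_enable(dev);
		return ret;
	}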
 
index 4bf00f57ffe89a1566ea08cab4616db5c0c6a331..6eca8bb886486e7bbbd0d41e6cdad82f06b8e9cc 100644 (file)
@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int g2d_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
 {
        struct g2d_data *g2d = dev_get_drvdata(dev);
 
@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev)
 
        flush_work(&g2d->runqueue_work);
 
-       return 0;
-}
-
-static int g2d_resume(struct device *dev)
-{
-       struct g2d_data *g2d = dev_get_drvdata(dev);
-
-       g2d->suspended = false;
-       g2d_exec_runqueue(g2d);
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
-{
-       struct g2d_data *g2d = dev_get_drvdata(dev);
-
        clk_disable_unprepare(g2d->gate_clk);
 
        return 0;
@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
        if (ret < 0)
                dev_warn(dev, "failed to enable clock.\n");
 
+       g2d->suspended = false;
+       g2d_exec_runqueue(g2d);
+
        return ret;
 }
 #endif
 
 static const struct dev_pm_ops g2d_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
 };
 
index 5d20da8f957e2eac724331bb866f7c1b96244087..52a9d269484e59545a2533f37436c8c8f180f563 100644 (file)
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int gsc_suspend(struct device *dev)
-{
-       struct gsc_context *ctx = get_gsc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return gsc_clk_ctrl(ctx, false);
-}
-
-static int gsc_resume(struct device *dev)
-{
-       struct gsc_context *ctx = get_gsc_context(dev);
-
-       DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-       if (!pm_runtime_suspended(dev))
-               return gsc_clk_ctrl(ctx, true);
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int gsc_runtime_suspend(struct device *dev)
+static int __maybe_unused gsc_runtime_suspend(struct device *dev)
 {
        struct gsc_context *ctx = get_gsc_context(dev);
 
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
        return  gsc_clk_ctrl(ctx, false);
 }
 
-static int gsc_runtime_resume(struct device *dev)
+static int __maybe_unused gsc_runtime_resume(struct device *dev)
 {
        struct gsc_context *ctx = get_gsc_context(dev);
 
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)
 
        return  gsc_clk_ctrl(ctx, true);
 }
-#endif
 
 static const struct dev_pm_ops gsc_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
 };
 
index 404367a430b54ffc6d9bab0d73b8f3427ee8e2f5..6591e406084c164d30c1945aa2f6d7ce559c67fd 100644 (file)
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
        return 0;
 }
 
-
-#ifdef CONFIG_PM_SLEEP
-static int rotator_suspend(struct device *dev)
-{
-       struct rot_context *rot = dev_get_drvdata(dev);
-
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       return rotator_clk_crtl(rot, false);
-}
-
-static int rotator_resume(struct device *dev)
-{
-       struct rot_context *rot = dev_get_drvdata(dev);
-
-       if (!pm_runtime_suspended(dev))
-               return rotator_clk_crtl(rot, true);
-
-       return 0;
-}
-#endif
-
 static int rotator_runtime_suspend(struct device *dev)
 {
        struct rot_context *rot = dev_get_drvdata(dev);
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops rotator_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
                                                                        NULL)
 };
index 7882387f9bffd5c661f30bc5800e58672a0a3bd8..0884c45aefe84a9800b2ec95c57d0f44d1259835 100644 (file)
@@ -270,7 +270,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        ret = clk_prepare_enable(fsl_dev->pix_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable pix clk\n");
-               return ret;
+               goto disable_dcu_clk;
        }
 
        fsl_dcu_drm_init_planes(fsl_dev->drm);
@@ -284,6 +284,10 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        enable_irq(fsl_dev->irq);
 
        return 0;
+
+disable_dcu_clk:
+       clk_disable_unprepare(fsl_dev->clk);
+       return ret;
 }
 #endif
 
@@ -330,6 +334,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
        const char *pix_clk_in_name;
        const struct of_device_id *id;
        int ret;
+       u8 div_ratio_shift = 0;
 
        fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
        if (!fsl_dev)
@@ -382,11 +387,14 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
                pix_clk_in = fsl_dev->clk;
        }
 
+       if (of_property_read_bool(dev->of_node, "big-endian"))
+               div_ratio_shift = 24;
+
        pix_clk_in_name = __clk_get_name(pix_clk_in);
        snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
        fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
                        pix_clk_in_name, 0, base + DCU_DIV_RATIO,
-                       0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+                       div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
        if (IS_ERR(fsl_dev->pix_clk)) {
                dev_err(dev, "failed to register pix clk\n");
                ret = PTR_ERR(fsl_dev->pix_clk);
@@ -402,8 +410,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
        fsl_dev->tcon = fsl_tcon_init(dev);
 
        drm = drm_dev_alloc(driver, dev);
-       if (!drm) {
-               ret = -ENOMEM;
+       if (IS_ERR(drm)) {
+               ret = PTR_ERR(drm);
                goto disable_pix_clk;
        }
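Two independent fixes land in this file: the resume path gains a proper unwind label so a pix_clk enable failure no longer leaks the already-enabled DCU clock, and the pixel-clock divider position is probed from DT, since on big-endian integrations of the DCU the 8-bit divider field sits in the top byte of DCU_DIV_RATIO. A worked illustration of where the field ends up (the driver actually goes through the clk framework; readl() here is just for the sketch):

	u32 reg = readl(base + DCU_DIV_RATIO);
	u32 div = (reg >> div_ratio_shift) & 0xff;	/* [7:0] on LE, [31:24] when "big-endian" is set */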
 
index e50467a0deb0451aa9c5723f8c97806f21add3d2..a7e5486bd1e934be88374df0ce08731f6286ee1a 100644 (file)
@@ -169,25 +169,10 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
        return;
 }
 
-static void
-fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
-                            const struct drm_plane_state *new_state)
-{
-}
-
-static int
-fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
-                            const struct drm_plane_state *new_state)
-{
-       return 0;
-}
-
 static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
        .atomic_check = fsl_dcu_drm_plane_atomic_check,
        .atomic_disable = fsl_dcu_drm_plane_atomic_disable,
        .atomic_update = fsl_dcu_drm_plane_atomic_update,
-       .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
-       .prepare_fb = fsl_dcu_drm_plane_prepare_fb,
 };
 
 static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
index bca09ea24632424f6899ff5c9ac9b4a1017a2300..3194e544ee27b902c4a300d2972781a832d54997 100644 (file)
@@ -57,10 +57,7 @@ static int fsl_tcon_init_regmap(struct device *dev,
 
        tcon->regs = devm_regmap_init_mmio(dev, regs,
                                           &fsl_tcon_regmap_config);
-       if (IS_ERR(tcon->regs))
-               return PTR_ERR(tcon->regs);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(tcon->regs);
 }
 
 struct fsl_tcon *fsl_tcon_init(struct device *dev)
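PTR_ERR_OR_ZERO() folds the IS_ERR()/PTR_ERR() tail into a single expression; it is defined in linux/err.h essentially as:

	/* re-spelled for illustration; the real helper lives in linux/err.h */
	static inline int ptr_err_or_zero(const void *ptr)
	{
		return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
	}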
index db9f7d011832943a2d3537228b538b368b4cc5c1..0d2bb16825082f0645eb06f645a9879d7d7c2026 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/tty.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/console.h>
 
index 38dc890831483c4aef716f33c6b33453c2cc1bef..ea733ab5b1e05376c97433e6b221886d1dce248f 100644 (file)
@@ -415,14 +415,6 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
        if (ret)
                return ret;
 
-       /* Didn't get an EDID, so
-        * Set wide sync ranges so we get all modes
-        * handed to valid_mode for checking
-        */
-       connector->display_info.min_vfreq = 0;
-       connector->display_info.max_vfreq = 200;
-       connector->display_info.min_hfreq = 0;
-       connector->display_info.max_hfreq = 200;
        if (mode_dev->panel_fixed_mode != NULL) {
                struct drm_display_mode *mode =
                    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
index 0fcdce0817de2b6a65a7d41bbe88b4349a66fe1c..3a44e705db53809279f40208fcd12852167d16c9 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/tty.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/console.h>
 
index 907cb51795c36fd4535444cbbd401d49a82ff0cb..acb3848ef1c947ce7cfe7a12dbcdbaf56586acbb 100644 (file)
@@ -335,11 +335,6 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
        struct drm_display_mode *dup_mode = NULL;
        struct drm_device *dev = connector->dev;
 
-       connector->display_info.min_vfreq = 0;
-       connector->display_info.max_vfreq = 200;
-       connector->display_info.min_hfreq = 0;
-       connector->display_info.max_hfreq = 200;
-
        if (fixed_mode) {
                dev_dbg(dev->dev, "fixed_mode %dx%d\n",
                                fixed_mode->hdisplay, fixed_mode->vdisplay);
index ab696ca7eeecc8544df4a9db2a1c74d00797bc9f..eab6d889bde980e816da94eda27110fbae6e7da8 100644 (file)
@@ -163,10 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
        if (bclp > 255)
                return ASLE_BACKLIGHT_FAILED;
 
-       if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
-               int max = bd->props.max_brightness;
-               gma_backlight_set(dev, bclp * max / 255);
-       }
+       gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
 
        asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
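With the config_enabled() guard gone (the opregion backlight path is only exercised when a backlight device exists), the scaling runs unconditionally: the 0-255 bclp request is rescaled to the backlight device's range, and cblv echoes the percentage back to firmware. A worked example of the arithmetic above, assuming bclp = 128 and max_brightness = 100:

	level = 128 * 100 / 255;			/* = 50, handed to gma_backlight_set() */
	cblv  = (128 * 0x64) / 0xff | ASLE_CBLV_VALID;	/* 50% reported via ASLE */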
 
index e55733ca46d2a51be77dcaa6ca0b9de39cb632c4..fd7c912548415f7b8b8996520e90422da5e03d74 100644 (file)
@@ -530,15 +530,6 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
        if (ret)
                return ret;
 
-       /* Didn't get an EDID, so
-        * Set wide sync ranges so we get all modes
-        * handed to valid_mode for checking
-        */
-       connector->display_info.min_vfreq = 0;
-       connector->display_info.max_vfreq = 200;
-       connector->display_info.min_hfreq = 0;
-       connector->display_info.max_hfreq = 200;
-
        if (mode_dev->panel_fixed_mode != NULL) {
                struct drm_display_mode *mode =
                    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
index 4fca0d6feebe1af7f8add72ce65253514ccc2364..e5360726d80b7531df0006c25327de9e36eac127 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/i2c.h>
-#include <linux/fb.h>
 #include <drm/drmP.h>
 #include "psb_intel_drv.h"
 
index c3707d47cd89762c592f7d4f644c004bea27ac74..7e7a4d43d6b698f2326e0e8d2765b0c27bb80164 100644 (file)
@@ -608,15 +608,17 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
                         u32 ch, u32 y, u32 in_h, u32 fmt)
 {
        struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+       char *format_name;
        u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
        u32 stride = fb->pitches[0];
        u32 addr = (u32)obj->paddr + y * stride;
 
        DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
                         ch + 1, y, in_h, stride, (u32)obj->paddr);
+       format_name = drm_get_format_name(fb->pixel_format);
        DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
-                        addr, fb->width, fb->height, fmt,
-                        drm_get_format_name(fb->pixel_format));
+                        addr, fb->width, fb->height, fmt, format_name);
+       kfree(format_name);
 
        /* get reg offset */
        reg_ctrl = RD_CH_CTRL(ch);
@@ -815,19 +817,6 @@ static void ade_disable_channel(struct ade_plane *aplane)
        ade_compositor_routing_disable(base, ch);
 }
 
-static int ade_plane_prepare_fb(struct drm_plane *plane,
-                               const struct drm_plane_state *new_state)
-{
-       /* do nothing */
-       return 0;
-}
-
-static void ade_plane_cleanup_fb(struct drm_plane *plane,
-                                const struct drm_plane_state *old_state)
-{
-       /* do nothing */
-}
-
 static int ade_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
 {
@@ -895,8 +884,6 @@ static void ade_plane_atomic_disable(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
-       .prepare_fb = ade_plane_prepare_fb,
-       .cleanup_fb = ade_plane_cleanup_fb,
        .atomic_check = ade_plane_atomic_check,
        .atomic_update = ade_plane_atomic_update,
        .atomic_disable = ade_plane_atomic_disable,
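Two core API changes drive this file: drm_get_format_name() now (in this kernel version) returns a freshly kmalloc'ed string that the caller must free, and the atomic helpers accept NULL .prepare_fb/.cleanup_fb, making the empty stubs dead weight. The allocation contract in short:

	char *name = drm_get_format_name(fb->pixel_format);	/* heap-allocated */
	DRM_DEBUG_DRIVER("format: %s\n", name);
	kfree(name);						/* caller owns it */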
index 1edd9bc802947f00cc1868e1e82a13f463c5b537..90377a609c986e836f3c90e03ed327b0af10ac27 100644 (file)
@@ -169,7 +169,7 @@ static int kirin_gem_cma_dumb_create(struct drm_file *file,
 
 static struct drm_driver kirin_drm_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
-                                 DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+                                 DRIVER_ATOMIC,
        .fops                   = &kirin_drm_fops,
 
        .gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -207,8 +207,8 @@ static int kirin_drm_bind(struct device *dev)
        int ret;
 
        drm_dev = drm_dev_alloc(driver, dev);
-       if (!drm_dev)
-               return -ENOMEM;
+       if (IS_ERR(drm_dev))
+               return PTR_ERR(drm_dev);
 
        drm_dev->platformdev = to_platform_device(dev);
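drm_dev_alloc() was converted from returning NULL on failure to returning an ERR_PTR, so a leftover NULL check would let an error pointer escape and oops on first dereference; this hunk and the fsl-dcu one above switch to the required pattern. DRIVER_HAVE_IRQ is also dropped, since that flag only matters for drivers using the legacy drm_irq_install() path.

	drm = drm_dev_alloc(driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);	/* e.g. -ENOMEM, never a bare NULL now */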
 
index 4d341db462a2442878991e45a3438d770f1afe64..a6c92beb410a7d38f6eef7ee435c9014999018ac 100644 (file)
@@ -22,6 +22,7 @@ config DRM_I2C_SIL164
 config DRM_I2C_NXP_TDA998X
        tristate "NXP Semiconductors TDA998X HDMI encoder"
        default m if DRM_TILCDC
+       select SND_SOC_HDMI_CODEC if SND_SOC
        help
          Support for NXP Semiconductors TDA998X HDMI encoders.
 
index f4315bc8d4711409db9adb23298ba81be6792f64..9798d400d8174750522b1704be66364fe9425318 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <sound/asoundef.h>
+#include <sound/hdmi-codec.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+struct tda998x_audio_port {
+       u8 format;              /* AFMT_xxx */
+       u8 config;              /* AP value */
+};
+
 struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
@@ -41,7 +47,10 @@ struct tda998x_priv {
        u8 vip_cntrl_0;
        u8 vip_cntrl_1;
        u8 vip_cntrl_2;
-       struct tda998x_encoder_params params;
+       struct tda998x_audio_params audio_params;
+
+       struct platform_device *audio_pdev;
+       struct mutex audio_mutex;
 
        wait_queue_head_t wq_edid;
        volatile int wq_edid_wait;
@@ -53,6 +62,8 @@ struct tda998x_priv {
 
        struct drm_encoder encoder;
        struct drm_connector connector;
+
+       struct tda998x_audio_port audio_port[2];
 };
 
 #define conn_to_tda998x_priv(x) \
@@ -666,26 +677,16 @@ tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
        reg_set(priv, REG_DIP_IF_FLAGS, bit);
 }
 
-static void
-tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
+static int tda998x_write_aif(struct tda998x_priv *priv,
+                            struct hdmi_audio_infoframe *cea)
 {
        union hdmi_infoframe frame;
 
-       hdmi_audio_infoframe_init(&frame.audio);
-
-       frame.audio.channels = p->audio_frame[1] & 0x07;
-       frame.audio.channel_allocation = p->audio_frame[4];
-       frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
-       frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
-
-       /*
-        * L-PCM and IEC61937 compressed audio shall always set sample
-        * frequency to "refer to stream".  For others, see the HDMI
-        * specification.
-        */
-       frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
+       frame.audio = *cea;
 
        tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
+
+       return 0;
 }
 
 static void
@@ -710,20 +711,21 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
        }
 }
 
-static void
+static int
 tda998x_configure_audio(struct tda998x_priv *priv,
-               struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+                       struct tda998x_audio_params *params,
+                       unsigned mode_clock)
 {
        u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
        u32 n;
 
        /* Enable audio ports */
-       reg_write(priv, REG_ENA_AP, p->audio_cfg);
-       reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
+       reg_write(priv, REG_ENA_AP, params->config);
 
        /* Set audio input source */
-       switch (p->audio_format) {
+       switch (params->format) {
        case AFMT_SPDIF:
+               reg_write(priv, REG_ENA_ACLK, 0);
                reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
                clksel_aip = AIP_CLKSEL_AIP_SPDIF;
                clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
@@ -731,15 +733,29 @@ tda998x_configure_audio(struct tda998x_priv *priv,
                break;
 
        case AFMT_I2S:
+               reg_write(priv, REG_ENA_ACLK, 1);
                reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
                clksel_aip = AIP_CLKSEL_AIP_I2S;
                clksel_fs = AIP_CLKSEL_FS_ACLK;
-               cts_n = CTS_N_M(3) | CTS_N_K(3);
+               switch (params->sample_width) {
+               case 16:
+                       cts_n = CTS_N_M(3) | CTS_N_K(1);
+                       break;
+               case 18:
+               case 20:
+               case 24:
+                       cts_n = CTS_N_M(3) | CTS_N_K(2);
+                       break;
+               default:
+               case 32:
+                       cts_n = CTS_N_M(3) | CTS_N_K(3);
+                       break;
+               }
                break;
 
        default:
-               BUG();
-               return;
+               dev_err(&priv->hdmi->dev, "Unsupported audio format\n");
+               return -EINVAL;
        }
 
        reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
@@ -755,11 +771,11 @@ tda998x_configure_audio(struct tda998x_priv *priv,
         * assume 100MHz requires larger divider.
         */
        adiv = AUDIO_DIV_SERCLK_8;
-       if (mode->clock > 100000)
+       if (mode_clock > 100000)
                adiv++;                 /* AUDIO_DIV_SERCLK_16 */
 
        /* S/PDIF asks for a larger divider */
-       if (p->audio_format == AFMT_SPDIF)
+       if (params->format == AFMT_SPDIF)
                adiv++;                 /* AUDIO_DIV_SERCLK_16 or _32 */
 
        reg_write(priv, REG_AUDIO_DIV, adiv);
@@ -768,7 +784,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
         * This is the approximate value of N, which happens to be
         * the recommended values for non-coherent clocks.
         */
-       n = 128 * p->audio_sample_rate / 1000;
+       n = 128 * params->sample_rate / 1000;
 
        /* Write the CTS and N values */
        buf[0] = 0x44;
@@ -786,20 +802,21 @@ tda998x_configure_audio(struct tda998x_priv *priv,
        reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
        reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 
-       /* Write the channel status */
-       buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
-       buf[1] = 0x00;
-       buf[2] = IEC958_AES3_CON_FS_NOTID;
-       buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
-                       IEC958_AES4_CON_MAX_WORDLEN_24;
+       /*
+        * Write the channel status. The REG_CH_STAT_B registers skip
+        * the IEC958 AES2 byte, because there is a separate register
+        * for each I2S wire.
+        */
+       buf[0] = params->status[0];
+       buf[1] = params->status[1];
+       buf[2] = params->status[3];
+       buf[3] = params->status[4];
        reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
 
        tda998x_audio_mute(priv, true);
        msleep(20);
        tda998x_audio_mute(priv, false);
 
-       /* Write the audio information packet */
-       tda998x_write_aif(priv, p);
+       return tda998x_write_aif(priv, &params->cea);
 }
 
 /* DRM encoder functions */
@@ -820,7 +837,7 @@ static void tda998x_encoder_set_config(struct tda998x_priv *priv,
                            VIP_CNTRL_2_SWAP_F(p->swap_f) |
                            (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
 
-       priv->params = *p;
+       priv->audio_params = p->audio_params;
 }
 
 static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -1057,9 +1074,13 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 
                tda998x_write_avi(priv, adjusted_mode);
 
-               if (priv->params.audio_cfg)
-                       tda998x_configure_audio(priv, adjusted_mode,
-                                               &priv->params);
+               if (priv->audio_params.format != AFMT_UNUSED) {
+                       mutex_lock(&priv->audio_mutex);
+                       tda998x_configure_audio(priv,
+                                               &priv->audio_params,
+                                               adjusted_mode->clock);
+                       mutex_unlock(&priv->audio_mutex);
+               }
        }
 }
 
@@ -1159,6 +1180,8 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
        drm_mode_connector_update_edid_property(connector, edid);
        n = drm_add_edid_modes(connector, edid);
        priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+       drm_edid_to_eld(connector, edid);
+
        kfree(edid);
 
        return n;
@@ -1180,6 +1203,9 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
 
+       if (priv->audio_pdev)
+               platform_device_unregister(priv->audio_pdev);
+
        if (priv->hdmi->irq)
                free_irq(priv->hdmi->irq, priv);
 
@@ -1189,8 +1215,189 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        i2c_unregister_device(priv->cec);
 }
 
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+                                  struct hdmi_codec_daifmt *daifmt,
+                                  struct hdmi_codec_params *params)
+{
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
+       int i, ret;
+       struct tda998x_audio_params audio = {
+               .sample_width = params->sample_width,
+               .sample_rate = params->sample_rate,
+               .cea = params->cea,
+       };
+
+       if (!priv->encoder.crtc)
+               return -ENODEV;
+
+       memcpy(audio.status, params->iec.status,
+              min(sizeof(audio.status), sizeof(params->iec.status)));
+
+       switch (daifmt->fmt) {
+       case HDMI_I2S:
+               if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+                   daifmt->bit_clk_master || daifmt->frame_clk_master) {
+                       dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+                               daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+                               daifmt->bit_clk_master,
+                               daifmt->frame_clk_master);
+                       return -EINVAL;
+               }
+               for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+                       if (priv->audio_port[i].format == AFMT_I2S)
+                               audio.config = priv->audio_port[i].config;
+               audio.format = AFMT_I2S;
+               break;
+       case HDMI_SPDIF:
+               for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+                       if (priv->audio_port[i].format == AFMT_SPDIF)
+                               audio.config = priv->audio_port[i].config;
+               audio.format = AFMT_SPDIF;
+               break;
+       default:
+               dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+               return -EINVAL;
+       }
+
+       if (audio.config == 0) {
+               dev_err(dev, "%s: No audio configutation found\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&priv->audio_mutex);
+       ret = tda998x_configure_audio(priv,
+                                     &audio,
+                                     priv->encoder.crtc->hwmode.clock);
+
+       if (ret == 0)
+               priv->audio_params = audio;
+       mutex_unlock(&priv->audio_mutex);
+
+       return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
+{
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->audio_mutex);
+
+       reg_write(priv, REG_ENA_AP, 0);
+
+       priv->audio_params.format = AFMT_UNUSED;
+
+       mutex_unlock(&priv->audio_mutex);
+}
+
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+                                      bool enable)
+{
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->audio_mutex);
+
+       tda998x_audio_mute(priv, enable);
+
+       mutex_unlock(&priv->audio_mutex);
+       return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+                                uint8_t *buf, size_t len)
+{
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
+       struct drm_mode_config *config = &priv->encoder.dev->mode_config;
+       struct drm_connector *connector;
+       int ret = -ENODEV;
+
+       mutex_lock(&config->mutex);
+       list_for_each_entry(connector, &config->connector_list, head) {
+               if (&priv->encoder == connector->encoder) {
+                       memcpy(buf, connector->eld,
+                              min(sizeof(connector->eld), len));
+                       ret = 0;
+               }
+       }
+       mutex_unlock(&config->mutex);
+
+       return ret;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+       .hw_params = tda998x_audio_hw_params,
+       .audio_shutdown = tda998x_audio_shutdown,
+       .digital_mute = tda998x_audio_digital_mute,
+       .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+                                   struct device *dev)
+{
+       struct hdmi_codec_pdata codec_data = {
+               .ops = &audio_codec_ops,
+               .max_i2s_channels = 2,
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+               if (priv->audio_port[i].format == AFMT_I2S &&
+                   priv->audio_port[i].config != 0)
+                       codec_data.i2s = 1;
+               if (priv->audio_port[i].format == AFMT_SPDIF &&
+                   priv->audio_port[i].config != 0)
+                       codec_data.spdif = 1;
+       }
+
+       priv->audio_pdev = platform_device_register_data(
+               dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+               &codec_data, sizeof(codec_data));
+
+       return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
 /* I2C driver functions */
 
+static int tda998x_get_audio_ports(struct tda998x_priv *priv,
+                                  struct device_node *np)
+{
+       const u32 *port_data;
+       u32 size;
+       int i;
+
+       port_data = of_get_property(np, "audio-ports", &size);
+       if (!port_data)
+               return 0;
+
+       size /= sizeof(u32);
+       if (size > 2 * ARRAY_SIZE(priv->audio_port) || size % 2 != 0) {
+               dev_err(&priv->hdmi->dev,
+                       "Bad number of elements in audio-ports dt-property\n");
+               return -EINVAL;
+       }
+
+       size /= 2;
+
+       for (i = 0; i < size; i++) {
+               u8 afmt = be32_to_cpup(&port_data[2*i]);
+               u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
+
+               if (afmt != AFMT_SPDIF && afmt != AFMT_I2S) {
+                       dev_err(&priv->hdmi->dev,
+                               "Bad audio format %u\n", afmt);
+                       return -EINVAL;
+               }
+
+               priv->audio_port[i].format = afmt;
+               priv->audio_port[i].config = ena_ap;
+       }
+
+       if (priv->audio_port[0].format == priv->audio_port[1].format) {
+               dev_err(&priv->hdmi->dev,
+                       "There can only be on I2S port and one SPDIF port\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 {
        struct device_node *np = client->dev.of_node;
@@ -1304,7 +1511,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        if (!np)
                return 0;               /* non-DT */
 
-       /* get the optional video properties */
+       /* get the device tree parameters */
        ret = of_property_read_u32(np, "video-ports", &video);
        if (ret == 0) {
                priv->vip_cntrl_0 = video >> 16;
@@ -1312,8 +1519,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
                priv->vip_cntrl_2 = video;
        }
 
-       return 0;
+       mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
 
+       ret = tda998x_get_audio_ports(priv, np);
+       if (ret)
+               goto fail;
+
+       if (priv->audio_port[0].format != AFMT_UNUSED)
+               tda998x_audio_codec_init(priv, &client->dev);
+
+       return 0;
 fail:
        /* if encoder_init fails, the encoder slave is never registered,
         * so cleanup here:
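tda998x_get_audio_ports() expects the audio-ports property as a flat array of <format, ENA_AP-config> pairs, with at most one entry per format. An illustrative binding fragment and its driver-side decoding (the TDA998x_* constants are assumed to come from the dt-bindings header; the hex values are board-specific):

	/* audio-ports = <TDA998x_I2S 0x03>, <TDA998x_SPDIF 0x04>; */
	u8 afmt   = be32_to_cpup(&port_data[2 * i]);		/* AFMT_I2S or AFMT_SPDIF */
	u8 ena_ap = be32_to_cpup(&port_data[2 * i + 1]);	/* written to REG_ENA_AP */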
index 44f4a131c8ddb4c0e68f16ecde7dbad324255edc..0be55dc1ef4bbe748b6d66daff817093effd83d6 100644 (file)
@@ -56,9 +56,7 @@ static const struct file_operations i810_driver_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features =
-           DRIVER_USE_AGP |
-           DRIVER_HAVE_DMA,
+       .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY,
        .dev_priv_size = sizeof(drm_i810_buf_priv_t),
        .load = i810_driver_load,
        .lastclose = i810_driver_lastclose,
index 684fc1cd08fa90260529ca558b18118d118c8f9c..a998c2bce70a0a6c72ac1bb1671b4ebd311e6c40 100644 (file)
@@ -3,15 +3,20 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+subdir-ccflags-y += \
+       $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
 # Please keep these build lists sorted!
 
 # core driver code
 i915-y := i915_drv.o \
          i915_irq.o \
+         i915_memcpy.o \
+         i915_mm.o \
          i915_params.o \
          i915_pci.o \
           i915_suspend.o \
+         i915_sw_fence.o \
          i915_sysfs.o \
          intel_csr.o \
          intel_device_info.o \
@@ -25,7 +30,6 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 i915-y += i915_cmd_parser.o \
          i915_gem_batch_pool.o \
          i915_gem_context.o \
-         i915_gem_debug.o \
          i915_gem_dmabuf.o \
          i915_gem_evict.o \
          i915_gem_execbuffer.o \
@@ -33,6 +37,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_gtt.o \
          i915_gem.o \
          i915_gem_render_state.o \
+         i915_gem_request.o \
          i915_gem_shrinker.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
@@ -40,6 +45,7 @@ i915-y += i915_cmd_parser.o \
          i915_gpu_error.o \
          i915_trace_points.o \
          intel_breadcrumbs.o \
+         intel_engine_cs.o \
          intel_lrc.o \
          intel_mocs.o \
          intel_ringbuffer.o \
@@ -109,6 +115,6 @@ i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
 endif
 
-obj-$(CONFIG_DRM_I915)  += i915.o
+obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
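The as-instr probe turns assembler support for the SSE4.1 movntdqa instruction into a CONFIG_AS_MOVNTDQA define, which the new i915_memcpy.c keys its write-combining read path off. A hedged consumption sketch, with plain memcpy() standing in for the streaming-load loop:

	/* sketch only: the real fast path issues 16-byte movntdqa loads
	 * between kernel_fpu_begin()/kernel_fpu_end() */
	static void copy_from_wc(void *dst, const void *src, size_t len)
	{
	#ifdef CONFIG_AS_MOVNTDQA
		/* streaming-load fast path would go here */
	#endif
		memcpy(dst, src, len);		/* correctness fallback */
	}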
index b0fd6a7b060380d5d2d8dfe94192a3f3f59dcb9f..70980f82a15b3f76ef419fca1a1cb78875ae74f9 100644 (file)
  * The parser always rejects such commands.
  *
  * The majority of the problematic commands fall in the MI_* range, with only a
- * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
  *
  * Implementation:
- * Each ring maintains tables of commands and registers which the parser uses in
- * scanning batch buffers submitted to that ring.
+ * Each engine maintains tables of commands and registers which the parser
+ * uses in scanning batch buffers submitted to that engine.
  *
  * Since the set of commands that the parser must check for is significantly
  * smaller than the number of commands supported, the parser tables contain only
  * those commands required by the parser. This generally works because command
  * opcode ranges have standard command length encodings. So for commands that
  * the parser does not need to check, it can easily skip them. This is
- * implemented via a per-ring length decoding vfunc.
+ * implemented via a per-engine length decoding vfunc.
  *
  * Unfortunately, there are a number of commands that do not follow the standard
  * length encoding for their opcode range, primarily amongst the MI_* commands.
  * To handle this, the parser provides a way to define explicit "skip" entries
- * in the per-ring command tables.
+ * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
  * mentioned above: rejected, master-only, register whitelist. The parser
  * general bitmasking mechanism.
  */
 
-#define STD_MI_OPCODE_MASK  0xFF800000
-#define STD_3D_OPCODE_MASK  0xFFFF0000
-#define STD_2D_OPCODE_MASK  0xFFC00000
-#define STD_MFX_OPCODE_MASK 0xFFFF0000
+#define STD_MI_OPCODE_SHIFT  (32 - 9)
+#define STD_3D_OPCODE_SHIFT  (32 - 16)
+#define STD_2D_OPCODE_SHIFT  (32 - 10)
+#define STD_MFX_OPCODE_SHIFT (32 - 16)
+#define MIN_OPCODE_SHIFT 16
 
 #define CMD(op, opm, f, lm, fl, ...)                           \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
-               .cmd = { (op), (opm) },                         \
+               .cmd = { (op), ~0u << (opm) },                  \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }
 
 /* Convenience macros to compress the tables */
-#define SMI STD_MI_OPCODE_MASK
-#define S3D STD_3D_OPCODE_MASK
-#define S2D STD_2D_OPCODE_MASK
-#define SMFX STD_MFX_OPCODE_MASK
+#define SMI STD_MI_OPCODE_SHIFT
+#define S3D STD_3D_OPCODE_SHIFT
+#define S2D STD_2D_OPCODE_SHIFT
+#define SMFX STD_MFX_OPCODE_SHIFT
 #define F true
 #define S CMD_DESC_SKIP
 #define R CMD_DESC_REJECT
@@ -350,6 +351,9 @@ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
 };
 
+static const struct drm_i915_cmd_descriptor noop_desc =
+       CMD(MI_NOOP, SMI, F, 1, S);
+
 #undef CMD
 #undef SMI
 #undef S3D
@@ -458,6 +462,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
        REG32(GEN7_GPGPU_DISPATCHDIMX),
        REG32(GEN7_GPGPU_DISPATCHDIMY),
        REG32(GEN7_GPGPU_DISPATCHDIMZ),
+       REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
@@ -473,6 +478,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
        REG32(GEN7_L3SQCREG1),
        REG32(GEN7_L3CNTLREG2),
        REG32(GEN7_L3CNTLREG3),
+       REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
 static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
@@ -502,7 +508,10 @@ static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
 };
 
 static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+       REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+       REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
        REG32(BCS_SWCTRL),
+       REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
 static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
@@ -603,7 +612,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
        return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *engine,
+static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
 {
@@ -624,8 +633,10 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
                        u32 curr = desc->cmd.value & desc->cmd.mask;
 
                        if (curr < previous) {
-                               DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-                                         engine->id, i, j, curr, previous);
+                               DRM_ERROR("CMD: %s [%d] command table not sorted: "
+                                         "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+                                         engine->name, engine->id,
+                                         i, j, curr, previous);
                                ret = false;
                        }
 
@@ -636,7 +647,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
        return ret;
 }
 
-static bool check_sorted(int ring_id,
+static bool check_sorted(const struct intel_engine_cs *engine,
                         const struct drm_i915_reg_descriptor *reg_table,
                         int reg_count)
 {
@@ -648,8 +659,10 @@ static bool check_sorted(int ring_id,
                u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
 
                if (curr < previous) {
-                       DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
-                                 ring_id, i, curr, previous);
+                       DRM_ERROR("CMD: %s [%d] register table not sorted: "
+                                 "entry=%d reg=0x%08X prev=0x%08X\n",
+                                 engine->name, engine->id,
+                                 i, curr, previous);
                        ret = false;
                }
 
@@ -666,7 +679,7 @@ static bool validate_regs_sorted(struct intel_engine_cs *engine)
 
        for (i = 0; i < engine->reg_table_count; i++) {
                table = &engine->reg_tables[i];
-               if (!check_sorted(engine->id, table->regs, table->num_regs))
+               if (!check_sorted(engine, table->regs, table->num_regs))
                        return false;
        }
 
@@ -687,12 +700,26 @@ struct cmd_node {
  * non-opcode bits being set. But if we don't include those bits, some 3D
  * commands may hash to the same bucket due to not including opcode bits that
  * make the command unique. For now, we will risk hashing to the same bucket.
- *
- * If we attempt to generate a perfect hash, we should be able to look at bits
- * 31:29 of a command from a batch buffer and use the full mask for that
- * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
  */
-#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+static inline u32 cmd_header_key(u32 x)
+{
+       u32 shift;
+
+       switch (x >> INSTR_CLIENT_SHIFT) {
+       default:
+       case INSTR_MI_CLIENT:
+               shift = STD_MI_OPCODE_SHIFT;
+               break;
+       case INSTR_RC_CLIENT:
+               shift = STD_3D_OPCODE_SHIFT;
+               break;
+       case INSTR_BC_CLIENT:
+               shift = STD_2D_OPCODE_SHIFT;
+               break;
+       }
+
+       return x >> shift;
+}
 
 static int init_hash_table(struct intel_engine_cs *engine,
                           const struct drm_i915_cmd_table *cmd_tables,
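Hashing on the per-client opcode field gives each command class its natural key width without a shared mask. Worked example, assuming the MI_INSTR() encoding (client in bits 31:29, MI opcode in bits 28:23):

	u32 header = 0x31 << 23;		/* MI_BATCH_BUFFER_START */
	u32 key = cmd_header_key(header);	/* client 0 -> STD_MI_OPCODE_SHIFT, key == 0x31 */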
@@ -716,7 +743,7 @@ static int init_hash_table(struct intel_engine_cs *engine,
 
                        desc_node->desc = desc;
                        hash_add(engine->cmd_hash, &desc_node->node,
-                                desc->cmd.value & CMD_HASH_MASK);
+                                cmd_header_key(desc->cmd.value));
                }
        }
 
@@ -736,23 +763,21 @@ static void fini_hash_table(struct intel_engine_cs *engine)
 }
 
 /**
- * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
  * @engine: the engine to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
  * struct intel_engine_cs based on whether the platform requires software
  * command parsing.
- *
- * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 {
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
        int ret;
 
        if (!IS_GEN7(engine->i915))
-               return 0;
+               return;
 
        switch (engine->id) {
        case RCS:
@@ -806,36 +831,38 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
                engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        default:
-               DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-                         engine->id);
-               BUG();
+               MISSING_CASE(engine->id);
+               return;
        }
 
-       BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
-       BUG_ON(!validate_regs_sorted(engine));
-
-       WARN_ON(!hash_empty(engine->cmd_hash));
+       if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
+               DRM_ERROR("%s: command descriptions are not sorted\n",
+                         engine->name);
+               return;
+       }
+       if (!validate_regs_sorted(engine)) {
+               DRM_ERROR("%s: registers are not sorted\n", engine->name);
+               return;
+       }
 
        ret = init_hash_table(engine, cmd_tables, cmd_table_count);
        if (ret) {
-               DRM_ERROR("CMD: cmd_parser_init failed!\n");
+               DRM_ERROR("%s: initialised failed!\n", engine->name);
                fini_hash_table(engine);
-               return ret;
+               return;
        }
 
        engine->needs_cmd_parser = true;
-
-       return 0;
 }
 
 /**
- * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
  * @engine: the engine to clean up
  *
  * Releases any resources related to command parsing that may have been
- * initialized for the specified ring.
+ * initialized for the specified engine.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
        if (!engine->needs_cmd_parser)
                return;
@@ -850,12 +877,9 @@ find_cmd_in_table(struct intel_engine_cs *engine,
        struct cmd_node *desc_node;
 
        hash_for_each_possible(engine->cmd_hash, desc_node, node,
-                              cmd_header & CMD_HASH_MASK) {
+                              cmd_header_key(cmd_header)) {
                const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
-               u32 masked_cmd = desc->cmd.mask & cmd_header;
-               u32 masked_value = desc->cmd.value & desc->cmd.mask;
-
-               if (masked_cmd == masked_value)
+               if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
                        return desc;
        }
 
@@ -866,18 +890,21 @@ find_cmd_in_table(struct intel_engine_cs *engine,
  * Returns a pointer to a descriptor for the command specified by cmd_header.
  *
  * The caller must supply space for a default descriptor via the default_desc
- * parameter. If no descriptor for the specified command exists in the ring's
+ * parameter. If no descriptor for the specified command exists in the engine's
  * command parser tables, this function fills in default_desc based on the
- * ring's default length encoding and returns default_desc.
+ * engine's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
 find_cmd(struct intel_engine_cs *engine,
         u32 cmd_header,
+        const struct drm_i915_cmd_descriptor *desc,
         struct drm_i915_cmd_descriptor *default_desc)
 {
-       const struct drm_i915_cmd_descriptor *desc;
        u32 mask;
 
+       if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
+               return desc;
+
        desc = find_cmd_in_table(engine, cmd_header);
        if (desc)
                return desc;
@@ -886,152 +913,140 @@ find_cmd(struct intel_engine_cs *engine,
        if (!mask)
                return NULL;
 
-       BUG_ON(!default_desc);
-       default_desc->flags = CMD_DESC_SKIP;
+       default_desc->cmd.value = cmd_header;
+       default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
        default_desc->length.mask = mask;
-
+       default_desc->flags = CMD_DESC_SKIP;
        return default_desc;
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg(const struct drm_i915_reg_descriptor *table,
-        int count, u32 addr)
+__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 {
-       int i;
-
-       for (i = 0; i < count; i++) {
-               if (i915_mmio_reg_offset(table[i].addr) == addr)
-                       return &table[i];
+       int start = 0, end = count;
+
+       while (start < end) {
+               int mid = start + (end - start) / 2;
+               int ret = addr - i915_mmio_reg_offset(table[mid].addr);
+
+               if (ret < 0)
+                       end = mid;
+               else if (ret > 0)
+                       start = mid + 1;
+               else
+                       return &table[mid];
        }
-
        return NULL;
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg_in_tables(const struct drm_i915_reg_table *tables,
-                  int count, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 {
-       int i;
-       const struct drm_i915_reg_table *table;
-       const struct drm_i915_reg_descriptor *reg;
+       const struct drm_i915_reg_table *table = engine->reg_tables;
+       int count = engine->reg_table_count;
 
-       for (i = 0; i < count; i++) {
-               table = &tables[i];
+       do {
                if (!table->master || is_master) {
-                       reg = find_reg(table->regs, table->num_regs,
-                                      addr);
+                       const struct drm_i915_reg_descriptor *reg;
+
+                       reg = __find_reg(table->regs, table->num_regs, addr);
                        if (reg != NULL)
                                return reg;
                }
-       }
+       } while (table++, --count);
 
        return NULL;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj,
-                      unsigned start, unsigned len)
-{
-       int i;
-       void *addr = NULL;
-       struct sg_page_iter sg_iter;
-       int first_page = start >> PAGE_SHIFT;
-       int last_page = (len + start + 4095) >> PAGE_SHIFT;
-       int npages = last_page - first_page;
-       struct page **pages;
-
-       pages = drm_malloc_ab(npages, sizeof(*pages));
-       if (pages == NULL) {
-               DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-               goto finish;
-       }
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-               pages[i++] = sg_page_iter_page(&sg_iter);
-               if (i == npages)
-                       break;
-       }
-
-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       if (addr == NULL) {
-               DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-               goto finish;
-       }
-
-finish:
-       if (pages)
-               drm_free_large(pages);
-       return (u32*)addr;
-}
-
-/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
-static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                       struct drm_i915_gem_object *src_obj,
                       u32 batch_start_offset,
-                      u32 batch_len)
+                      u32 batch_len,
+                      bool *needs_clflush_after)
 {
-       int needs_clflush = 0;
-       void *src_base, *src;
-       void *dst = NULL;
+       unsigned int src_needs_clflush;
+       unsigned int dst_needs_clflush;
+       void *dst, *src;
        int ret;
 
-       if (batch_len > dest_obj->base.size ||
-           batch_len + batch_start_offset > src_obj->base.size)
-               return ERR_PTR(-E2BIG);
-
-       if (WARN_ON(dest_obj->pages_pin_count == 0))
-               return ERR_PTR(-ENODEV);
-
-       ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
-       if (ret) {
-               DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
+       ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
-       src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
-       if (!src_base) {
-               DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-               ret = -ENOMEM;
+       ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
+       if (ret) {
+               dst = ERR_PTR(ret);
                goto unpin_src;
        }
 
-       ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
-       if (ret) {
-               DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
-               goto unmap_src;
+       dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+       if (IS_ERR(dst))
+               goto unpin_dst;
+
+       src = ERR_PTR(-ENODEV);
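+       /* Note: the zero-length i915_memcpy_from_wc() call below copies
+        * nothing; it is a probe for whether the accelerated WC read path
+        * can handle the alignment implied by batch_start_offset.
+        */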
+       if (src_needs_clflush &&
+           i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+               src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+               if (!IS_ERR(src)) {
+                       i915_memcpy_from_wc(dst,
+                                           src + batch_start_offset,
+                                           ALIGN(batch_len, 16));
+                       i915_gem_object_unpin_map(src_obj);
+               }
        }
-
-       dst = vmap_batch(dest_obj, 0, batch_len);
-       if (!dst) {
-               DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-               ret = -ENOMEM;
-               goto unmap_src;
+       if (IS_ERR(src)) {
+               void *ptr;
+               int offset, n;
+
+               offset = offset_in_page(batch_start_offset);
+
+               /* We can avoid clflushing partial cachelines before the write
+                * if we only ever write full cachelines. Since we know that
+                * both the source and destination are in multiples of
+                * PAGE_SIZE, we can simply round up to the next cacheline.
+                * We don't care about copying too much here as we only
+                * validate up to the end of the batch.
+                */
+               if (dst_needs_clflush & CLFLUSH_BEFORE)
+                       batch_len = roundup(batch_len,
+                                           boot_cpu_data.x86_clflush_size);
+
+               ptr = dst;
+               for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
+                       int len = min_t(int, batch_len, PAGE_SIZE - offset);
+
+                       src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+                       if (src_needs_clflush)
+                               drm_clflush_virt_range(src + offset, len);
+                       memcpy(ptr, src + offset, len);
+                       kunmap_atomic(src);
+
+                       ptr += len;
+                       batch_len -= len;
+                       offset = 0;
+               }
        }
 
-       src = src_base + offset_in_page(batch_start_offset);
-       if (needs_clflush)
-               drm_clflush_virt_range(src, batch_len);
-
-       memcpy(dst, src, batch_len);
+       /* dst_obj is returned with vmap pinned */
+       *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
 
-unmap_src:
-       vunmap(src_base);
+unpin_dst:
+       i915_gem_obj_finish_shmem_access(dst_obj);
 unpin_src:
-       i915_gem_object_unpin_pages(src_obj);
-
-       return ret ? ERR_PTR(ret) : dst;
+       i915_gem_obj_finish_shmem_access(src_obj);
+       return dst;
 }
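
The CLFLUSH_BEFORE rounding in the slow path keeps every write a whole cacheline: padding batch_len up to the flush granularity means the final partial line is fully overwritten rather than partially dirtied, and because the shadow object is page-aligned the overrun stays in bounds. A worked example of the arithmetic (64 bytes is a typical x86 clflush size; the macro mirrors the kernel's roundup()):

#include <assert.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned int clflush_size = 64; /* cf. boot_cpu_data.x86_clflush_size */
        unsigned int batch_len = 100;

        /* A 100-byte batch is copied as 128 bytes: two full cachelines. */
        assert(round_up(batch_len, clflush_size) == 128);
        return 0;
}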
 
 /**
- * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * intel_engine_needs_cmd_parser() - should a given engine use software
+ *                                   command parsing?
  * @engine: the engine in question
  *
  * Only certain platforms require software batch buffer command parsing, and
  * only when enabled via module parameter.
  *
- * Return: true if the ring requires software command parsing
+ * Return: true if the engine requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
 {
        if (!engine->needs_cmd_parser)
                return false;
@@ -1048,6 +1063,9 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                      const bool is_master,
                      bool *oacontrol_set)
 {
+       if (desc->flags & CMD_DESC_SKIP)
+               return true;
+
        if (desc->flags & CMD_DESC_REJECT) {
                DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
                return false;
@@ -1072,14 +1090,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
-                               find_reg_in_tables(engine->reg_tables,
-                                                  engine->reg_table_count,
-                                                  is_master,
-                                                  reg_addr);
+                               find_reg(engine, is_master, reg_addr);
 
                        if (!reg) {
-                               DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-                                                reg_addr, *cmd, engine->id);
+                               DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
+                                                reg_addr, *cmd, engine->exec_id);
                                return false;
                        }
 
@@ -1159,11 +1174,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                                desc->bits[i].mask;
 
                        if (dword != desc->bits[i].expected) {
-                               DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+                               DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n",
                                                 *cmd,
                                                 desc->bits[i].mask,
                                                 desc->bits[i].expected,
-                                                dword, engine->id);
+                                                dword, engine->exec_id);
                                return false;
                        }
                }
@@ -1189,23 +1204,26 @@ static bool check_cmd(const struct intel_engine_cs *engine,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int i915_parse_cmds(struct intel_engine_cs *engine,
-                   struct drm_i915_gem_object *batch_obj,
-                   struct drm_i915_gem_object *shadow_batch_obj,
-                   u32 batch_start_offset,
-                   u32 batch_len,
-                   bool is_master)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+                           struct drm_i915_gem_object *batch_obj,
+                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u32 batch_start_offset,
+                           u32 batch_len,
+                           bool is_master)
 {
-       u32 *cmd, *batch_base, *batch_end;
-       struct drm_i915_cmd_descriptor default_desc = { 0 };
+       u32 *cmd, *batch_end;
+       struct drm_i915_cmd_descriptor default_desc = noop_desc;
+       const struct drm_i915_cmd_descriptor *desc = &default_desc;
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+       bool needs_clflush_after = false;
        int ret = 0;
 
-       batch_base = copy_batch(shadow_batch_obj, batch_obj,
-                               batch_start_offset, batch_len);
-       if (IS_ERR(batch_base)) {
+       cmd = copy_batch(shadow_batch_obj, batch_obj,
+                        batch_start_offset, batch_len,
+                        &needs_clflush_after);
+       if (IS_ERR(cmd)) {
                DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-               return PTR_ERR(batch_base);
+               return PTR_ERR(cmd);
        }
 
        /*
@@ -1213,17 +1231,14 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
         * large or larger and copy_batch() will write MI_NOPs to the extra
         * space. Parsing should be faster in some cases this way.
         */
-       batch_end = batch_base + (batch_len / sizeof(*batch_end));
-
-       cmd = batch_base;
+       batch_end = cmd + (batch_len / sizeof(*batch_end));
        while (cmd < batch_end) {
-               const struct drm_i915_cmd_descriptor *desc;
                u32 length;
 
                if (*cmd == MI_BATCH_BUFFER_END)
                        break;
 
-               desc = find_cmd(engine, *cmd, &default_desc);
+               desc = find_cmd(engine, *cmd, desc, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
@@ -1274,7 +1289,9 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
                ret = -EINVAL;
        }
 
-       vunmap(batch_base);
+       if (ret == 0 && needs_clflush_after)
+               drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+       i915_gem_object_unpin_map(shadow_batch_obj);
 
        return ret;
 }
@@ -1295,7 +1312,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 
        /* If the command parser is not enabled, report 0 - unsupported */
        for_each_engine(engine, dev_priv) {
-               if (i915_needs_cmd_parser(engine)) {
+               if (intel_engine_needs_cmd_parser(engine)) {
                        active = true;
                        break;
                }
index 844fea795bae1b860588fa22bc6ffc1621d12cb4..27b0e34dadecdadafd6506148a49dd450ca5106d 100644
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-enum {
-       ACTIVE_LIST,
-       INACTIVE_LIST,
-       PINNED_LIST,
-};
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+       return to_i915(node->minor->dev);
+}
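
Every debugfs show() callback below now opens with this helper instead of chaining through node->minor->dev by hand. A representative opening, modelled on i915_capabilities() just after this hunk (a sketch, not a function from the patch):

static int example_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        return 0;
}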
 
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
@@ -63,7 +62,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
        node->minor = minor;
        node->dent = ent;
-       node->info_ent = (void *) key;
+       node->info_ent = (void *)key;
 
        mutex_lock(&minor->debugfs_lock);
        list_add(&node->list, &minor->debugfs_list);
@@ -74,12 +73,11 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       const struct intel_device_info *info = INTEL_INFO(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       const struct intel_device_info *info = INTEL_INFO(dev_priv);
 
-       seq_printf(m, "gen: %d\n", info->gen);
-       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+       seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
 #define SEP_SEMICOLON ;
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
@@ -91,7 +89,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 static char get_active_flag(struct drm_i915_gem_object *obj)
 {
-       return obj->active ? '*' : ' ';
+       return i915_gem_object_is_active(obj) ? '*' : ' ';
 }
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
@@ -101,7 +99,7 @@ static char get_pin_flag(struct drm_i915_gem_object *obj)
 
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-       switch (obj->tiling_mode) {
+       switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
@@ -111,7 +109,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+       return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -125,7 +123,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+               if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }
 
@@ -138,6 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
+       unsigned int frontbuffer_bits;
        int pin_count = 0;
        enum intel_engine_id id;
 
@@ -155,30 +154,36 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   obj->base.write_domain);
        for_each_engine_id(engine, dev_priv, id)
                seq_printf(m, "%x ",
-                               i915_gem_request_get_seqno(obj->last_read_req[id]));
-       seq_printf(m, "] %x %x%s%s%s",
-                  i915_gem_request_get_seqno(obj->last_write_req),
-                  i915_gem_request_get_seqno(obj->last_fenced_req),
-                  i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+                          i915_gem_active_get_seqno(&obj->last_read[id],
+                                                    &obj->base.dev->struct_mutex));
+       seq_printf(m, "] %x %s%s%s",
+                  i915_gem_active_get_seqno(&obj->last_write,
+                                            &obj->base.dev->struct_mutex),
+                  i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (vma->pin_count > 0)
+               if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_display)
                seq_printf(m, " (display)");
-       if (obj->fence_reg != I915_FENCE_REG_NONE)
-               seq_printf(m, " (fence: %d)", obj->fence_reg);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!drm_mm_node_allocated(&vma->node))
+                       continue;
+
                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-                          vma->is_ggtt ? "g" : "pp",
+                          i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size);
-               if (vma->is_ggtt)
+               if (i915_vma_is_ggtt(vma))
                        seq_printf(m, ", type: %u", vma->ggtt_view.type);
+               if (vma->fence)
+                       seq_printf(m, ", fence: %d%s",
+                                  vma->fence->id,
+                                  i915_gem_active_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
@@ -192,58 +197,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                *t = '\0';
                seq_printf(m, " (%s mappable)", s);
        }
-       if (obj->last_write_req != NULL)
-               seq_printf(m, " (%s)",
-                          i915_gem_request_get_engine(obj->last_write_req)->name);
-       if (obj->frontbuffer_bits)
-               seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
-}
-
-static int i915_gem_object_list_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = m->private;
-       uintptr_t list = (uintptr_t) node->info_ent->data;
-       struct list_head *head;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct i915_vma *vma;
-       u64 total_obj_size, total_gtt_size;
-       int count, ret;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
+       engine = i915_gem_active_get_engine(&obj->last_write,
+                                           &dev_priv->drm.struct_mutex);
+       if (engine)
+               seq_printf(m, " (%s)", engine->name);
 
-       /* FIXME: the user of this interface might want more than just GGTT */
-       switch (list) {
-       case ACTIVE_LIST:
-               seq_puts(m, "Active:\n");
-               head = &ggtt->base.active_list;
-               break;
-       case INACTIVE_LIST:
-               seq_puts(m, "Inactive:\n");
-               head = &ggtt->base.inactive_list;
-               break;
-       default:
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
-       total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(vma, head, vm_link) {
-               seq_printf(m, "   ");
-               describe_obj(m, vma->obj);
-               seq_printf(m, "\n");
-               total_obj_size += vma->obj->base.size;
-               total_gtt_size += vma->node.size;
-               count++;
-       }
-       mutex_unlock(&dev->struct_mutex);
-
-       seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
-                  count, total_obj_size, total_gtt_size);
-       return 0;
+       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (frontbuffer_bits)
+               seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 static int obj_rank_by_stolen(void *priv,
@@ -263,9 +225,8 @@ static int obj_rank_by_stolen(void *priv,
 
 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
@@ -311,17 +272,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
        return 0;
 }
 
-#define count_objects(list, member) do { \
-       list_for_each_entry(obj, list, member) { \
-               size += i915_gem_obj_total_ggtt_size(obj); \
-               ++count; \
-               if (obj->map_and_fenceable) { \
-                       mappable_size += i915_gem_obj_ggtt_size(obj); \
-                       ++mappable_count; \
-               } \
-       } \
-} while (0)
-
 struct file_stats {
        struct drm_i915_file_private *file_priv;
        unsigned long count;
@@ -338,46 +288,29 @@ static int per_file_stats(int id, void *ptr, void *data)
 
        stats->count++;
        stats->total += obj->base.size;
-
+       if (!obj->bind_count)
+               stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;
 
-       if (USES_FULL_PPGTT(obj->base.dev)) {
-               list_for_each_entry(vma, &obj->vma_list, obj_link) {
-                       struct i915_hw_ppgtt *ppgtt;
-
-                       if (!drm_mm_node_allocated(&vma->node))
-                               continue;
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!drm_mm_node_allocated(&vma->node))
+                       continue;
 
-                       if (vma->is_ggtt) {
-                               stats->global += obj->base.size;
-                               continue;
-                       }
+               if (i915_vma_is_ggtt(vma)) {
+                       stats->global += vma->node.size;
+               } else {
+                       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-                       ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
-                       if (ppgtt->file_priv != stats->file_priv)
+                       if (ppgtt->base.file != stats->file_priv)
                                continue;
-
-                       if (obj->active) /* XXX per-vma statistic */
-                               stats->active += obj->base.size;
-                       else
-                               stats->inactive += obj->base.size;
-
-                       return 0;
-               }
-       } else {
-               if (i915_gem_obj_ggtt_bound(obj)) {
-                       stats->global += obj->base.size;
-                       if (obj->active)
-                               stats->active += obj->base.size;
-                       else
-                               stats->inactive += obj->base.size;
-                       return 0;
                }
-       }
 
-       if (!list_empty(&obj->global_list))
-               stats->unbound += obj->base.size;
+               if (i915_vma_is_active(vma))
+                       stats->active += vma->node.size;
+               else
+                       stats->inactive += vma->node.size;
+       }
 
        return 0;
 }
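
per_file_stats() now classifies sizes per vma rather than per object: GGTT bindings count toward global, bindings in another client's ppgtt are skipped, and everything else splits by activity. The branch structure as a compilable userspace sketch (field names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct vma_info { bool allocated, ggtt, same_client, active; uint64_t size; };
struct vma_stats { uint64_t global, active, inactive; };

static void account_vma(struct vma_stats *stats, const struct vma_info *v)
{
        if (!v->allocated)
                return;

        if (v->ggtt)
                stats->global += v->size;
        else if (!v->same_client)
                return;                 /* bound in another file's ppgtt */

        if (v->active)
                stats->active += v->size;
        else
                stats->inactive += v->size;
}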
@@ -424,9 +357,9 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 
        for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
                if (ctx->engine[n].state)
-                       per_file_stats(0, ctx->engine[n].state, data);
-               if (ctx->engine[n].ringbuf)
-                       per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+                       per_file_stats(0, ctx->engine[n].state->obj, data);
+               if (ctx->engine[n].ring)
+                       per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
        }
 
        return 0;
@@ -435,48 +368,34 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = &dev_priv->drm;
        struct file_stats stats;
        struct drm_file *file;
 
        memset(&stats, 0, sizeof(stats));
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
+       mutex_lock(&dev->struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-       list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+       list_for_each_entry(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        print_file_stats(m, "[k]contexts", stats);
 }
 
-#define count_vmas(list, member) do { \
-       list_for_each_entry(vma, list, member) { \
-               size += i915_gem_obj_total_ggtt_size(vma->obj); \
-               ++count; \
-               if (vma->obj->map_and_fenceable) { \
-                       mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
-                       ++mappable_count; \
-               } \
-       } \
-} while (0)
-
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       u32 count, mappable_count, purgeable_count;
-       u64 size, mappable_size, purgeable_size;
-       unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
-       u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
+       u32 count, mapped_count, purgeable_count, dpy_count;
+       u64 size, mapped_size, purgeable_size, dpy_size;
        struct drm_i915_gem_object *obj;
        struct drm_file *file;
-       struct i915_vma *vma;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -487,70 +406,53 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);
 
-       size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.bound_list, global_list);
-       seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
-                  count, mappable_count, size, mappable_size);
-
-       size = count = mappable_size = mappable_count = 0;
-       count_vmas(&ggtt->base.active_list, vm_link);
-       seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
-                  count, mappable_count, size, mappable_size);
+       size = count = 0;
+       mapped_size = mapped_count = 0;
+       purgeable_size = purgeable_count = 0;
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               size += obj->base.size;
+               ++count;
 
-       size = count = mappable_size = mappable_count = 0;
-       count_vmas(&ggtt->base.inactive_list, vm_link);
-       seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
-                  count, mappable_count, size, mappable_size);
+               if (obj->madv == I915_MADV_DONTNEED) {
+                       purgeable_size += obj->base.size;
+                       ++purgeable_count;
+               }
 
-       size = count = purgeable_size = purgeable_count = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-               size += obj->base.size, ++count;
-               if (obj->madv == I915_MADV_DONTNEED)
-                       purgeable_size += obj->base.size, ++purgeable_count;
                if (obj->mapping) {
-                       pin_mapped_count++;
-                       pin_mapped_size += obj->base.size;
-                       if (obj->pages_pin_count == 0) {
-                               pin_mapped_purgeable_count++;
-                               pin_mapped_purgeable_size += obj->base.size;
-                       }
+                       mapped_count++;
+                       mapped_size += obj->base.size;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
-       size = count = mappable_size = mappable_count = 0;
+       size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (obj->fault_mappable) {
-                       size += i915_gem_obj_ggtt_size(obj);
-                       ++count;
-               }
+               size += obj->base.size;
+               ++count;
+
                if (obj->pin_display) {
-                       mappable_size += i915_gem_obj_ggtt_size(obj);
-                       ++mappable_count;
+                       dpy_size += obj->base.size;
+                       ++dpy_count;
                }
+
                if (obj->madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }
+
                if (obj->mapping) {
-                       pin_mapped_count++;
-                       pin_mapped_size += obj->base.size;
-                       if (obj->pages_pin_count == 0) {
-                               pin_mapped_purgeable_count++;
-                               pin_mapped_purgeable_size += obj->base.size;
-                       }
+                       mapped_count++;
+                       mapped_size += obj->base.size;
                }
        }
+       seq_printf(m, "%u bound objects, %llu bytes\n",
+                  count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
-       seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
-                  mappable_count, mappable_size);
-       seq_printf(m, "%u fault mappable objects, %llu bytes\n",
-                  count, size);
-       seq_printf(m,
-                  "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
-                  pin_mapped_count, pin_mapped_purgeable_count,
-                  pin_mapped_size, pin_mapped_purgeable_size);
+       seq_printf(m, "%u mapped objects, %llu bytes\n",
+                  mapped_count, mapped_size);
+       seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+                  dpy_count, dpy_size);
 
        seq_printf(m, "%llu [%llu] gtt total\n",
                   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
@@ -563,6 +465,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
+               struct drm_i915_file_private *file_priv = file->driver_priv;
+               struct drm_i915_gem_request *request;
                struct task_struct *task;
 
                memset(&stats, 0, sizeof(stats));
@@ -576,10 +480,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
+               mutex_lock(&dev->struct_mutex);
+               request = list_first_entry_or_null(&file_priv->mm.request_list,
+                                                  struct drm_i915_gem_request,
+                                                  client_list);
                rcu_read_lock();
-               task = pid_task(file->pid, PIDTYPE_PID);
+               task = pid_task(request && request->ctx->pid ?
+                               request->ctx->pid : file->pid,
+                               PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
+               mutex_unlock(&dev->struct_mutex);
        }
        mutex_unlock(&dev->filelist_mutex);
 
@@ -589,9 +500,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       uintptr_t list = (uintptr_t) node->info_ent->data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(node);
+       struct drm_device *dev = &dev_priv->drm;
+       bool show_pin_display_only = !!node->info_ent->data;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        int count, ret;
@@ -602,7 +513,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+               if (show_pin_display_only && !obj->pin_display)
                        continue;
 
                seq_puts(m, "   ");
@@ -623,9 +534,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        int ret;
 
@@ -672,7 +582,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                   intel_crtc_get_vblank_counter(crtc));
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
-                       if (INTEL_INFO(dev)->gen >= 4)
+                       if (INTEL_GEN(dev_priv) >= 4)
                                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
                        else
                                addr = I915_READ(DSPADDR(crtc->plane));
@@ -693,9 +603,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 
 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        int total = 0;
@@ -738,9 +647,8 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *req;
        int ret, any;
@@ -754,21 +662,20 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                int count;
 
                count = 0;
-               list_for_each_entry(req, &engine->request_list, list)
+               list_for_each_entry(req, &engine->request_list, link)
                        count++;
                if (count == 0)
                        continue;
 
                seq_printf(m, "%s requests: %d\n", engine->name, count);
-               list_for_each_entry(req, &engine->request_list, list) {
+               list_for_each_entry(req, &engine->request_list, link) {
+                       struct pid *pid = req->ctx->pid;
                        struct task_struct *task;
 
                        rcu_read_lock();
-                       task = NULL;
-                       if (req->pid)
-                               task = pid_task(req->pid, PIDTYPE_PID);
+                       task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
                        seq_printf(m, "    %x @ %d: %s [%d]\n",
-                                  req->seqno,
+                                  req->fence.seqno,
                                   (int) (jiffies - req->emitted_jiffies),
                                   task ? task->comm : "<unknown>",
                                   task ? task->pid : -1);
@@ -793,8 +700,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
        seq_printf(m, "Current sequence (%s): %x\n",
                   engine->name, intel_engine_get_seqno(engine));
-       seq_printf(m, "Current user interrupts (%s): %lx\n",
-                  engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
 
        spin_lock(&b->lock);
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -808,41 +713,25 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-       intel_runtime_pm_get(dev_priv);
 
        for_each_engine(engine, dev_priv)
                i915_ring_seqno_info(m, engine);
 
-       intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
 
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
-       int ret, i, pipe;
+       int i, pipe;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev_priv)) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -881,7 +770,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
-       } else if (INTEL_INFO(dev)->gen >= 8) {
+       } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -937,7 +826,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
-       } else if (IS_VALLEYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
@@ -975,7 +864,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
 
-       } else if (!HAS_PCH_SPLIT(dev)) {
+       } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
@@ -1007,7 +896,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GTIMR));
        }
        for_each_engine(engine, dev_priv) {
-               if (INTEL_INFO(dev)->gen >= 6) {
+               if (INTEL_GEN(dev_priv) >= 6) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   engine->name, I915_READ_IMR(engine));
@@ -1015,16 +904,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                i915_ring_seqno_info(m, engine);
        }
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int i, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1033,14 +920,14 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
-               struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+               struct i915_vma *vma = dev_priv->fence_regs[i].vma;
 
                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
-               if (obj == NULL)
+               if (!vma)
                        seq_puts(m, "unused");
                else
-                       describe_obj(m, obj);
+                       describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }
 
@@ -1051,8 +938,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static int i915_hws_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(node);
        struct intel_engine_cs *engine;
        const u32 *hws;
        int i;
@@ -1077,33 +963,25 @@ i915_error_state_write(struct file *filp,
                       loff_t *ppos)
 {
        struct i915_error_state_file_priv *error_priv = filp->private_data;
-       struct drm_device *dev = error_priv->dev;
-       int ret;
 
        DRM_DEBUG_DRIVER("Resetting error state\n");
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       i915_destroy_error_state(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_destroy_error_state(error_priv->dev);
 
        return cnt;
 }
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
        struct i915_error_state_file_priv *error_priv;
 
        error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
        if (!error_priv)
                return -ENOMEM;
 
-       error_priv->dev = dev;
+       error_priv->dev = &dev_priv->drm;
 
-       i915_error_state_get(dev, error_priv);
+       i915_error_state_get(&dev_priv->drm, error_priv);
 
        file->private_data = error_priv;
 
@@ -1129,7 +1007,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        ssize_t ret_count = 0;
        int ret;
 
-       ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+       ret = i915_error_state_buf_init(&error_str,
+                                       to_i915(error_priv->dev), count, *pos);
        if (ret)
                return ret;
 
@@ -1162,16 +1041,15 @@ static const struct file_operations i915_error_state_fops = {
 static int
 i915_next_seqno_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        int ret;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                return ret;
 
        *val = dev_priv->next_seqno;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        return 0;
 }
@@ -1179,7 +1057,8 @@ i915_next_seqno_get(void *data, u64 *val)
 static int
 i915_next_seqno_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1198,16 +1077,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       if (IS_GEN5(dev)) {
+       if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1217,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-       } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq_sts;
 
                mutex_lock(&dev_priv->rps.hw_lock);
@@ -1244,7 +1120,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
                mutex_unlock(&dev_priv->rps.hw_lock);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
@@ -1256,7 +1132,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                int max_freq;
 
                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-               if (IS_BROXTON(dev)) {
+               if (IS_BROXTON(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
@@ -1272,11 +1148,11 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
                reqf = I915_READ(GEN6_RPNSWREQ);
-               if (IS_GEN9(dev))
+               if (IS_GEN9(dev_priv))
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
-                       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
@@ -1294,9 +1170,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-               if (IS_GEN9(dev))
+               if (IS_GEN9(dev_priv))
                        cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-               else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1305,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
-               if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
@@ -1323,7 +1199,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
-                          (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+                          (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
@@ -1352,22 +1228,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Down threshold: %d%%\n",
                           dev_priv->rps.down_threshold);
 
-               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+               max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
 
-               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+               max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
@@ -1381,6 +1257,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+               seq_printf(m, "Boost freq: %d MHz\n",
+                          intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
                seq_printf(m,
@@ -1401,9 +1279,7 @@ out:
 
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
@@ -1411,6 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        enum intel_engine_id id;
        int j;
 
+       if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+               seq_printf(m, "Wedged\n");
+       if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
+               seq_printf(m, "Reset in progress\n");
+       if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
+               seq_printf(m, "Waiter holding struct mutex\n");
+       if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
+               seq_printf(m, "struct_mutex blocked for reset\n");
+
        if (!i915.enable_hangcheck) {
                seq_printf(m, "Hangcheck disabled\n");
                return 0;
@@ -1419,7 +1304,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        intel_runtime_pm_get(dev_priv);
 
        for_each_engine_id(engine, dev_priv, id) {
-               acthd[id] = intel_ring_get_active_head(engine);
+               acthd[id] = intel_engine_get_active_head(engine);
                seqno[id] = intel_engine_get_seqno(engine);
        }
 
@@ -1440,11 +1325,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                           engine->hangcheck.seqno,
                           seqno[id],
                           engine->last_submitted_seqno);
-               seq_printf(m, "\twaiters? %d\n",
-                          intel_engine_has_waiter(engine));
-               seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
-                          engine->hangcheck.user_interrupts,
-                          READ_ONCE(engine->breadcrumbs.irq_wakeups));
+               seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+                          yesno(intel_engine_has_waiter(engine)),
+                          yesno(test_bit(engine->id,
+                                         &dev_priv->gpu_error.missed_irq_rings)));
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
@@ -1472,9 +1356,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 static int ironlake_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;
@@ -1540,9 +1423,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_uncore_forcewake_domain *fw_domain;
 
        spin_lock_irq(&dev_priv->uncore.lock);
@@ -1558,9 +1439,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 
 static int vlv_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rpmodectl1, rcctl1, pw_status;
 
        intel_runtime_pm_get(dev_priv);
@@ -1598,10 +1477,10 @@ static int vlv_drpc_info(struct seq_file *m)
 
 static int gen6_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+       u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
        unsigned forcewake_count;
        int count = 0, ret;
 
@@ -1629,6 +1508,10 @@ static int gen6_drpc_info(struct seq_file *m)
 
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
+       if (INTEL_GEN(dev_priv) >= 9) {
+               gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
+               gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
+       }
        mutex_unlock(&dev->struct_mutex);
        mutex_lock(&dev_priv->rps.hw_lock);
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -1647,6 +1530,12 @@ static int gen6_drpc_info(struct seq_file *m)
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+       if (INTEL_GEN(dev_priv) >= 9) {
+               seq_printf(m, "Render Well Gating Enabled: %s\n",
+                       yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+               seq_printf(m, "Media Well Gating Enabled: %s\n",
+                       yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+       }
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
@@ -1675,6 +1564,14 @@ static int gen6_drpc_info(struct seq_file *m)
 
        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+       if (INTEL_GEN(dev_priv) >= 9) {
+               seq_printf(m, "Render Power Well: %s\n",
+                       (gen9_powergate_status &
+                        GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+               seq_printf(m, "Media Power Well: %s\n",
+                       (gen9_powergate_status &
+                        GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+       }
 
        /* Not exactly sure what this is */
        seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
@@ -1692,17 +1589,16 @@ static int gen6_drpc_info(struct seq_file *m)
                   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
        seq_printf(m, "RC6++ voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
-       return 0;
+       return i915_forcewake_domains(m, NULL);
 }
 
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_drpc_info(m);
-       else if (INTEL_INFO(dev)->gen >= 6)
+       else if (INTEL_GEN(dev_priv) >= 6)
                return gen6_drpc_info(m);
        else
                return ironlake_drpc_info(m);
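/*
 * The IS_*()/HAS_*()/INTEL_GEN() conversions throughout this patch work
 * because those macros only consult the static device-info block, which
 * hangs off drm_i915_private rather than drm_device.  Hedged sketch of
 * the two central definitions (the in-tree versions accept either
 * pointer type through a type-dispatching __I915__() cast):
 */
#define INTEL_INFO(p)	(&__I915__(p)->info)
#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)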
@@ -1710,9 +1606,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 
 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);
@@ -1725,11 +1619,9 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (!HAS_FBC(dev)) {
+       if (!HAS_FBC(dev_priv)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }
@@ -1743,7 +1635,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                seq_printf(m, "FBC disabled: %s\n",
                           dev_priv->fbc.no_fbc_reason);
 
-       if (INTEL_INFO(dev_priv)->gen >= 7)
+       if (INTEL_GEN(dev_priv) >= 7)
                seq_printf(m, "Compressing: %s\n",
                           yesno(I915_READ(FBC_STATUS2) &
                                 FBC_COMPRESSION_MASK));
@@ -1756,10 +1648,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
-       if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+       if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;
 
        *val = dev_priv->fbc.false_color;
@@ -1769,11 +1660,10 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 
 static int i915_fbc_fc_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 reg;
 
-       if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+       if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;
 
        mutex_lock(&dev_priv->fbc.lock);
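/*
 * i915_fbc_fc_get()/_set() now receive dev_priv directly as their
 * opaque cookie, which implies the registration side hands dev_priv
 * (rather than the drm_device) to debugfs when the file is created --
 * hypothetically something like:
 */
debugfs_create_file("i915_fbc_false_color", S_IRUSR | S_IWUSR,
		    root, to_i915(minor->dev), &i915_fbc_fc_fops);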
@@ -1795,11 +1685,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (!HAS_IPS(dev)) {
+       if (!HAS_IPS(dev_priv)) {
                seq_puts(m, "not supported\n");
                return 0;
        }
@@ -1809,7 +1697,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
        seq_printf(m, "Enabled by kernel parameter: %s\n",
                   yesno(i915.enable_ips));
 
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                seq_puts(m, "Currently: unknown\n");
        } else {
                if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1825,23 +1713,21 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        bool sr_enabled = false;
 
        intel_runtime_pm_get(dev_priv);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_SPLIT(dev_priv))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-       else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
-                IS_I945G(dev) || IS_I945GM(dev))
+       else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+                IS_I945G(dev_priv) || IS_I945GM(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-       else if (IS_I915GM(dev))
+       else if (IS_I915GM(dev_priv))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-       else if (IS_PINEVIEW(dev))
+       else if (IS_PINEVIEW(dev_priv))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-       else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
        intel_runtime_pm_put(dev_priv);
@@ -1854,13 +1740,12 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        unsigned long temp, chipset, gfx;
        int ret;
 
-       if (!IS_GEN5(dev))
+       if (!IS_GEN5(dev_priv))
                return -ENODEV;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1882,27 +1767,23 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
 
-       if (!HAS_CORE_RING_FREQ(dev)) {
+       if (!HAS_LLC(dev_priv)) {
                seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }
 
        intel_runtime_pm_get(dev_priv);
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                goto out;
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq =
                        dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1922,7 +1803,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(dev_priv, (gpu_freq *
-                               (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+                               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                                 GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
@@ -1937,9 +1818,8 @@ out:
 
 static int i915_opregion(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;
 
@@ -1958,10 +1838,7 @@ out:
 
 static int i915_vbt(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 
        if (opregion->vbt)
                seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -1971,8 +1848,8 @@ static int i915_vbt(struct seq_file *m, void *unused)
 
 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;
        int ret;
@@ -1982,8 +1859,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       if (to_i915(dev)->fbdev) {
-               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+       if (dev_priv->fbdev) {
+               fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 
                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fbdev_fb->base.width,
@@ -2019,19 +1896,17 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static void describe_ctx_ringbuf(struct seq_file *m,
-                                struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-                  ringbuf->space, ringbuf->head, ringbuf->tail,
-                  ringbuf->last_retired_head);
+                  ring->space, ring->head, ring->tail,
+                  ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2042,18 +1917,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
        list_for_each_entry(ctx, &dev_priv->context_list, link) {
                seq_printf(m, "HW context %u ", ctx->hw_id);
-               if (IS_ERR(ctx->file_priv)) {
-                       seq_puts(m, "(deleted) ");
-               } else if (ctx->file_priv) {
-                       struct pid *pid = ctx->file_priv->file->pid;
+               if (ctx->pid) {
                        struct task_struct *task;
 
-                       task = get_pid_task(pid, PIDTYPE_PID);
+                       task = get_pid_task(ctx->pid, PIDTYPE_PID);
                        if (task) {
                                seq_printf(m, "(%s [%d]) ",
                                           task->comm, task->pid);
                                put_task_struct(task);
                        }
+               } else if (IS_ERR(ctx->file_priv)) {
+                       seq_puts(m, "(deleted) ");
                } else {
                        seq_puts(m, "(kernel) ");
                }
@@ -2067,9 +1941,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
                        seq_printf(m, "%s: ", engine->name);
                        seq_putc(m, ce->initialised ? 'I' : 'i');
                        if (ce->state)
-                               describe_obj(m, ce->state);
-                       if (ce->ringbuf)
-                               describe_ctx_ringbuf(m, ce->ringbuf);
+                               describe_obj(m, ce->state->obj);
+                       if (ce->ring)
+                               describe_ctx_ring(m, ce->ring);
                        seq_putc(m, '\n');
                }
 
@@ -2085,36 +1959,34 @@ static void i915_dump_lrc_obj(struct seq_file *m,
                              struct i915_gem_context *ctx,
                              struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+       struct i915_vma *vma = ctx->engine[engine->id].state;
        struct page *page;
-       uint32_t *reg_state;
        int j;
-       unsigned long ggtt_offset = 0;
 
        seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
 
-       if (ctx_obj == NULL) {
-               seq_puts(m, "\tNot allocated\n");
+       if (!vma) {
+               seq_puts(m, "\tFake context\n");
                return;
        }
 
-       if (!i915_gem_obj_ggtt_bound(ctx_obj))
-               seq_puts(m, "\tNot bound in GGTT\n");
-       else
-               ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+       if (vma->flags & I915_VMA_GLOBAL_BIND)
+               seq_printf(m, "\tBound in GGTT at 0x%08x\n",
+                          i915_ggtt_offset(vma));
 
-       if (i915_gem_object_get_pages(ctx_obj)) {
-               seq_puts(m, "\tFailed to get pages for context object\n");
+       if (i915_gem_object_get_pages(vma->obj)) {
+               seq_puts(m, "\tFailed to get pages for context object\n\n");
                return;
        }
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       if (!WARN_ON(page == NULL)) {
-               reg_state = kmap_atomic(page);
+       page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
+       if (page) {
+               u32 *reg_state = kmap_atomic(page);
 
                for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-                       seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                                  ggtt_offset + 4096 + (j * 4),
+                       seq_printf(m,
+                                  "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                  j * 4,
                                   reg_state[j], reg_state[j + 1],
                                   reg_state[j + 2], reg_state[j + 3]);
                }
@@ -2126,9 +1998,8 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2153,9 +2024,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 static int i915_execlists(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        u32 status_pointer;
        u8 read_pointer;
@@ -2190,7 +2060,7 @@ static int i915_execlists(struct seq_file *m, void *data)
                status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
                seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-               read_pointer = engine->next_context_status_buffer;
+               read_pointer = GEN8_CSB_READ_PTR(status_pointer);
                write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
                if (read_pointer > write_pointer)
                        write_pointer += GEN8_CSB_ENTRIES;
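/*
 * The read pointer is now taken from the hardware status register via
 * GEN8_CSB_READ_PTR() instead of a shadow copy cached in the engine.
 * Worked example of the wrap handling, assuming the usual six context
 * status buffer slots (GEN8_CSB_ENTRIES == 6): with read_pointer == 5
 * and write_pointer == 1, the write pointer is bumped to 7, so the
 * pending entries are 6 and 7, i.e. slots 0 and 1 once the consumer
 * indexes modulo GEN8_CSB_ENTRIES.
 */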
@@ -2256,9 +2126,8 @@ static const char *swizzle_string(unsigned swizzle)
 
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2271,7 +2140,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-       if (IS_GEN3(dev) || IS_GEN4(dev)) {
+       if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2280,7 +2149,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -2289,7 +2158,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
-               if (INTEL_INFO(dev)->gen >= 8)
+               if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
@@ -2329,9 +2198,9 @@ static int per_file_ctx(int id, void *ptr, void *data)
        return 0;
 }
 
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+                           struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;
@@ -2350,9 +2219,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
        }
 }
 
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+                           struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        if (IS_GEN6(dev_priv))
@@ -2384,22 +2253,23 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_file *file;
+       int ret;
 
-       int ret = mutex_lock_interruptible(&dev->struct_mutex);
+       mutex_lock(&dev->filelist_mutex);
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
-               return ret;
+               goto out_unlock;
+
        intel_runtime_pm_get(dev_priv);
 
-       if (INTEL_INFO(dev)->gen >= 8)
-               gen8_ppgtt_info(m, dev);
-       else if (INTEL_INFO(dev)->gen >= 6)
-               gen6_ppgtt_info(m, dev);
+       if (INTEL_GEN(dev_priv) >= 8)
+               gen8_ppgtt_info(m, dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               gen6_ppgtt_info(m, dev_priv);
 
-       mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;
@@ -2407,19 +2277,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
                task = get_pid_task(file->pid, PIDTYPE_PID);
                if (!task) {
                        ret = -ESRCH;
-                       goto out_unlock;
+                       goto out_rpm;
                }
                seq_printf(m, "\nproc: %s\n", task->comm);
                put_task_struct(task);
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }
-out_unlock:
-       mutex_unlock(&dev->filelist_mutex);
 
+out_rpm:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
-
+out_unlock:
+       mutex_unlock(&dev->filelist_mutex);
        return ret;
 }
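/*
 * The reshuffled error paths follow the unwind-in-reverse rule:
 * filelist_mutex is now taken first and released last, and each label
 * undoes exactly what was acquired before the failure point.  Skeleton
 * of the resulting shape:
 */
	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;	/* only filelist_mutex held */
	intel_runtime_pm_get(dev_priv);
	/* ... body; mid-loop failures jump to out_rpm ... */
out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;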
 
@@ -2434,23 +2304,41 @@ static int count_irq_waiters(struct drm_i915_private *i915)
        return count;
 }
 
+static const char *rps_power_to_str(unsigned int power)
+{
+       static const char * const strings[] = {
+               [LOW_POWER] = "low power",
+               [BETWEEN] = "mixed",
+               [HIGH_POWER] = "high power",
+       };
+
+       if (power >= ARRAY_SIZE(strings) || !strings[power])
+               return "unknown";
+
+       return strings[power];
+}
+
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_file *file;
 
        seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
        seq_printf(m, "GPU busy? %s [%x]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
-       seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+       seq_printf(m, "Frequency requested %d\n",
+                  intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+       seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
                   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
                   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
                   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+       seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
+                  intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                  intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
 
        mutex_lock(&dev->filelist_mutex);
        spin_lock(&dev_priv->rps.client_lock);
@@ -2467,27 +2355,44 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                           list_empty(&file_priv->rps.link) ? "" : ", active");
                rcu_read_unlock();
        }
-       seq_printf(m, "Semaphore boosts: %d%s\n",
-                  dev_priv->rps.semaphores.boosts,
-                  list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
-       seq_printf(m, "MMIO flip boosts: %d%s\n",
-                  dev_priv->rps.mmioflips.boosts,
-                  list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
-       seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
+       seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
        spin_unlock(&dev_priv->rps.client_lock);
        mutex_unlock(&dev->filelist_mutex);
 
+       if (INTEL_GEN(dev_priv) >= 6 &&
+           dev_priv->rps.enabled &&
+           dev_priv->gt.active_engines) {
+               u32 rpup, rpupei;
+               u32 rpdown, rpdownei;
+
+               intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+               rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+               rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+               rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+               rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+               intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+               seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+                          rps_power_to_str(dev_priv->rps.power));
+               seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
+                          100 * rpup / rpupei,
+                          dev_priv->rps.up_threshold);
+               seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
+                          100 * rpdown / rpdownei,
+                          dev_priv->rps.down_threshold);
+       } else {
+               seq_puts(m, "\nRPS Autotuning inactive\n");
+       }
+
        return 0;
 }
 
 static int i915_llc(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const bool edram = INTEL_GEN(dev_priv) > 8;
 
-       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
        seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
                   intel_uncore_edram_size(dev_priv)/1024/1024);
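/*
 * On the new RPS autotuning dump above: each evaluation-interval
 * counter is reported as a percentage, 100 * rpup / rpupei being the
 * share of the last EI spent busy in the "up" direction, printed next
 * to the up_threshold that would trigger a frequency bump.  E.g.
 * rpup = 3200 and rpupei = 12800 prints "Avg. up: 25%".  There is no
 * explicit guard against a zero rpupei; presumably the counters are
 * nonzero whenever RPS is enabled and engines are active, which is
 * exactly what the surrounding if () checks.
 */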
 
@@ -2496,8 +2401,7 @@ static int i915_llc(struct seq_file *m, void *data)
 
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        u32 tmp, i;
 
@@ -2543,6 +2447,7 @@ static void i915_guc_client_info(struct seq_file *m,
                                 struct i915_guc_client *client)
 {
        struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        uint64_t tot = 0;
 
        seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
@@ -2553,27 +2458,26 @@ static void i915_guc_client_info(struct seq_file *m,
                client->wq_size, client->wq_offset, client->wq_tail);
 
        seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
-       seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
        seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
        seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-       for_each_engine(engine, dev_priv) {
+       for_each_engine_id(engine, dev_priv, id) {
+               u64 submissions = client->submissions[id];
+               tot += submissions;
                seq_printf(m, "\tSubmissions: %llu %s\n",
-                               client->submissions[engine->id],
-                               engine->name);
-               tot += client->submissions[engine->id];
+                               submissions, engine->name);
        }
        seq_printf(m, "\tTotal: %llu\n", tot);
 }
 
 static int i915_guc_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        u64 total = 0;
 
        if (!HAS_GUC_SCHED(dev_priv))
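/*
 * for_each_engine_id() iterates the engines while also exposing the
 * numeric id, so the loop body can index client->submissions[] without
 * re-deriving engine->id on every use.  Hedged sketch of the macro as
 * it looked in this era:
 */
#define for_each_engine_id(engine__, dev_priv__, id__) \
	for ((id__) = 0; (id__) < I915_NUM_ENGINES; (id__)++) \
		for_each_if (((engine__) = &(dev_priv__)->engine[(id__)])->i915)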
@@ -2600,11 +2504,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
        seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
        seq_printf(m, "\nGuC submissions:\n");
-       for_each_engine(engine, dev_priv) {
+       for_each_engine_id(engine, dev_priv, id) {
+               u64 submissions = guc.submissions[id];
+               total += submissions;
                seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-                       engine->name, guc.submissions[engine->id],
-                       guc.last_seqno[engine->id]);
-               total += guc.submissions[engine->id];
+                       engine->name, submissions, guc.last_seqno[id]);
        }
        seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2618,18 +2522,16 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
-       u32 *log;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_i915_gem_object *obj;
        int i = 0, pg;
 
-       if (!log_obj)
+       if (!dev_priv->guc.log_vma)
                return 0;
 
-       for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
-               log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+       obj = dev_priv->guc.log_vma->obj;
+       for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
+               u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
 
                for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
                        seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -2646,15 +2548,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 psrperf = 0;
        u32 stat[3];
        enum pipe pipe;
        bool enabled = false;
 
-       if (!HAS_PSR(dev)) {
+       if (!HAS_PSR(dev_priv)) {
                seq_puts(m, "PSR not supported\n");
                return 0;
        }
@@ -2671,7 +2571,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        seq_printf(m, "Re-enable work scheduled: %s\n",
                   yesno(work_busy(&dev_priv->psr.work.work)));
 
-       if (HAS_DDI(dev))
+       if (HAS_DDI(dev_priv))
                enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
        else {
                for_each_pipe(dev_priv, pipe) {
@@ -2688,7 +2588,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
        seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
-       if (!HAS_DDI(dev))
+       if (!HAS_DDI(dev_priv))
                for_each_pipe(dev_priv, pipe) {
                        if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
                            (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
@@ -2700,7 +2600,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
         * VLV/CHV PSR has no kind of performance counter
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                psrperf = I915_READ(EDP_PSR_PERF_CNT) &
                        EDP_PSR_PERF_CNT_MASK;
 
@@ -2714,8 +2614,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *connector;
        struct intel_dp *intel_dp = NULL;
        int ret;
@@ -2754,13 +2654,11 @@ out:
 
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u64 power;
        u32 units;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
        intel_runtime_pm_get(dev_priv);
@@ -2780,9 +2678,8 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");
@@ -2792,22 +2689,20 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
                   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
        seq_printf(m, "Usage count: %d\n",
-                  atomic_read(&dev->dev->power.usage_count));
+                  atomic_read(&dev_priv->drm.dev->power.usage_count));
 #else
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
        seq_printf(m, "PCI device power state: %s [%d]\n",
-                  pci_power_name(dev_priv->drm.pdev->current_state),
-                  dev_priv->drm.pdev->current_state);
+                  pci_power_name(pdev->current_state),
+                  pdev->current_state);
 
        return 0;
 }
 
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        int i;
 
@@ -2840,12 +2735,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 
 static int i915_dmc_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_csr *csr;
 
-       if (!HAS_CSR(dev)) {
+       if (!HAS_CSR(dev_priv)) {
                seq_puts(m, "not supported\n");
                return 0;
        }
@@ -2863,12 +2756,12 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));
 
-       if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+       if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(SKL_CSR_DC3_DC5_COUNT));
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));
-       } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+       } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(BXT_CSR_DC3_DC5_COUNT));
        }
@@ -2905,8 +2798,8 @@ static void intel_encoder_info(struct seq_file *m,
                               struct intel_crtc *intel_crtc,
                               struct intel_encoder *intel_encoder)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_connector *intel_connector;
        struct drm_encoder *encoder;
@@ -2932,8 +2825,8 @@ static void intel_encoder_info(struct seq_file *m,
 
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder;
        struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2967,6 +2860,9 @@ static void intel_dp_info(struct seq_file *m,
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
        if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_info(m, &intel_connector->panel);
+
+       drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+                               &intel_dp->aux);
 }
 
 static void intel_hdmi_info(struct seq_file *m,
@@ -3031,12 +2927,11 @@ static void intel_connector_info(struct seq_file *m,
                intel_seq_print_mode(m, 2, mode);
 }
 
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 state;
 
-       if (IS_845G(dev) || IS_I865G(dev))
+       if (IS_845G(dev_priv) || IS_I865G(dev_priv))
                state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
        else
                state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3044,9 +2939,9 @@ static bool cursor_active(struct drm_device *dev, int pipe)
        return state;
 }
 
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+                           int pipe, int *x, int *y)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pos;
 
        pos = I915_READ(CURPOS(pipe));
@@ -3059,7 +2954,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
        if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
                *y = -*y;
 
-       return cursor_active(dev, pipe);
+       return cursor_active(dev_priv, pipe);
 }
 
 static const char *plane_type(enum drm_plane_type type)
@@ -3089,12 +2984,12 @@ static const char *plane_rotation(unsigned int rotation)
         */
        snprintf(buf, sizeof(buf),
                 "%s%s%s%s%s%s(0x%08x)",
-                (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
-                (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
-                (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
-                (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
-                (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
-                (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
+                (rotation & DRM_ROTATE_0) ? "0 " : "",
+                (rotation & DRM_ROTATE_90) ? "90 " : "",
+                (rotation & DRM_ROTATE_180) ? "180 " : "",
+                (rotation & DRM_ROTATE_270) ? "270 " : "",
+                (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
+                (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
 
        return buf;
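/*
 * The rotation hunk tracks a DRM core change: the DRM_ROTATE_* and
 * DRM_REFLECT_* constants are now bitmask values themselves rather than
 * bit indices, so the BIT() wrapping is dropped.  Old versus new test:
 */
	if (rotation & BIT(DRM_ROTATE_90)) { /* old: bit index, needs BIT() */ }
	if (rotation & DRM_ROTATE_90) { /* new: already a mask */ }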
@@ -3102,13 +2997,14 @@ static const char *plane_rotation(unsigned int rotation)
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_plane *intel_plane;
 
        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
+               char *format_name;
 
                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
@@ -3117,6 +3013,12 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 
                state = plane->state;
 
+               if (state->fb) {
+                       format_name = drm_get_format_name(state->fb->pixel_format);
+               } else {
+                       format_name = kstrdup("N/A", GFP_KERNEL);
+               }
+
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
@@ -3130,8 +3032,10 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                           ((state->src_w & 0xffff) * 15625) >> 10,
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
-                          state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
+                          format_name,
                           plane_rotation(state->rotation));
+
+               kfree(format_name);
        }
 }
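/*
 * drm_get_format_name() now returns a freshly allocated string (the
 * helper apparently lost its static buffer to become thread-safe), so
 * both branches are arranged to yield a kfree()-able pointer: the real
 * format name from the helper, or a kstrdup()'d "N/A" placeholder when
 * the plane has no framebuffer.  Equivalently:
 */
	format_name = state->fb ? drm_get_format_name(state->fb->pixel_format)
				: kstrdup("N/A", GFP_KERNEL);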
 
@@ -3165,9 +3069,8 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 
 static int i915_display_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        struct drm_connector *connector;
 
@@ -3191,7 +3094,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
                if (pipe_config->base.active) {
                        intel_crtc_info(m, crtc);
 
-                       active = cursor_position(dev, crtc->pipe, &x, &y);
+                       active = cursor_position(dev_priv, crtc->pipe, &x, &y);
                        seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
                                   yesno(crtc->cursor_base),
                                   x, y, crtc->base.cursor->state->crtc_w,
@@ -3220,15 +3123,14 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
-       int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       int num_rings = INTEL_INFO(dev_priv)->num_rings;
        enum intel_engine_id id;
        int j, ret;
 
-       if (!i915_semaphore_is_enabled(dev_priv)) {
+       if (!i915.semaphores) {
                seq_puts(m, "Semaphores are disabled\n");
                return 0;
        }
@@ -3238,11 +3140,11 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
                return ret;
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev_priv)) {
                struct page *page;
                uint64_t *seqno;
 
-               page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+               page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
 
                seqno = (uint64_t *)kmap_atomic(page);
                for_each_engine_id(engine, dev_priv, id) {
@@ -3293,9 +3195,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        drm_modeset_lock_all(dev);
@@ -3323,9 +3224,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
        int i;
        int ret;
        struct intel_engine_cs *engine;
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_workarounds *workarounds = &dev_priv->workarounds;
        enum intel_engine_id id;
 
@@ -3361,15 +3261,14 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 static int i915_ddb_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
        int plane;
 
-       if (INTEL_INFO(dev)->gen < 9)
+       if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
        drm_modeset_lock_all(dev);
@@ -3399,7 +3298,8 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 }
 
 static void drrs_status_per_crtc(struct seq_file *m,
-               struct drm_device *dev, struct intel_crtc *intel_crtc)
+                                struct drm_device *dev,
+                                struct intel_crtc *intel_crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
@@ -3468,8 +3368,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
 static int i915_drrs_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *intel_crtc;
        int active_crtc_cnt = 0;
 
@@ -3492,14 +3392,14 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 
 struct pipe_crc_info {
        const char *name;
-       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
        enum pipe pipe;
 };
 
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        struct drm_connector *connector;
@@ -3528,10 +3428,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(info->dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-       if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+       if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
                return -ENODEV;
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3552,7 +3452,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(info->dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3579,8 +3479,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
                   loff_t *pos)
 {
        struct pipe_crc_info *info = filep->private_data;
-       struct drm_device *dev = info->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
        char buf[PIPE_CRC_BUFFER_LEN];
        int n_entries;
@@ -3621,7 +3520,6 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
        while (n_entries > 0) {
                struct intel_pipe_crc_entry *entry =
                        &pipe_crc->entries[pipe_crc->tail];
-               int ret;
 
                if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
                             INTEL_PIPE_CRC_ENTRIES_NR) < 1)
@@ -3638,8 +3536,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
 
                spin_unlock_irq(&pipe_crc->lock);
 
-               ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
-               if (ret == PIPE_CRC_LINE_LEN)
+               if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
                        return -EFAULT;
 
                user_buf += PIPE_CRC_LINE_LEN;
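/*
 * A bug fix folded into the conversion: copy_to_user() returns the
 * number of bytes it could NOT copy, so the old test
 * (ret == PIPE_CRC_LINE_LEN) only caught a total failure and silently
 * accepted partially copied lines.  The new form is the standard idiom,
 * where any nonzero return means -EFAULT:
 */
	if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
		return -EFAULT;	/* some bytes were left uncopied */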
@@ -3678,11 +3575,11 @@ static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
                                enum pipe pipe)
 {
-       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = to_i915(minor->dev);
        struct dentry *ent;
        struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 
-       info->dev = dev;
+       info->dev_priv = dev_priv;
        ent = debugfs_create_file(info->name, S_IRUGO, root, info,
                                  &i915_pipe_crc_fops);
        if (!ent)
@@ -3712,8 +3609,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        int i;
 
        for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3725,9 +3621,7 @@ static int display_crc_ctl_show(struct seq_file *m, void *data)
 
 static int display_crc_ctl_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-
-       return single_open(file, display_crc_ctl_show, dev);
+       return single_open(file, display_crc_ctl_show, inode->i_private);
 }
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -3750,9 +3644,11 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
        return 0;
 }
 
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe,
                                     enum intel_pipe_crc_source *source)
 {
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        struct intel_digital_port *dig_port;
@@ -3802,16 +3698,15 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
        return ret;
 }
 
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
                if (ret)
                        return ret;
        }
@@ -3829,7 +3724,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_D:
-               if (!IS_CHERRYVIEW(dev))
+               if (!IS_CHERRYVIEW(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
                need_stable_symbols = true;
@@ -3873,16 +3768,15 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                 enum pipe pipe,
                                 enum intel_pipe_crc_source *source,
                                 uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
                if (ret)
                        return ret;
        }
@@ -3892,24 +3786,24 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
                break;
        case INTEL_PIPE_CRC_SOURCE_TV:
-               if (!SUPPORTS_TV(dev))
+               if (!SUPPORTS_TV(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_B:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_C:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_D:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
                need_stable_symbols = true;
@@ -3933,7 +3827,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
        if (need_stable_symbols) {
                uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-               WARN_ON(!IS_G4X(dev));
+               WARN_ON(!IS_G4X(dev_priv));
 
                I915_WRITE(PORT_DFT_I9XX,
                           I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
@@ -3949,10 +3843,9 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        switch (pipe) {
@@ -3974,10 +3867,9 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 
 }
 
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        if (pipe == PIPE_A)
@@ -4018,9 +3910,10 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
        return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+                                       bool enable)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
        struct intel_crtc_state *pipe_config;
@@ -4054,7 +3947,7 @@ out:
                drm_atomic_state_free(state);
 }
 
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
@@ -4070,8 +3963,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
                break;
        case INTEL_PIPE_CRC_SOURCE_PF:
-               if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_trans_edp_pipe_A_crc_wa(dev, true);
+               if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+                       hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
 
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
                break;
@@ -4085,13 +3978,14 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+                              enum pipe pipe,
                               enum intel_pipe_crc_source source)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-       struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
-                                                                       pipe));
+       struct intel_crtc *crtc =
+                       to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
        enum intel_display_power_domain power_domain;
        u32 val = 0; /* shut up gcc */
        int ret;
@@ -4109,16 +4003,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                return -EIO;
        }
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                ret = i8xx_pipe_crc_ctl_reg(&source, &val);
-       else if (INTEL_INFO(dev)->gen < 5)
-               ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-       else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-       else if (IS_GEN5(dev) || IS_GEN6(dev))
+       else if (INTEL_GEN(dev_priv) < 5)
+               ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
                ret = ilk_pipe_crc_ctl_reg(&source, &val);
        else
-               ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+               ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
 
        if (ret != 0)
                goto out;
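
The call ladder above is the driver's usual generation dispatch, now keyed off dev_priv-based feature macros rather than a drm_device. As a hypothetical refactor of the same ladder into a helper (the helper name is made up; the callees and their signatures are the ones in this file):

static int pick_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe,
			    enum intel_pipe_crc_source *source, uint32_t *val)
{
	if (IS_GEN2(dev_priv))
		return i8xx_pipe_crc_ctl_reg(source, val);
	if (INTEL_GEN(dev_priv) < 5)
		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
		return ilk_pipe_crc_ctl_reg(source, val);
	return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);	/* gen7+ */
}
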
@@ -4182,12 +4076,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 
                kfree(entries);
 
-               if (IS_G4X(dev))
-                       g4x_undo_pipe_scramble_reset(dev, pipe);
-               else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-                       vlv_undo_pipe_scramble_reset(dev, pipe);
-               else if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_trans_edp_pipe_A_crc_wa(dev, false);
+               if (IS_G4X(dev_priv))
+                       g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+               else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+               else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+                       hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
 
                hsw_enable_ips(crtc);
        }
@@ -4291,7 +4185,8 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
        return -EINVAL;
 }
 
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+                                char *buf, size_t len)
 {
 #define N_WORDS 3
        int n_words;
@@ -4322,14 +4217,14 @@ static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
                return -EINVAL;
        }
 
-       return pipe_crc_set_source(dev, pipe, source);
+       return pipe_crc_set_source(dev_priv, pipe, source);
 }
 
 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
        char *tmpbuf;
        int ret;
 
@@ -4352,7 +4247,7 @@ static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
        }
        tmpbuf[len] = '\0';
 
-       ret = display_crc_ctl_parse(dev, tmpbuf, len);
+       ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
 
 out:
        kfree(tmpbuf);
@@ -4373,8 +4268,8 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 };
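
display_crc_ctl_write() above follows the standard shape for a small debugfs write handler: bound the length, copy from userspace, NUL-terminate, then parse. A self-contained sketch of that shape (my_parse() is a hypothetical stand-in):

#include <linux/slab.h>
#include <linux/uaccess.h>

static int my_parse(const char *buf, size_t len);	/* hypothetical parser */

static ssize_t small_string_write(struct file *file, const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;
	if (len > PAGE_SIZE - 1)	/* leave room for the NUL */
		return -E2BIG;

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';	/* guarantee a terminated string */

	ret = my_parse(tmpbuf, len);
out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
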
 
 static ssize_t i915_displayport_test_active_write(struct file *file,
-                                           const char __user *ubuf,
-                                           size_t len, loff_t *offp)
+                                                 const char __user *ubuf,
+                                                 size_t len, loff_t *offp)
 {
        char *input_buffer;
        int status = 0;
@@ -4404,7 +4299,6 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4442,7 +4336,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4462,11 +4355,12 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 }
 
 static int i915_displayport_test_active_open(struct inode *inode,
-                                      struct file *file)
+                                            struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_active_show, dev);
+       return single_open(file, i915_displayport_test_active_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_active_fops = {
@@ -4486,7 +4380,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4502,11 +4395,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
        return 0;
 }
 static int i915_displayport_test_data_open(struct inode *inode,
-                                      struct file *file)
+                                          struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_data_show, dev);
+       return single_open(file, i915_displayport_test_data_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_data_fops = {
@@ -4525,7 +4419,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4544,9 +4437,10 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 static int i915_displayport_test_type_open(struct inode *inode,
                                       struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_type_show, dev);
+       return single_open(file, i915_displayport_test_type_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_type_fops = {
@@ -4559,13 +4453,14 @@ static const struct file_operations i915_displayport_test_type_fops = {
 
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
+       struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
-       else if (IS_VALLEYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else
                num_levels = ilk_wm_max_level(dev) + 1;
@@ -4579,8 +4474,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
-               if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
-                   IS_CHERRYVIEW(dev))
+               if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+                   IS_CHERRYVIEW(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;
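
The scaling above normalizes every latency to 0.1us units before printing: gen9/vlv/chv report plain microseconds (so x10), and ILK-style WM1+ values are in 0.5us units (so x5). The WM0 bullet of the comment is cut off by the hunk, but WM0 on those older platforms is already in 0.1us units and passes through unscaled. A sketch of the conversion as a helper (hypothetical name, same logic):

/* Normalize a raw watermark latency to 0.1us units for display. */
static unsigned int wm_latency_in_tenths(struct drm_i915_private *dev_priv,
					 int level, unsigned int raw)
{
	if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv))
		return raw * 10;	/* raw is in 1us units */
	if (level > 0)
		return raw * 5;		/* ILK WM1+: 0.5us units */
	return raw;			/* ILK WM0: already 0.1us units */
}
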
@@ -4594,14 +4489,13 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.pri_latency;
+               latencies = dev_priv->wm.pri_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4610,14 +4504,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.spr_latency;
+               latencies = dev_priv->wm.spr_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4626,14 +4519,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.cur_latency;
+               latencies = dev_priv->wm.cur_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4642,48 +4534,49 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
 
 static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 5)
+       if (INTEL_GEN(dev_priv) < 5)
                return -ENODEV;
 
-       return single_open(file, pri_wm_latency_show, dev);
+       return single_open(file, pri_wm_latency_show, dev_priv);
 }
 
 static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return -ENODEV;
 
-       return single_open(file, spr_wm_latency_show, dev);
+       return single_open(file, spr_wm_latency_show, dev_priv);
 }
 
 static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return -ENODEV;
 
-       return single_open(file, cur_wm_latency_show, dev);
+       return single_open(file, cur_wm_latency_show, dev_priv);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, uint16_t wm[8])
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
+       struct drm_device *dev = &dev_priv->drm;
        uint16_t new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
-       else if (IS_VALLEYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else
                num_levels = ilk_wm_max_level(dev) + 1;
@@ -4717,14 +4610,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.pri_latency;
+               latencies = dev_priv->wm.pri_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4733,14 +4625,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.spr_latency;
+               latencies = dev_priv->wm.spr_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4749,14 +4640,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.cur_latency;
+               latencies = dev_priv->wm.cur_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4791,8 +4681,7 @@ static const struct file_operations i915_cur_wm_latency_fops = {
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4802,8 +4691,7 @@ i915_wedged_get(void *data, u64 *val)
 static int
 i915_wedged_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        /*
         * There is no safeguard against this debugfs entry colliding
@@ -4833,8 +4721,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = dev_priv->gpu_error.missed_irq_rings;
        return 0;
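
These get/set pairs plug into DEFINE_SIMPLE_ATTRIBUTE (visible in the hunk headers), which generates the file_operations from the pair plus a printf format; after this conversion the void *data it passes along is dev_priv itself rather than a drm_device. A freestanding sketch of the mechanism, using a made-up container so no locking questions arise:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct knob {
	u64 value;	/* hypothetical state the attribute exposes */
};

static int knob_get(void *data, u64 *val)
{
	*val = ((struct knob *)data)->value;
	return 0;
}

static int knob_set(void *data, u64 val)
{
	((struct knob *)data)->value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(knob_fops, knob_get, knob_set, "0x%08llx\n");

/* registration:
 * debugfs_create_file("knob", S_IRUSR | S_IWUSR, root, &my_knob, &knob_fops);
 */
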
@@ -4843,8 +4730,8 @@ i915_ring_missed_irq_get(void *data, u64 *val)
 static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        /* Lock against concurrent debugfs callers */
@@ -4864,8 +4751,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
 static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4875,8 +4761,7 @@ i915_ring_test_irq_get(void *data, u64 *val)
 static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        val &= INTEL_INFO(dev_priv)->ring_mask;
        DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4908,8 +4793,8 @@ i915_drop_caches_get(void *data, u64 *val)
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4921,7 +4806,9 @@ i915_drop_caches_set(void *data, u64 val)
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gem_wait_for_idle(dev_priv);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        goto unlock;
        }
@@ -4948,38 +4835,25 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 static int
 i915_max_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
+       struct drm_i915_private *dev_priv = data;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
-       if (ret)
-               return ret;
-
        *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
        return 0;
 }
 
 static int
 i915_max_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 hw_max, hw_min;
        int ret;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5015,38 +4889,25 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
 static int
 i915_min_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
+       struct drm_i915_private *dev_priv = data;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
-       if (ret)
-               return ret;
-
        *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
        return 0;
 }
 
 static int
 i915_min_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 hw_max, hw_min;
        int ret;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5061,7 +4922,8 @@ i915_min_freq_set(void *data, u64 val)
        hw_max = dev_priv->rps.max_freq;
        hw_min = dev_priv->rps.min_freq;
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+       if (val < hw_min ||
+           val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
@@ -5082,12 +4944,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
 static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        u32 snpcr;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
                return -ENODEV;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -5098,7 +4960,7 @@ i915_cache_sharing_get(void *data, u64 *val)
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5108,11 +4970,10 @@ i915_cache_sharing_get(void *data, u64 *val)
 static int
 i915_cache_sharing_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 snpcr;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
                return -ENODEV;
 
        if (val > 3)
@@ -5135,18 +4996,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
 
-struct sseu_dev_status {
-       unsigned int slice_total;
-       unsigned int subslice_total;
-       unsigned int subslice_per_slice;
-       unsigned int eu_total;
-       unsigned int eu_per_subslice;
-};
-
-static void cherryview_sseu_device_status(struct drm_device *dev,
-                                         struct sseu_dev_status *stat)
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+                                         struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        int ss_max = 2;
        int ss;
        u32 sig1[ss_max], sig2[ss_max];
@@ -5163,28 +5015,27 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
                        /* skip disabled subslice */
                        continue;
 
-               stat->slice_total = 1;
-               stat->subslice_per_slice++;
+               sseu->slice_mask = BIT(0);
+               sseu->subslice_mask |= BIT(ss);
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-               stat->eu_total += eu_cnt;
-               stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+               sseu->eu_total += eu_cnt;
+               sseu->eu_per_subslice = max_t(unsigned int,
+                                             sseu->eu_per_subslice, eu_cnt);
        }
-       stat->subslice_total = stat->subslice_per_slice;
 }
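
The rework above swaps running counters for bitmasks: slice membership becomes BIT(s) in slice_mask, and totals are recovered with hweight*(). sseu_subslice_total(), used later in this diff, is presumably the mask-based product; a plausible sketch (not necessarily the in-tree definition):

#include <linux/bitops.h>

static unsigned int sketch_subslice_total(const struct sseu_dev_info *sseu)
{
	/* the same subslice_mask applies to every enabled slice */
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}
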
 
-static void gen9_sseu_device_status(struct drm_device *dev,
-                                   struct sseu_dev_status *stat)
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+                                   struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        int s_max = 3, ss_max = 4;
        int s, ss;
        u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
 
        /* BXT has a single slice and at most 3 subslices. */
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                s_max = 1;
                ss_max = 3;
        }
@@ -5205,126 +5056,134 @@ static void gen9_sseu_device_status(struct drm_device *dev,
                     GEN9_PGCTL_SSB_EU311_ACK;
 
        for (s = 0; s < s_max; s++) {
-               unsigned int ss_cnt = 0;
-
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;
 
-               stat->slice_total++;
+               sseu->slice_mask |= BIT(s);
 
-               if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-                       ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+                       sseu->subslice_mask =
+                               INTEL_INFO(dev_priv)->sseu.subslice_mask;
 
                for (ss = 0; ss < ss_max; ss++) {
                        unsigned int eu_cnt;
 
-                       if (IS_BROXTON(dev) &&
-                           !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-                               /* skip disabled subslice */
-                               continue;
+                       if (IS_BROXTON(dev_priv)) {
+                               if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+                                       /* skip disabled subslice */
+                                       continue;
 
-                       if (IS_BROXTON(dev))
-                               ss_cnt++;
+                               sseu->subslice_mask |= BIT(ss);
+                       }
 
                        eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
                                               eu_mask[ss%2]);
-                       stat->eu_total += eu_cnt;
-                       stat->eu_per_subslice = max(stat->eu_per_subslice,
-                                                   eu_cnt);
+                       sseu->eu_total += eu_cnt;
+                       sseu->eu_per_subslice = max_t(unsigned int,
+                                                     sseu->eu_per_subslice,
+                                                     eu_cnt);
                }
-
-               stat->subslice_total += ss_cnt;
-               stat->subslice_per_slice = max(stat->subslice_per_slice,
-                                              ss_cnt);
        }
 }
 
-static void broadwell_sseu_device_status(struct drm_device *dev,
-                                        struct sseu_dev_status *stat)
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+                                        struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int s;
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+       int s;
 
-       stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+       sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
 
-       if (stat->slice_total) {
-               stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
-               stat->subslice_total = stat->slice_total *
-                                      stat->subslice_per_slice;
-               stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
-               stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+       if (sseu->slice_mask) {
+               sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+               sseu->eu_per_subslice =
+                               INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+               sseu->eu_total = sseu->eu_per_subslice *
+                                sseu_subslice_total(sseu);
 
                /* subtract fused off EU(s) from enabled slice(s) */
-               for (s = 0; s < stat->slice_total; s++) {
-                       u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+               for (s = 0; s < fls(sseu->slice_mask); s++) {
+                       u8 subslice_7eu =
+                               INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
 
-                       stat->eu_total -= hweight8(subslice_7eu);
+                       sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
 }
 
-static int i915_sseu_status(struct seq_file *m, void *unused)
+static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
+                                const struct sseu_dev_info *sseu)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct sseu_dev_status stat;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       const char *type = is_available_info ? "Available" : "Enabled";
 
-       if (INTEL_INFO(dev)->gen < 8)
-               return -ENODEV;
+       seq_printf(m, "  %s Slice Mask: %04x\n", type,
+                  sseu->slice_mask);
+       seq_printf(m, "  %s Slice Total: %u\n", type,
+                  hweight8(sseu->slice_mask));
+       seq_printf(m, "  %s Subslice Total: %u\n", type,
+                  sseu_subslice_total(sseu));
+       seq_printf(m, "  %s Subslice Mask: %04x\n", type,
+                  sseu->subslice_mask);
+       seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
+                  hweight8(sseu->subslice_mask));
+       seq_printf(m, "  %s EU Total: %u\n", type,
+                  sseu->eu_total);
+       seq_printf(m, "  %s EU Per Subslice: %u\n", type,
+                  sseu->eu_per_subslice);
+
+       if (!is_available_info)
+               return;
+
+       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+       if (HAS_POOLED_EU(dev_priv))
+               seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
 
-       seq_puts(m, "SSEU Device Info\n");
-       seq_printf(m, "  Available Slice Total: %u\n",
-                  INTEL_INFO(dev)->slice_total);
-       seq_printf(m, "  Available Subslice Total: %u\n",
-                  INTEL_INFO(dev)->subslice_total);
-       seq_printf(m, "  Available Subslice Per Slice: %u\n",
-                  INTEL_INFO(dev)->subslice_per_slice);
-       seq_printf(m, "  Available EU Total: %u\n",
-                  INTEL_INFO(dev)->eu_total);
-       seq_printf(m, "  Available EU Per Subslice: %u\n",
-                  INTEL_INFO(dev)->eu_per_subslice);
-       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
-       if (HAS_POOLED_EU(dev))
-               seq_printf(m, "  Min EU in pool: %u\n",
-                          INTEL_INFO(dev)->min_eu_in_pool);
        seq_printf(m, "  Has Slice Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_slice_pg));
+                  yesno(sseu->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_subslice_pg));
+                  yesno(sseu->has_subslice_pg));
        seq_printf(m, "  Has EU Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_eu_pg));
+                  yesno(sseu->has_eu_pg));
+}
+
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct sseu_dev_info sseu;
+
+       if (INTEL_GEN(dev_priv) < 8)
+               return -ENODEV;
+
+       seq_puts(m, "SSEU Device Info\n");
+       i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
 
        seq_puts(m, "SSEU Device Status\n");
-       memset(&stat, 0, sizeof(stat));
-       if (IS_CHERRYVIEW(dev)) {
-               cherryview_sseu_device_status(dev, &stat);
-       } else if (IS_BROADWELL(dev)) {
-               broadwell_sseu_device_status(dev, &stat);
-       } else if (INTEL_INFO(dev)->gen >= 9) {
-               gen9_sseu_device_status(dev, &stat);
-       }
-       seq_printf(m, "  Enabled Slice Total: %u\n",
-                  stat.slice_total);
-       seq_printf(m, "  Enabled Subslice Total: %u\n",
-                  stat.subslice_total);
-       seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
-                  stat.subslice_per_slice);
-       seq_printf(m, "  Enabled EU Total: %u\n",
-                  stat.eu_total);
-       seq_printf(m, "  Enabled EU Per Subslice: %u\n",
-                  stat.eu_per_subslice);
+       memset(&sseu, 0, sizeof(sseu));
+
+       intel_runtime_pm_get(dev_priv);
+
+       if (IS_CHERRYVIEW(dev_priv)) {
+               cherryview_sseu_device_status(dev_priv, &sseu);
+       } else if (IS_BROADWELL(dev_priv)) {
+               broadwell_sseu_device_status(dev_priv, &sseu);
+       } else if (INTEL_GEN(dev_priv) >= 9) {
+               gen9_sseu_device_status(dev_priv, &sseu);
+       }
+
+       intel_runtime_pm_put(dev_priv);
+
+       i915_print_sseu_info(m, false, &sseu);
 
        return 0;
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -5335,10 +5194,9 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return 0;
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5355,12 +5213,11 @@ static const struct file_operations i915_forcewake_fops = {
 
 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
        struct dentry *ent;
 
        ent = debugfs_create_file("i915_forcewake_user",
                                  S_IRUSR,
-                                 root, dev,
+                                 root, to_i915(minor->dev),
                                  &i915_forcewake_fops);
        if (!ent)
                return -ENOMEM;
@@ -5373,12 +5230,11 @@ static int i915_debugfs_create(struct dentry *root,
                               const char *name,
                               const struct file_operations *fops)
 {
-       struct drm_device *dev = minor->dev;
        struct dentry *ent;
 
        ent = debugfs_create_file(name,
                                  S_IRUGO | S_IWUSR,
-                                 root, dev,
+                                 root, to_i915(minor->dev),
                                  fops);
        if (!ent)
                return -ENOMEM;
@@ -5390,9 +5246,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
-       {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
-       {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-       {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
        {"i915_gem_stolen", i915_gem_stolen_list_info },
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
@@ -5467,9 +5321,8 @@ static const struct i915_debugfs_files {
        {"i915_dp_test_active", &i915_displayport_test_active_fops}
 };
 
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -5517,7 +5370,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
 
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+       drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
                                 1, minor);
 
        for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
@@ -5529,7 +5382,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                struct drm_info_list *info_list =
-                       (struct drm_info_list *) i915_debugfs_files[i].fops;
+                       (struct drm_info_list *)i915_debugfs_files[i].fops;
 
                drm_debugfs_remove_files(info_list, 1, minor);
        }
@@ -5609,6 +5462,40 @@ static const struct file_operations i915_dpcd_fops = {
        .release = single_release,
 };
 
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct intel_dp *intel_dp =
+               enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       seq_printf(m, "Panel power up delay: %d\n",
+                  intel_dp->panel_power_up_delay);
+       seq_printf(m, "Panel power down delay: %d\n",
+                  intel_dp->panel_power_down_delay);
+       seq_printf(m, "Backlight on delay: %d\n",
+                  intel_dp->backlight_on_delay);
+       seq_printf(m, "Backlight off delay: %d\n",
+                  intel_dp->backlight_off_delay);
+
+       return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_panel_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5628,8 +5515,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
 
        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-               debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
-                                   &i915_dpcd_fops);
+               debugfs_create_file("i915_dpcd", S_IRUGO, root,
+                                   connector, &i915_dpcd_fops);
+
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+               debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+                                   connector, &i915_panel_fops);
 
        return 0;
 }
index 95ddd56b89f08954a4c767e149dad2d427a5de27..7f4e8adec8a81bd5c21f5ac491090ab899abadb3 100644 (file)
@@ -77,7 +77,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...)
 {
        static bool shown_bug_once;
-       struct device *dev = dev_priv->drm.dev;
+       struct device *kdev = dev_priv->drm.dev;
        bool is_error = level[1] <= KERN_ERR[1];
        bool is_debug = level[1] == KERN_DEBUG[1];
        struct va_format vaf;
@@ -91,11 +91,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+       dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
                   __builtin_return_address(0), &vaf);
 
        if (is_error && !shown_bug_once) {
-               dev_notice(dev, "%s", FDO_BUG_MSG);
+               dev_notice(kdev, "%s", FDO_BUG_MSG);
                shown_bug_once = true;
        }
 
@@ -228,31 +228,11 @@ static void intel_detect_pch(struct drm_device *dev)
        pci_dev_put(pch);
 }
 
-bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) < 6)
-               return false;
-
-       if (i915.semaphores >= 0)
-               return i915.semaphores;
-
-       /* TODO: make semaphores and Execlists play nicely together */
-       if (i915.enable_execlists)
-               return false;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Enable semaphores on SNB when IO remapping is off */
-       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
-}
-
 static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        drm_i915_getparam_t *param = data;
        int value;
 
@@ -263,13 +243,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
-               value = dev->pdev->device;
+               value = pdev->device;
                break;
        case I915_PARAM_REVISION:
-               value = dev->pdev->revision;
-               break;
-       case I915_PARAM_HAS_GEM:
-               value = 1;
+               value = pdev->revision;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs;
@@ -277,13 +254,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
-       case I915_PARAM_HAS_PAGEFLIPPING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXECBUF2:
-               /* depends on GEM */
-               value = 1;
-               break;
        case I915_PARAM_HAS_BSD:
                value = intel_engine_initialized(&dev_priv->engine[VCS]);
                break;
@@ -296,67 +266,34 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_BSD2:
                value = intel_engine_initialized(&dev_priv->engine[VCS2]);
                break;
-       case I915_PARAM_HAS_RELAXED_FENCING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_COHERENT_RINGS:
-               value = 1;
-               break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_INFO(dev)->gen >= 4;
-               break;
-       case I915_PARAM_HAS_RELAXED_DELTA:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_GEN7_SOL_RESET:
-               value = 1;
+               value = INTEL_GEN(dev_priv) >= 4;
                break;
        case I915_PARAM_HAS_LLC:
-               value = HAS_LLC(dev);
+               value = HAS_LLC(dev_priv);
                break;
        case I915_PARAM_HAS_WT:
-               value = HAS_WT(dev);
+               value = HAS_WT(dev_priv);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = USES_PPGTT(dev);
-               break;
-       case I915_PARAM_HAS_WAIT_TIMEOUT:
-               value = 1;
+               value = USES_PPGTT(dev_priv);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
-               value = i915_semaphore_is_enabled(dev_priv);
-               break;
-       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-               value = 1;
+               value = i915.semaphores;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
-       case I915_PARAM_HAS_PINNED_BATCHES:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_NO_RELOC:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-               value = 1;
-               break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version(dev_priv);
                break;
-       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-               value = 1;
-               break;
-       case I915_PARAM_MMAP_VERSION:
-               value = 1;
-               break;
        case I915_PARAM_SUBSLICE_TOTAL:
-               value = INTEL_INFO(dev)->subslice_total;
+               value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev)->eu_total;
+               value = INTEL_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
@@ -364,16 +301,43 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
-               value = HAS_RESOURCE_STREAMER(dev);
-               break;
-       case I915_PARAM_HAS_EXEC_SOFTPIN:
-               value = 1;
+               value = HAS_RESOURCE_STREAMER(dev_priv);
                break;
        case I915_PARAM_HAS_POOLED_EU:
-               value = HAS_POOLED_EU(dev);
+               value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
-               value = INTEL_INFO(dev)->min_eu_in_pool;
+               value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+               break;
+       case I915_PARAM_MMAP_GTT_VERSION:
+               /* Though we've started our numbering from 1, and so class all
+                * earlier versions as 0, in effect their value is undefined as
+                * the ioctl will report EINVAL for the unknown param!
+                */
+               value = i915_gem_mmap_gtt_version();
+               break;
+       case I915_PARAM_MMAP_VERSION:
+               /* Remember to bump this if the version changes! */
+       case I915_PARAM_HAS_GEM:
+       case I915_PARAM_HAS_PAGEFLIPPING:
+       case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+       case I915_PARAM_HAS_RELAXED_FENCING:
+       case I915_PARAM_HAS_COHERENT_RINGS:
+       case I915_PARAM_HAS_RELAXED_DELTA:
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+       case I915_PARAM_HAS_WAIT_TIMEOUT:
+       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+       case I915_PARAM_HAS_PINNED_BATCHES:
+       case I915_PARAM_HAS_EXEC_NO_RELOC:
+       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+       case I915_PARAM_HAS_EXEC_SOFTPIN:
+               /* For the time being all of these are always true;
+                * if some supported hardware does not have one of these
+                * features this value needs to be provided from
+                * INTEL_INFO(), a feature macro, or similar.
+                */
+               value = 1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
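
The getparam rework above folds a dozen always-true parameters into stacked case labels that fall through to a single assignment, so any one of them can later be peeled off into its own case with a real predicate. The shape in miniature, with made-up parameter names:

enum { PARAM_QUERIED, PARAM_ALWAYS_A, PARAM_ALWAYS_B, PARAM_ALWAYS_C };

static int sketch_getparam(int param, int *value)
{
	switch (param) {
	case PARAM_QUERIED:
		*value = 0;	/* would come from a hardware predicate */
		break;
	case PARAM_ALWAYS_A:
	case PARAM_ALWAYS_B:
	case PARAM_ALWAYS_C:
		/* stacked labels share the one body below */
		*value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
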
@@ -537,7 +501,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
-               pci_set_power_state(dev->pdev, PCI_D0);
+               pci_set_power_state(pdev, PCI_D0);
                i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
@@ -595,7 +559,6 @@ static void i915_gem_fini(struct drm_device *dev)
        }
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_reset(dev);
        i915_gem_cleanup_engines(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -606,6 +569,7 @@ static void i915_gem_fini(struct drm_device *dev)
 static int i915_load_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
        if (i915_inject_load_failure())
@@ -622,13 +586,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
-       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;
 
        intel_register_dsm_handler();
 
-       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+       ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;
 
@@ -680,9 +644,9 @@ cleanup_irq:
 cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
        intel_power_domains_fini(dev_priv);
-       vga_switcheroo_unregister_client(dev->pdev);
+       vga_switcheroo_unregister_client(pdev);
 cleanup_vga_client:
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
+       vga_client_register(pdev, NULL, NULL, NULL);
 out:
        return ret;
 }
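
The cleanup_* labels above are the kernel's standard goto unwind: each registration that succeeds gets a matching label, and a later failure jumps to the right point so teardown runs in reverse order. A self-contained sketch of the idiom (step_*/undo_* are stand-ins):

static int step_a(void), step_b(void), step_c(void);	/* stand-ins */
static void undo_a(void), undo_b(void);

static int setup_three_things(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = step_b();
	if (ret)
		goto out_a;

	ret = step_c();
	if (ret)
		goto out_b;

	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
	return ret;
}
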
@@ -706,7 +670,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
-       ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+       ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
        kfree(ap);
 
@@ -848,6 +812,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        mutex_init(&dev_priv->wm.wm_mutex);
        mutex_init(&dev_priv->pps_mutex);
 
+       i915_memcpy_init_early(dev_priv);
+
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
                return ret;
@@ -868,7 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_audio_hooks(dev_priv);
        i915_gem_load_init(&dev_priv->drm);
 
-       intel_display_crc_init(&dev_priv->drm);
+       intel_display_crc_init(dev_priv);
 
        intel_device_info_dump(dev_priv);
 
@@ -900,6 +866,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 static int i915_mmio_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int mmio_bar;
        int mmio_size;
 
@@ -916,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
        if (dev_priv->regs == NULL) {
                DRM_ERROR("failed to map registers\n");
 
@@ -932,9 +899,10 @@ static int i915_mmio_setup(struct drm_device *dev)
 static void i915_mmio_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        intel_teardown_mchbar(dev);
-       pci_iounmap(dev->pdev, dev_priv->regs);
+       pci_iounmap(pdev, dev_priv->regs);
 }
 
 /**
@@ -999,6 +967,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
        i915.enable_ppgtt =
                intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
        DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+
+       i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
+       DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
 }
 
 /**
@@ -1010,9 +981,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
  */
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       uint32_t aperture_size;
        int ret;
 
        if (i915_inject_load_failure())
@@ -1022,16 +992,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_sanitize_options(dev_priv);
 
-       ret = i915_ggtt_init_hw(dev);
+       ret = i915_ggtt_probe_hw(dev_priv);
        if (ret)
                return ret;
 
-       ret = i915_ggtt_enable_hw(dev);
-       if (ret) {
-               DRM_ERROR("failed to enable GGTT\n");
-               goto out_ggtt;
-       }
-
        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
        ret = i915_kick_out_firmware_fb(dev_priv);
@@ -1046,11 +1010,21 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
                goto out_ggtt;
        }
 
-       pci_set_master(dev->pdev);
+       ret = i915_ggtt_init_hw(dev_priv);
+       if (ret)
+               return ret;
+
+       ret = i915_ggtt_enable_hw(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to enable GGTT\n");
+               goto out_ggtt;
+       }
+
+       pci_set_master(pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
 
@@ -1058,7 +1032,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
                }
        }
 
-
        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
@@ -1068,7 +1041,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * which also needs to be handled carefully.
         */
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
@@ -1077,19 +1050,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
                }
        }
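
For context on the two dma_set_coherent_mask() calls above: DMA_BIT_MASK(n) expands to the largest n-bit address, so gen2 is capped at 1 GiB for the overlay and Broadwater/Crestline at 4 GiB for the HWS. A standalone sketch of those values, assuming the standard DMA_BIT_MASK() definition from include/linux/dma-mapping.h (not part of this patch):

    #include <stdio.h>

    /* Standard kernel definition, reproduced here for illustration. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            printf("gen2 overlay limit:   0x%llx (1 GiB)\n", DMA_BIT_MASK(30));
            printf("Broadwater/Crestline: 0x%llx (4 GiB)\n", DMA_BIT_MASK(32));
            return 0;
    }
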
 
-       aperture_size = ggtt->mappable_end;
-
-       ggtt->mappable =
-               io_mapping_create_wc(ggtt->mappable_base,
-                                    aperture_size);
-       if (!ggtt->mappable) {
-               ret = -EIO;
-               goto out_ggtt;
-       }
-
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
-                                             aperture_size);
-
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);
 
@@ -1111,14 +1071,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-               if (pci_enable_msi(dev->pdev) < 0)
+               if (pci_enable_msi(pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }
 
        return 0;
 
 out_ggtt:
-       i915_ggtt_cleanup_hw(dev);
+       i915_ggtt_cleanup_hw(dev_priv);
 
        return ret;
 }
@@ -1129,16 +1089,13 @@ out_ggtt:
  */
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
-       if (dev->pdev->msi_enabled)
-               pci_disable_msi(dev->pdev);
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
 
        pm_qos_remove_request(&dev_priv->pm_qos);
-       arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_free(ggtt->mappable);
-       i915_ggtt_cleanup_hw(dev);
+       i915_ggtt_cleanup_hw(dev_priv);
 }
 
 /**
@@ -1164,7 +1121,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
        /* Reveal our presence to userspace */
        if (drm_dev_register(dev, 0) == 0) {
                i915_debugfs_register(dev_priv);
-               i915_setup_sysfs(dev);
+               i915_setup_sysfs(dev_priv);
        } else
                DRM_ERROR("Failed to register driver for userspace access!\n");
 
@@ -1201,7 +1158,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
        acpi_video_unregister();
        intel_opregion_unregister(dev_priv);
 
-       i915_teardown_sysfs(&dev_priv->drm);
+       i915_teardown_sysfs(dev_priv);
        i915_debugfs_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);
 
@@ -1281,6 +1238,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        intel_runtime_pm_enable(dev_priv);
 
+       /* Everything is in place, we can now relax! */
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver.name, driver.major, driver.minor, driver.patchlevel,
+                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
        intel_runtime_pm_put(dev_priv);
 
        return 0;
@@ -1305,6 +1267,7 @@ out_free_priv:
 void i915_driver_unload(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        intel_fbdev_fini(dev);
 
@@ -1333,8 +1296,8 @@ void i915_driver_unload(struct drm_device *dev)
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-       vga_switcheroo_unregister_client(dev->pdev);
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
+       vga_switcheroo_unregister_client(pdev);
+       vga_client_register(pdev, NULL, NULL, NULL);
 
        intel_csr_ucode_fini(dev_priv);
 
@@ -1343,7 +1306,7 @@ void i915_driver_unload(struct drm_device *dev)
        i915_destroy_error_state(dev);
 
        /* Flush any outstanding unpin_work. */
-       flush_workqueue(dev_priv->wq);
+       drain_workqueue(dev_priv->wq);
 
        intel_guc_fini(dev);
        i915_gem_fini(dev);
@@ -1431,6 +1394,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_power_t opregion_target_state;
        int error;
 
@@ -1447,19 +1411,17 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        drm_kms_helper_poll_disable(dev);
 
-       pci_save_state(dev->pdev);
+       pci_save_state(pdev);
 
        error = i915_gem_suspend(dev);
        if (error) {
-               dev_err(&dev->pdev->dev,
+               dev_err(&pdev->dev,
                        "GEM idle failed, resume might fail\n");
                goto out;
        }
 
        intel_guc_suspend(dev);
 
-       intel_suspend_gt_powersave(dev_priv);
-
        intel_display_suspend(dev);
 
        intel_dp_mst_suspend(dev);
@@ -1495,9 +1457,10 @@ out:
        return error;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        bool fw_csr;
        int ret;
 
@@ -1531,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
                goto out;
        }
 
-       pci_disable_device(drm_dev->pdev);
+       pci_disable_device(pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
@@ -1545,7 +1508,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
         * Acer Aspire 1830T
         */
        if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
-               pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+               pci_set_power_state(pdev, PCI_D3hot);
 
        dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
@@ -1585,18 +1548,18 @@ static int i915_drm_resume(struct drm_device *dev)
        int ret;
 
        disable_rpm_wakeref_asserts(dev_priv);
+       intel_sanitize_gt_powersave(dev_priv);
 
-       ret = i915_ggtt_enable_hw(dev);
+       ret = i915_ggtt_enable_hw(dev_priv);
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
 
        intel_csr_ucode_resume(dev_priv);
 
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_restore_gtt_mappings(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_resume(dev);
 
        i915_restore_state(dev);
+       intel_pps_unlock_regs_wa(dev_priv);
        intel_opregion_setup(dev_priv);
 
        intel_init_pch_refclk(dev);
@@ -1615,7 +1578,7 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               i915_gem_set_wedged(dev_priv);
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -1652,6 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
+       intel_autoenable_gt_powersave(dev_priv);
        drm_kms_helper_poll_enable(dev);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -1662,6 +1626,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
        /*
@@ -1684,7 +1649,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * the device powered we can also remove the following set power state
         * call.
         */
-       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       ret = pci_set_power_state(pdev, PCI_D0);
        if (ret) {
                DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
                goto out;
@@ -1703,12 +1668,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * depend on the device enable refcount we can't anyway depend on them
         * disabling/enabling the device.
         */
-       if (pci_enable_device(dev->pdev)) {
+       if (pci_enable_device(pdev)) {
                ret = -EIO;
                goto out;
        }
 
-       pci_set_master(dev->pdev);
+       pci_set_master(pdev);
 
        disable_rpm_wakeref_asserts(dev_priv);
 
@@ -1760,8 +1725,10 @@ int i915_resume_switcheroo(struct drm_device *dev)
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
  *
- * Reset the chip.  Useful if a hang is detected. Returns zero on successful
- * reset or otherwise an error code.
+ * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
  *
  * Procedure is fairly simple:
  *   - reset the chip using the reset reg
@@ -1771,31 +1738,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
-       unsigned reset_counter;
        int ret;
 
-       intel_reset_gt_powersave(dev_priv);
+       lockdep_assert_held(&dev->struct_mutex);
 
-       mutex_lock(&dev->struct_mutex);
+       if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+               return;
 
        /* Clear any previous failed attempts at recovery. Time to try again. */
-       atomic_andnot(I915_WEDGED, &error->reset_counter);
-
-       /* Clear the reset-in-progress flag and increment the reset epoch. */
-       reset_counter = atomic_inc_return(&error->reset_counter);
-       if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
-               ret = -EIO;
-               goto error;
-       }
+       __clear_bit(I915_WEDGED, &error->flags);
+       error->reset_count++;
 
        pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
-       i915_gem_reset(dev);
-
        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
        if (ret) {
                if (ret != -ENODEV)
@@ -1805,6 +1763,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
+       i915_gem_reset(dev_priv);
        intel_overlay_reset(dev_priv);
 
        /* Ok, now get things going again... */
@@ -1827,44 +1786,43 @@ int i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
-       mutex_unlock(&dev->struct_mutex);
-
        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
         * previous concerns that it doesn't respond well to some forms
         * of re-init after reset.
         */
-       if (INTEL_INFO(dev)->gen > 5)
-               intel_enable_gt_powersave(dev_priv);
+       intel_sanitize_gt_powersave(dev_priv);
+       intel_autoenable_gt_powersave(dev_priv);
 
-       return 0;
+wakeup:
+       wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+       return;
 
 error:
-       atomic_or(I915_WEDGED, &error->reset_counter);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       i915_gem_set_wedged(dev_priv);
+       goto wakeup;
 }
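
The hunk above replaces the odd/even reset_counter protocol with two bits in error->flags: the reset routine claims I915_RESET_IN_PROGRESS with test_and_clear_bit() and always wakes waiters via wake_up_bit(), while I915_WEDGED is set sticky on failure. A minimal userspace sketch of that claim/complete flow, with names local to the sketch and a GCC atomic builtin standing in for the kernel bitops (assumes 64-bit long, mirroring BITS_PER_LONG - 1):

    #include <stdbool.h>
    #include <stdio.h>

    enum { RESET_IN_PROGRESS = 0, WEDGED = 63 };    /* BITS_PER_LONG - 1 */

    static unsigned long flags;

    static bool test_and_clear(int bit)
    {
            unsigned long old = __atomic_fetch_and(&flags, ~(1UL << bit),
                                                   __ATOMIC_SEQ_CST);
            return old & (1UL << bit);
    }

    static void do_reset(bool hw_reset_ok)
    {
            if (!test_and_clear(RESET_IN_PROGRESS))
                    return;                     /* no reset pending */
            if (!hw_reset_ok)
                    flags |= 1UL << WEDGED;     /* sticky: no recovery */
            /* the kernel would wake_up_bit(&flags, RESET_IN_PROGRESS) here */
    }

    int main(void)
    {
            flags |= 1UL << RESET_IN_PROGRESS;  /* hang detected */
            do_reset(false);                    /* simulate a failed reset */
            printf("wedged: %d\n", !!(flags & (1UL << WEDGED)));
            return 0;
    }
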
 
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
 
-       if (!drm_dev) {
-               dev_err(dev, "DRM not initialized, aborting suspend.\n");
+       if (!dev) {
+               dev_err(kdev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend(drm_dev);
+       return i915_drm_suspend(dev);
 }
 
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -1875,57 +1833,57 @@ static int i915_pm_suspend_late(struct device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend_late(drm_dev, false);
+       return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend_late(drm_dev, true);
+       return i915_drm_suspend_late(dev, true);
 }
 
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_resume_early(drm_dev);
+       return i915_drm_resume_early(dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_resume(drm_dev);
+       return i915_drm_resume(dev);
 }
 
 /* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
 {
-       return i915_pm_suspend(dev);
+       return i915_pm_suspend(kdev);
 }
 
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze_late(struct device *kdev)
 {
        int ret;
 
-       ret = i915_pm_suspend_late(dev);
+       ret = i915_pm_suspend_late(kdev);
        if (ret)
                return ret;
 
-       ret = i915_gem_freeze_late(dev_to_i915(dev));
+       ret = i915_gem_freeze_late(kdev_to_i915(kdev));
        if (ret)
                return ret;
 
@@ -1933,25 +1891,25 @@ static int i915_pm_freeze_late(struct device *dev)
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
 {
-       return i915_pm_resume_early(dev);
+       return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
 {
-       return i915_pm_resume(dev);
+       return i915_pm_resume(kdev);
 }
 
 /* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
 {
-       return i915_pm_resume_early(dev);
+       return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
 {
-       return i915_pm_resume(dev);
+       return i915_pm_resume(kdev);
 }
 
 /*
@@ -2313,9 +2271,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
+       struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
@@ -2341,7 +2299,7 @@ static int intel_runtime_suspend(struct device *device)
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
-               pm_runtime_mark_last_busy(device);
+               pm_runtime_mark_last_busy(kdev);
 
                return -EAGAIN;
        }
@@ -2420,9 +2378,9 @@ static int intel_runtime_suspend(struct device *device)
        return 0;
 }
 
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
+       struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
@@ -2462,7 +2420,6 @@ static int intel_runtime_resume(struct device *device)
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
-       gen6_update_ring_freq(dev_priv);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -2618,6 +2575,7 @@ static struct drm_driver driver = {
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,
 
+       .gem_close_object = i915_gem_close_object,
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
 
index 21f939074abc88d28bef842b8261da298db1192f..4dd307ed43368660b49be77fa8ec4ae597bc3914 100644 (file)
@@ -61,6 +61,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
 
 #include "intel_gvt.h"
 
@@ -69,7 +70,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160711"
+#define DRIVER_DATE            "20160919"
 
 #undef WARN_ON
 /* Many gcc seem to not see through this and fall over :( */
@@ -401,7 +402,7 @@ struct drm_i915_file_private {
                unsigned boosts;
        } rps;
 
-       unsigned int bsd_ring;
+       unsigned int bsd_engine;
 };
 
 /* Used by dp and fdi links */
@@ -431,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 #define DRIVER_MINOR           6
 #define DRIVER_PATCHLEVEL      0
 
-#define WATCH_LISTS    0
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -456,15 +455,21 @@ struct intel_opregion {
 struct intel_overlay;
 struct intel_overlay_error_state;
 
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
 struct drm_i915_fence_reg {
-       struct list_head lru_list;
-       struct drm_i915_gem_object *obj;
+       struct list_head link;
+       struct drm_i915_private *i915;
+       struct i915_vma *vma;
        int pin_count;
+       int id;
+       /**
+        * Whether the tiling parameters for the currently
+        * associated fence register have changed. Note that
+        * for the purposes of tracking tiling changes we also
+        * treat the unfenced register, the register slot that
+        * the object occupies whilst it executes a fenced
+        * command (such as BLT on gen2/3), as a "fence".
+        */
+       bool dirty;
 };
 
 struct sdvo_device_mapping {
@@ -476,130 +481,6 @@ struct sdvo_device_mapping {
        u8 ddc_pin;
 };
 
-struct intel_display_error_state;
-
-struct drm_i915_error_state {
-       struct kref ref;
-       struct timeval time;
-
-       char error_msg[128];
-       bool simulated;
-       int iommu;
-       u32 reset_count;
-       u32 suspend_count;
-
-       /* Generic register state */
-       u32 eir;
-       u32 pgtbl_er;
-       u32 ier;
-       u32 gtier[4];
-       u32 ccid;
-       u32 derrmr;
-       u32 forcewake;
-       u32 error; /* gen6+ */
-       u32 err_int; /* gen7 */
-       u32 fault_data0; /* gen8, gen9 */
-       u32 fault_data1; /* gen8, gen9 */
-       u32 done_reg;
-       u32 gac_eco;
-       u32 gam_ecochk;
-       u32 gab_ctl;
-       u32 gfx_mode;
-       u32 extra_instdone[I915_NUM_INSTDONE_REG];
-       u64 fence[I915_MAX_NUM_FENCES];
-       struct intel_overlay_error_state *overlay;
-       struct intel_display_error_state *display;
-       struct drm_i915_error_object *semaphore_obj;
-
-       struct drm_i915_error_ring {
-               bool valid;
-               /* Software tracked state */
-               bool waiting;
-               int num_waiters;
-               int hangcheck_score;
-               enum intel_ring_hangcheck_action hangcheck_action;
-               int num_requests;
-
-               /* our own tracking of ring head and tail */
-               u32 cpu_ring_head;
-               u32 cpu_ring_tail;
-
-               u32 last_seqno;
-               u32 semaphore_seqno[I915_NUM_ENGINES - 1];
-
-               /* Register state */
-               u32 start;
-               u32 tail;
-               u32 head;
-               u32 ctl;
-               u32 hws;
-               u32 ipeir;
-               u32 ipehr;
-               u32 instdone;
-               u32 bbstate;
-               u32 instpm;
-               u32 instps;
-               u32 seqno;
-               u64 bbaddr;
-               u64 acthd;
-               u32 fault_reg;
-               u64 faddr;
-               u32 rc_psmi; /* sleep state */
-               u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
-
-               struct drm_i915_error_object {
-                       int page_count;
-                       u64 gtt_offset;
-                       u32 *pages[0];
-               } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
-               struct drm_i915_error_object *wa_ctx;
-
-               struct drm_i915_error_request {
-                       long jiffies;
-                       u32 seqno;
-                       u32 tail;
-               } *requests;
-
-               struct drm_i915_error_waiter {
-                       char comm[TASK_COMM_LEN];
-                       pid_t pid;
-                       u32 seqno;
-               } *waiters;
-
-               struct {
-                       u32 gfx_mode;
-                       union {
-                               u64 pdp[4];
-                               u32 pp_dir_base;
-                       };
-               } vm_info;
-
-               pid_t pid;
-               char comm[TASK_COMM_LEN];
-       } ring[I915_NUM_ENGINES];
-
-       struct drm_i915_error_buffer {
-               u32 size;
-               u32 name;
-               u32 rseqno[I915_NUM_ENGINES], wseqno;
-               u64 gtt_offset;
-               u32 read_domains;
-               u32 write_domain;
-               s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
-               s32 pinned:2;
-               u32 tiling:2;
-               u32 dirty:1;
-               u32 purgeable:1;
-               u32 userptr:1;
-               s32 ring:4;
-               u32 cache_level:3;
-       } **active_bo, **pinned_bo;
-
-       u32 *active_bo_count, *pinned_bo_count;
-       u32 vm_count;
-};
-
 struct intel_connector;
 struct intel_encoder;
 struct intel_crtc_state;
@@ -629,8 +510,12 @@ struct drm_i915_display_funcs {
                                         struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
-       void (*crtc_enable)(struct drm_crtc *crtc);
-       void (*crtc_disable)(struct drm_crtc *crtc);
+       void (*crtc_enable)(struct intel_crtc_state *pipe_config,
+                           struct drm_atomic_state *old_state);
+       void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
+                            struct drm_atomic_state *old_state);
+       void (*update_crtcs)(struct drm_atomic_state *state,
+                            unsigned int *crtc_vblank_mask);
        void (*audio_codec_enable)(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode);
@@ -694,8 +579,6 @@ struct intel_uncore_funcs {
                                uint16_t val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
                                uint32_t val, bool trace);
-       void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
-                               uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -756,7 +639,7 @@ struct intel_csr {
        func(is_i915g) sep \
        func(is_i945gm) sep \
        func(is_g33) sep \
-       func(need_gfx_hws) sep \
+       func(hws_needs_physical) sep \
        func(is_g4x) sep \
        func(is_pineview) sep \
        func(is_broadwater) sep \
@@ -771,6 +654,19 @@ struct intel_csr {
        func(is_kabylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
+       func(has_psr) sep \
+       func(has_runtime_pm) sep \
+       func(has_csr) sep \
+       func(has_resource_streamer) sep \
+       func(has_rc6) sep \
+       func(has_rc6p) sep \
+       func(has_dp_mst) sep \
+       func(has_gmbus_irq) sep \
+       func(has_hw_contexts) sep \
+       func(has_logical_ring_contexts) sep \
+       func(has_l3_dpf) sep \
+       func(has_gmch_display) sep \
+       func(has_guc) sep \
        func(has_pipe_cxsr) sep \
        func(has_hotplug) sep \
        func(cursor_needs_physical) sep \
@@ -786,6 +682,24 @@ struct intel_csr {
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
 
+struct sseu_dev_info {
+       u8 slice_mask;
+       u8 subslice_mask;
+       u8 eu_total;
+       u8 eu_per_subslice;
+       u8 min_eu_in_pool;
+       /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+       u8 subslice_7eu[3];
+       u8 has_slice_pg:1;
+       u8 has_subslice_pg:1;
+       u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+       return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
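
The new sseu_dev_info stores slices and subslices as bitmasks instead of the old pre-computed totals, so sseu_subslice_total() is just a product of population counts. An illustrative standalone version, with made-up masks and __builtin_popcount() standing in for the kernel's hweight8():

    #include <stdio.h>

    int main(void)
    {
            unsigned char slice_mask = 0x3;     /* 2 slices enabled */
            unsigned char subslice_mask = 0x7;  /* 3 subslices per slice */

            printf("subslice total: %d\n",
                   __builtin_popcount(slice_mask) *
                   __builtin_popcount(subslice_mask));  /* prints 6 */
            return 0;
    }
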
 struct intel_device_info {
        u32 display_mmio_offset;
        u16 device_id;
@@ -794,7 +708,9 @@ struct intel_device_info {
        u8 gen;
        u16 gen_mask;
        u8 ring_mask; /* Rings supported by the HW */
+       u8 num_rings;
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+       u16 ddb_size; /* in blocks */
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
@@ -802,17 +718,7 @@ struct intel_device_info {
        int cursor_offsets[I915_MAX_PIPES];
 
        /* Slice/subslice/EU info */
-       u8 slice_total;
-       u8 subslice_total;
-       u8 subslice_per_slice;
-       u8 eu_total;
-       u8 eu_per_subslice;
-       u8 min_eu_in_pool;
-       /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
-       u8 subslice_7eu[3];
-       u8 has_slice_pg:1;
-       u8 has_subslice_pg:1;
-       u8 has_eu_pg:1;
+       struct sseu_dev_info sseu;
 
        struct color_luts {
                u16 degamma_lut_size;
@@ -823,6 +729,134 @@ struct intel_device_info {
 #undef DEFINE_FLAG
 #undef SEP_SEMICOLON
 
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+       struct kref ref;
+       struct timeval time;
+
+       char error_msg[128];
+       bool simulated;
+       int iommu;
+       u32 reset_count;
+       u32 suspend_count;
+       struct intel_device_info device_info;
+
+       /* Generic register state */
+       u32 eir;
+       u32 pgtbl_er;
+       u32 ier;
+       u32 gtier[4];
+       u32 ccid;
+       u32 derrmr;
+       u32 forcewake;
+       u32 error; /* gen6+ */
+       u32 err_int; /* gen7 */
+       u32 fault_data0; /* gen8, gen9 */
+       u32 fault_data1; /* gen8, gen9 */
+       u32 done_reg;
+       u32 gac_eco;
+       u32 gam_ecochk;
+       u32 gab_ctl;
+       u32 gfx_mode;
+       u32 extra_instdone[I915_NUM_INSTDONE_REG];
+       u64 fence[I915_MAX_NUM_FENCES];
+       struct intel_overlay_error_state *overlay;
+       struct intel_display_error_state *display;
+       struct drm_i915_error_object *semaphore;
+
+       struct drm_i915_error_engine {
+               int engine_id;
+               /* Software tracked state */
+               bool waiting;
+               int num_waiters;
+               int hangcheck_score;
+               enum intel_engine_hangcheck_action hangcheck_action;
+               struct i915_address_space *vm;
+               int num_requests;
+
+               /* our own tracking of ring head and tail */
+               u32 cpu_ring_head;
+               u32 cpu_ring_tail;
+
+               u32 last_seqno;
+               u32 semaphore_seqno[I915_NUM_ENGINES - 1];
+
+               /* Register state */
+               u32 start;
+               u32 tail;
+               u32 head;
+               u32 ctl;
+               u32 mode;
+               u32 hws;
+               u32 ipeir;
+               u32 ipehr;
+               u32 instdone;
+               u32 bbstate;
+               u32 instpm;
+               u32 instps;
+               u32 seqno;
+               u64 bbaddr;
+               u64 acthd;
+               u32 fault_reg;
+               u64 faddr;
+               u32 rc_psmi; /* sleep state */
+               u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+
+               struct drm_i915_error_object {
+                       int page_count;
+                       u64 gtt_offset;
+                       u64 gtt_size;
+                       u32 *pages[0];
+               } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+               struct drm_i915_error_object *wa_ctx;
+
+               struct drm_i915_error_request {
+                       long jiffies;
+                       pid_t pid;
+                       u32 seqno;
+                       u32 head;
+                       u32 tail;
+               } *requests;
+
+               struct drm_i915_error_waiter {
+                       char comm[TASK_COMM_LEN];
+                       pid_t pid;
+                       u32 seqno;
+               } *waiters;
+
+               struct {
+                       u32 gfx_mode;
+                       union {
+                               u64 pdp[4];
+                               u32 pp_dir_base;
+                       };
+               } vm_info;
+
+               pid_t pid;
+               char comm[TASK_COMM_LEN];
+       } engine[I915_NUM_ENGINES];
+
+       struct drm_i915_error_buffer {
+               u32 size;
+               u32 name;
+               u32 rseqno[I915_NUM_ENGINES], wseqno;
+               u64 gtt_offset;
+               u32 read_domains;
+               u32 write_domain;
+               s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+               u32 tiling:2;
+               u32 dirty:1;
+               u32 purgeable:1;
+               u32 userptr:1;
+               s32 engine:4;
+               u32 cache_level:3;
+       } *active_bo[I915_NUM_ENGINES], *pinned_bo;
+       u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
+       struct i915_address_space *active_vm[I915_NUM_ENGINES];
+};
+
 enum i915_cache_level {
        I915_CACHE_NONE = 0,
        I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -879,22 +913,23 @@ struct i915_gem_context {
        struct drm_i915_private *i915;
        struct drm_i915_file_private *file_priv;
        struct i915_hw_ppgtt *ppgtt;
+       struct pid *pid;
 
        struct i915_ctx_hang_stats hang_stats;
 
-       /* Unique identifier for this context, used by the hw for tracking */
        unsigned long flags;
 #define CONTEXT_NO_ZEROMAP             BIT(0)
 #define CONTEXT_NO_ERROR_CAPTURE       BIT(1)
-       unsigned hw_id;
+
+       /* Unique identifier for this context, used by the hw for tracking */
+       unsigned int hw_id;
        u32 user_handle;
 
        u32 ggtt_alignment;
 
        struct intel_context {
-               struct drm_i915_gem_object *state;
-               struct intel_ringbuffer *ringbuf;
-               struct i915_vma *lrc_vma;
+               struct i915_vma *state;
+               struct intel_ring *ring;
                uint32_t *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
@@ -908,6 +943,7 @@ struct i915_gem_context {
        struct list_head link;
 
        u8 remap_slice;
+       bool closed:1;
 };
 
 enum fb_op_origin {
@@ -1061,13 +1097,6 @@ struct intel_gmbus {
 
 struct i915_suspend_saved_registers {
        u32 saveDSPARB;
-       u32 saveLVDS;
-       u32 savePP_ON_DELAYS;
-       u32 savePP_OFF_DELAYS;
-       u32 savePP_ON;
-       u32 savePP_OFF;
-       u32 savePP_CONTROL;
-       u32 savePP_DIVISOR;
        u32 saveFBC_CONTROL;
        u32 saveCACHE_MODE_0;
        u32 saveMI_ARB_STATE;
@@ -1156,6 +1185,7 @@ struct intel_gen6_power_mgmt {
        bool interrupts_enabled;
        u32 pm_iir;
 
+       /* PM interrupt bits that should never be masked */
        u32 pm_intr_keep;
 
        /* Frequencies are stored in potentially platform dependent multiples.
@@ -1173,6 +1203,7 @@ struct intel_gen6_power_mgmt {
        u8 max_freq_softlimit;  /* Max frequency permitted by the driver */
        u8 max_freq;            /* Maximum frequency, RP0 if not overclocking */
        u8 min_freq;            /* AKA RPn. Minimum frequency */
+       u8 boost_freq;          /* Frequency to request when wait boosting */
        u8 idle_freq;           /* Frequency to request when we are idle */
        u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
        u8 rp1_freq;            /* "less than" RP0 power/freqency */
@@ -1190,11 +1221,9 @@ struct intel_gen6_power_mgmt {
        bool client_boost;
 
        bool enabled;
-       struct delayed_work delayed_resume_work;
+       struct delayed_work autoenable_work;
        unsigned boosts;
 
-       struct intel_rps_client semaphores, mmioflips;
-
        /* manual wa residency calculations */
        struct intel_rps_ei up_ei, down_ei;
 
@@ -1319,7 +1348,6 @@ struct i915_gem_mm {
        struct notifier_block oom_notifier;
        struct notifier_block vmap_notifier;
        struct shrinker shrinker;
-       bool shrinker_no_lock_stealing;
 
        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;
@@ -1331,7 +1359,7 @@ struct i915_gem_mm {
        bool interruptible;
 
        /* the indicator for dispatch video commands on two BSD rings */
-       unsigned int bsd_ring_dispatch_index;
+       atomic_t bsd_engine_dispatch_index;
 
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
@@ -1379,9 +1407,10 @@ struct i915_gpu_error {
         * State variable controlling the reset flow and count
         *
         * This is a counter which gets incremented when reset is triggered,
-        * and again when reset has been handled. So odd values (lowest bit set)
-        * means that reset is in progress and even values that
-        * (reset_counter >> 1):th reset was successfully completed.
+        *
+        * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+        * meaning that any waiters holding onto the struct_mutex should
+        * relinquish the lock immediately in order for the reset to start.
         *
         * If reset is not completed successfully, the I915_WEDGED bit is
         * set meaning that hardware is terminally sour and there is no
@@ -1396,10 +1425,11 @@ struct i915_gpu_error {
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
         */
-       atomic_t reset_counter;
+       unsigned long reset_count;
 
-#define I915_RESET_IN_PROGRESS_FLAG    1
-#define I915_WEDGED                    (1 << 31)
+       unsigned long flags;
+#define I915_RESET_IN_PROGRESS 0
+#define I915_WEDGED            (BITS_PER_LONG - 1)
 
        /**
         * Waitqueue to signal when a hang is detected. Used for waiters
@@ -1670,7 +1700,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-       struct mutex lock;
+       spinlock_t lock;
 
        /*
         * Tracking bits for delayed frontbuffer flushing due to gpu activity or
@@ -1705,18 +1735,6 @@ struct i915_virtual_gpu {
        bool active;
 };
 
-struct i915_execbuffer_params {
-       struct drm_device               *dev;
-       struct drm_file                 *file;
-       uint32_t                        dispatch_flags;
-       uint32_t                        args_batch_start_offset;
-       uint64_t                        batch_obj_vm_offset;
-       struct intel_engine_cs *engine;
-       struct drm_i915_gem_object      *batch_obj;
-       struct i915_gem_context            *ctx;
-       struct drm_i915_gem_request     *request;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
        unsigned int num_pipes_active;
@@ -1763,13 +1781,15 @@ struct drm_i915_private {
 
        uint32_t psr_mmio_base;
 
+       uint32_t pps_mmio_base;
+
        wait_queue_head_t gmbus_wait_queue;
 
        struct pci_dev *bridge_dev;
        struct i915_gem_context *kernel_context;
        struct intel_engine_cs engine[I915_NUM_ENGINES];
-       struct drm_i915_gem_object *semaphore_obj;
-       uint32_t last_seqno, next_seqno;
+       struct i915_vma *semaphore;
+       u32 next_seqno;
 
        struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
@@ -1854,6 +1874,7 @@ struct drm_i915_private {
        enum modeset_restore modeset_restore;
        struct mutex modeset_restore_lock;
        struct drm_atomic_state *modeset_restore_state;
+       struct drm_modeset_acquire_ctx reset_ctx;
 
        struct list_head vm_list; /* Global list of all address spaces */
        struct i915_ggtt ggtt; /* VM representing the global address space */
@@ -1962,6 +1983,13 @@ struct drm_i915_private {
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state vlv_s0ix_state;
 
+       enum {
+               I915_SKL_SAGV_UNKNOWN = 0,
+               I915_SKL_SAGV_DISABLED,
+               I915_SKL_SAGV_ENABLED,
+               I915_SKL_SAGV_NOT_CONTROLLED
+       } skl_sagv_status;
+
        struct {
                /*
                 * Raw watermark latency values:
@@ -2016,12 +2044,8 @@ struct drm_i915_private {
 
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
-               int (*execbuf_submit)(struct i915_execbuffer_params *params,
-                                     struct drm_i915_gem_execbuffer2 *args,
-                                     struct list_head *vmas);
-               int (*init_engines)(struct drm_device *dev);
+               void (*resume)(struct drm_i915_private *);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
-               void (*stop_engine)(struct intel_engine_cs *engine);
 
                /**
                 * Is the GPU currently considered idle, or busy executing
@@ -2068,9 +2092,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
        return container_of(dev, struct drm_i915_private, drm);
 }
 
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 {
-       return to_i915(dev_get_drvdata(dev));
+       return to_i915(dev_get_drvdata(kdev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2093,13 +2117,16 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
                for_each_if (((id__) = (engine__)->id, \
                              intel_engine_initialized(engine__)))
 
+#define __mask_next_bit(mask) ({                                       \
+       int __idx = ffs(mask) - 1;                                      \
+       mask &= ~BIT(__idx);                                            \
+       __idx;                                                          \
+})
+
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
-       for ((engine__) = &(dev_priv__)->engine[0]; \
-            (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-            (engine__)++) \
-               for_each_if (((mask__) & intel_engine_flag(engine__)) && \
-                            intel_engine_initialized(engine__))
+#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
+       for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;        \
+            tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
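
__mask_next_bit() peels the lowest set bit off the mask on every use, so the rewritten for_each_engine_masked() visits exactly the selected engines in index order instead of scanning the whole array. A standalone sketch of the same idiom (the example mask is made up):

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
            unsigned int mask = 0x2d;       /* engines 0, 2, 3, 5 */

            while (mask) {
                    int idx = ffs(mask) - 1;    /* lowest set bit */
                    mask &= ~(1U << idx);       /* consume it */
                    printf("engine %d\n", idx);
            }
            return 0;
    }
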
 
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
@@ -2144,8 +2171,6 @@ struct drm_i915_gem_object_ops {
  */
 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
-       (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
        (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2169,18 +2194,21 @@ struct drm_i915_gem_object {
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       struct list_head engine_list[I915_NUM_ENGINES];
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
 
        struct list_head batch_pool_link;
 
+       unsigned long flags;
        /**
         * This is set if the object is on the active lists (has pending
         * rendering and so a non-zero seqno), and is not set if it i s on
         * inactive (ready to be unbound) list.
         */
-       unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+       ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
        /**
         * This is set if the object has been written to since last bound
@@ -2188,37 +2216,11 @@ struct drm_i915_gem_object {
         */
        unsigned int dirty:1;
 
-       /**
-        * Fence register bits (if any) for this object.  Will be set
-        * as needed when mapped into the GTT.
-        * Protected by dev->struct_mutex.
-        */
-       signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
-
        /**
         * Advice: are the backing pages purgeable?
         */
        unsigned int madv:2;
 
-       /**
-        * Current tiling mode for the object.
-        */
-       unsigned int tiling_mode:2;
-       /**
-        * Whether the tiling parameters for the currently associated fence
-        * register have changed. Note that for the purposes of tracking
-        * tiling changes we also treat the unfenced register, the register
-        * slot that the object occupies whilst it executes a fenced
-        * command (such as BLT on gen2/3), as a "fence".
-        */
-       unsigned int fence_dirty:1;
-
-       /**
-        * Is the object at the current location in the gtt mappable and
-        * fenceable? Used to avoid costly recalculations.
-        */
-       unsigned int map_and_fenceable:1;
-
        /**
         * Whether the current gtt mapping needs to be mappable (and isn't just
         * mappable by accident). Track pin and fault separate for a more
@@ -2234,9 +2236,17 @@ struct drm_i915_gem_object {
        unsigned int cache_level:3;
        unsigned int cache_dirty:1;
 
-       unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+       atomic_t frontbuffer_bits;
+       unsigned int frontbuffer_ggtt_origin; /* write once */
+
+       /** Current tiling stride for the object, if it's tiled. */
+       unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
 
-       unsigned int has_wc_mmap;
+       /** Count of VMA actually bound by this object */
+       unsigned int bind_count;
        unsigned int pin_display;
 
        struct sg_table *pages;
@@ -2256,14 +2266,9 @@ struct drm_i915_gem_object {
         * requests on one ring where the write request is older than the
         * read request. This allows for the CPU to read from an active
         * buffer by only waiting for the write to complete.
-        * */
-       struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
-       struct drm_i915_gem_request *last_write_req;
-       /** Breadcrumb of last fenced GPU access to the buffer. */
-       struct drm_i915_gem_request *last_fenced_req;
-
-       /** Current tiling stride for the object, if it's tiled. */
-       uint32_t stride;
+        */
+       struct i915_gem_active last_read[I915_NUM_ENGINES];
+       struct i915_gem_active last_write;
 
        /** References from framebuffers, locks out tiling changes. */
        unsigned long framebuffer_references;
@@ -2287,7 +2292,56 @@ struct drm_i915_gem_object {
                } userptr;
        };
 };
-#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+       /* Assert that to_intel_bo(NULL) == NULL */
+       BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+       return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+       return to_intel_bo(drm_gem_object_lookup(file, handle));
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+       drm_gem_object_reference(&obj->base);
+       return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+       drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
+{
+       drm_gem_object_unreference_unlocked(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
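
The BUILD_BUG_ON(offsetof(...)) in to_intel_bo() is what makes the "to_intel_bo(NULL) == NULL" comment hold: container_of() subtracts the member offset, so NULL survives only while base remains the first member. A standalone illustration with generic names (not i915 code; like the kernel idiom, it leans on pointer arithmetic that is technically undefined for NULL but universally relied upon):

    #include <stddef.h>
    #include <stdio.h>

    struct base { int refcount; };
    struct derived { struct base base; int extra; };  /* base first */

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct base *b = NULL;

            /* offsetof(struct derived, base) == 0, so NULL stays NULL */
            printf("%p\n", (void *)container_of(b, struct derived, base));
            return 0;
    }
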
 
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
@@ -2295,6 +2349,67 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
        return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+       return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+       return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+       obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+       obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+                                 int engine)
+{
+       return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
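
With the activity state folded into obj->flags above, each engine owns one bit starting at I915_BO_ACTIVE_SHIFT, so "busy on any engine" collapses to a single mask test. A standalone sketch with a made-up engine count and values:

    #include <stdio.h>

    #define NUM_ENGINES  5
    #define ACTIVE_SHIFT 0
    #define ACTIVE_MASK  (((1UL << NUM_ENGINES) - 1) << ACTIVE_SHIFT)

    int main(void)
    {
            unsigned long obj_flags = 0;

            obj_flags |= 1UL << (2 + ACTIVE_SHIFT);    /* engine 2 active */
            printf("active: 0x%lx, busy: %d\n",
                   (obj_flags & ACTIVE_MASK) >> ACTIVE_SHIFT,
                   (obj_flags & ACTIVE_MASK) != 0);
            obj_flags &= ~(1UL << (2 + ACTIVE_SHIFT)); /* engine 2 idle */
            printf("busy: %d\n", (obj_flags & ACTIVE_MASK) != 0);
            return 0;
    }
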
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+       return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+       return obj->tiling_and_stride & STRIDE_MASK;
+}
+
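
The tiling_and_stride packing works because any fenceable stride is a multiple of FENCE_MINIMUM_STRIDE (128), so its low seven bits are always zero and can carry the 2-bit tiling mode, which the accessors above mask back out. A standalone sketch with example values:

    #include <stdio.h>

    #define FENCE_MINIMUM_STRIDE 128
    #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
    #define STRIDE_MASK (~TILING_MASK)

    int main(void)
    {
            unsigned int stride = 4096;     /* multiple of 128 */
            unsigned int tiling = 1;        /* e.g. X-tiled */
            unsigned int packed = stride | tiling;

            printf("tiling=%u stride=%u\n",
                   packed & TILING_MASK, packed & STRIDE_MASK);
            return 0;
    }
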
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+       i915_gem_object_get(vma->obj);
+       return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       i915_gem_object_put(vma->obj);
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
@@ -2365,171 +2480,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
             (((__iter).curr += PAGE_SIZE) < (__iter).max) ||           \
             ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
 
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
-       struct kref ref;
-
-       /** On Which ring this request was generated */
-       struct drm_i915_private *i915;
-       struct intel_engine_cs *engine;
-       struct intel_signal_node signaling;
-
-        /** GEM sequence number associated with the previous request,
-         * when the HWS breadcrumb is equal to this the GPU is processing
-         * this request.
-         */
-       u32 previous_seqno;
-
-        /** GEM sequence number associated with this request,
-         * when the HWS breadcrumb is equal or greater than this the GPU
-         * has finished processing this request.
-         */
-       u32 seqno;
-
-       /** Position in the ringbuffer of the start of the request */
-       u32 head;
-
-       /**
-        * Position in the ringbuffer of the start of the postfix.
-        * This is required to calculate the maximum available ringbuffer
-        * space without overwriting the postfix.
-        */
-        u32 postfix;
-
-       /** Position in the ringbuffer of the end of the whole request */
-       u32 tail;
-
-       /** Preallocate space in the ringbuffer for the emitting the request */
-       u32 reserved_space;
-
-       /**
-        * Context and ring buffer related to this request
-        * Contexts are refcounted, so when this request is associated with a
-        * context, we must increment the context's refcount, to guarantee that
-        * it persists while any request is linked to it. Requests themselves
-        * are also refcounted, so the request will only be freed when the last
-        * reference to it is dismissed, and the code in
-        * i915_gem_request_free() will then decrement the refcount on the
-        * context.
-        */
-       struct i915_gem_context *ctx;
-       struct intel_ringbuffer *ringbuf;
-
-       /**
-        * Context related to the previous request.
-        * As the contexts are accessed by the hardware until the switch is
-        * completed to a new context, the hardware may still be writing
-        * to the context object after the breadcrumb is visible. We must
-        * not unpin/unbind/prune that object whilst still active and so
-        * we keep the previous context pinned until the following (this)
-        * request is retired.
-        */
-       struct i915_gem_context *previous_context;
-
-       /** Batch buffer related to this request if any (used for
-           error state dump only) */
-       struct drm_i915_gem_object *batch_obj;
-
-       /** Time at which this request was emitted, in jiffies. */
-       unsigned long emitted_jiffies;
-
-       /** global list entry for this request */
-       struct list_head list;
-
-       struct drm_i915_file_private *file_priv;
-       /** file_priv list entry for this request */
-       struct list_head client_list;
-
-       /** process identifier submitting this request */
-       struct pid *pid;
-
-       /**
-        * The ELSP only accepts two elements at a time, so we queue
-        * context/tail pairs on a given queue (ring->execlist_queue) until the
-        * hardware is available. The queue serves a double purpose: we also use
-        * it to keep track of the up to 2 contexts currently in the hardware
-        * (usually one in execution and the other queued up by the GPU): We
-        * only remove elements from the head of the queue when the hardware
-        * informs us that an element has been completed.
-        *
-        * All accesses to the queue are mediated by a spinlock
-        * (ring->execlist_lock).
-        */
-
-       /** Execlist link in the submission queue.*/
-       struct list_head execlist_link;
-
-       /** Execlists no. of times this request has been sent to the ELSP */
-       int elsp_submitted;
-
-       /** Execlists context hardware id. */
-       unsigned ctx_hw_id;
-};
-
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-       return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
-       return req ? req->engine : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
-       if (req)
-               kref_get(&req->ref);
-       return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
-       kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-                                          struct drm_i915_gem_request *src)
-{
-       if (src)
-               i915_gem_request_reference(src);
-
-       if (*pdst)
-               i915_gem_request_unreference(*pdst);
-
-       *pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
-
 /*
  * A command that requires special handling by the command parser.
  */
@@ -2617,8 +2567,9 @@ struct drm_i915_cmd_descriptor {
 /*
  * A table of commands requiring special handling by the command parser.
  *
- * Each ring has an array of tables. Each table consists of an array of command
- * descriptors, which must be sorted with command opcodes in ascending order.
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
  */
 struct drm_i915_cmd_table {
        const struct drm_i915_cmd_descriptor *table;
@@ -2636,7 +2587,7 @@ struct drm_i915_cmd_table {
                BUILD_BUG(); \
        __p; \
 })
-#define INTEL_INFO(p)  (&__I915__(p)->info)
+#define INTEL_INFO(p)  (&__I915__(p)->info)
 #define INTEL_GEN(p)   (INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
 
@@ -2803,10 +2754,10 @@ struct drm_i915_cmd_table {
 #define HAS_EDRAM(dev)         (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
                                 HAS_EDRAM(dev))
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HWS_NEEDS_PHYSICAL(dev)        (INTEL_INFO(dev)->hws_needs_physical)
 
-#define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
+#define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
 #define USES_PPGTT(dev)                (i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev)     (i915.enable_ppgtt == 3)
@@ -2830,7 +2781,7 @@ struct drm_i915_cmd_table {
  * interrupt source and so prevents the other device from working properly.
  */
 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -2846,38 +2797,27 @@ struct drm_i915_cmd_table {
 
 #define HAS_IPS(dev)           (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
-#define HAS_DP_MST(dev)                (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-                                INTEL_INFO(dev)->gen >= 9)
+#define HAS_DP_MST(dev)        (INTEL_INFO(dev)->has_dp_mst)
 
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev)           (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-                                IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-                                IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-#define HAS_RUNTIME_PM(dev)    (IS_GEN6(dev) || IS_HASWELL(dev) || \
-                                IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-                                IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-                                IS_KABYLAKE(dev) || IS_BROXTON(dev))
-#define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)          (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
-
-#define HAS_CSR(dev)   (IS_GEN9(dev))
+#define HAS_PSR(dev)           (INTEL_INFO(dev)->has_psr)
+#define HAS_RUNTIME_PM(dev)    (INTEL_INFO(dev)->has_runtime_pm)
+#define HAS_RC6(dev)           (INTEL_INFO(dev)->has_rc6)
+#define HAS_RC6p(dev)          (INTEL_INFO(dev)->has_rc6p)
+
+#define HAS_CSR(dev)   (INTEL_INFO(dev)->has_csr)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev)           (IS_GEN9(dev))
+#define HAS_GUC(dev)           (INTEL_INFO(dev)->has_guc)
 #define HAS_GUC_UCODE(dev)     (HAS_GUC(dev))
 #define HAS_GUC_SCHED(dev)     (HAS_GUC(dev))
 
-#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
-                                   INTEL_INFO(dev)->gen >= 8)
-
-#define HAS_CORE_RING_FREQ(dev)        (INTEL_INFO(dev)->gen >= 6 && \
-                                !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
-                                !IS_BROXTON(dev))
+#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
 
 #define HAS_POOLED_EU(dev)     (INTEL_INFO(dev)->has_pooled_eu)
 
@@ -2905,11 +2845,10 @@ struct drm_i915_cmd_table {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
-                              IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
@@ -2930,7 +2869,9 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-                               int enable_ppgtt);
+                               int enable_ppgtt);
+
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
 
 /* i915_drv.c */
 void __printf(3, 4)
@@ -2946,7 +2887,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 #endif
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset(struct drm_i915_private *dev_priv);
+extern void i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3107,11 +3048,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-                                       struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-                                  struct drm_i915_gem_execbuffer2 *args,
-                                  struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -3150,47 +3086,28 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
                struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);
-
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE   (1<<0)
-#define PIN_NONBLOCK   (1<<1)
-#define PIN_GLOBAL     (1<<2)
-#define PIN_OFFSET_BIAS        (1<<3)
-#define PIN_USER       (1<<4)
-#define PIN_UPDATE     (1<<5)
-#define PIN_ZONE_4G    (1<<6)
-#define PIN_HIGH       (1<<7)
-#define PIN_OFFSET_FIXED       (1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm,
-                   uint32_t alignment,
-                   uint64_t flags);
-int __must_check
+
+struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
-                        uint32_t alignment,
-                        uint64_t flags);
+                        u64 size,
+                        u64 alignment,
+                        u64 flags);
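/*
 * Illustrative usage sketch (hypothetical helper, not part of the patch):
 * the new signature hands back a vma (or ERR_PTR) rather than an int, so
 * callers follow the IS_ERR() pattern this series uses further down.
 */
static inline int example_pin_for_access(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... access through i915_ggtt_offset(vma) ... */
	i915_vma_unpin(vma);
	return 0;
}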
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-/*
- * BEWARE: Do not use the function below unless you can _absolutely_
- * _guarantee_ VMA in question is _not in use_ anywhere.
- */
-int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-                                   int *needs_clflush);
-
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
@@ -3250,13 +3167,20 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        obj->pages_pin_count--;
 }
 
+enum i915_map_type {
+       I915_MAP_WB = 0,
+       I915_MAP_WC,
+};
+
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj - the object to map into kernel address space
+ * @type - the type of mapping, used to select pgprot_t
  *
  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
  * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space.
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
  *
  * The caller must hold the struct_mutex, and is responsible for calling
  * i915_gem_object_unpin_map() when the mapping is no longer required.
@@ -3264,7 +3188,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
  * Returns the pointer through which to access the mapped object, or an
  * ERR_PTR() on error.
  */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+                                          enum i915_map_type type);
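/*
 * Usage sketch (illustrative only): pin a kernel mapping of the whole
 * object, choosing write-back vs write-combine with the new @type argument,
 * and release it when done; example_fill is a hypothetical caller.
 */
static inline int example_fill(struct drm_i915_gem_object *obj,
			       const void *src, size_t len)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);	/* struct_mutex held, per the comment above */
	i915_gem_object_unpin_map(obj);
	return 0;
}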
 
 /**
  * i915_gem_object_unpin_map - releases an earlier mapping
@@ -3283,122 +3208,73 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
        i915_gem_object_unpin_pages(obj);
 }
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+                                   unsigned int *needs_clflush);
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+                                    unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE 0x1
+#define CLFLUSH_AFTER 0x2
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin_pages(obj);
+}
+
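/*
 * Usage sketch (illustrative): the prepare/finish pair brackets direct CPU
 * access to shmem-backed pages; *needs_clflush comes back with
 * CLFLUSH_BEFORE and/or CLFLUSH_AFTER set according to the caching state.
 */
static inline int example_cpu_access(struct drm_i915_gem_object *obj)
{
	unsigned int needs_clflush;
	int ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... read obj->pages, clflushing first if CLFLUSH_BEFORE is set ... */

	i915_gem_obj_finish_shmem_access(obj);	/* drops the page pin */
	return 0;
}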
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                        struct intel_engine_cs *to,
-                        struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct drm_i915_gem_request *req);
+                            struct drm_i915_gem_request *req,
+                            unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
+int i915_gem_mmap_gtt_version(void);
 
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits);
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-       return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
-{
-       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                req->seqno);
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-                        int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-                                    int state, unsigned long timeout_us)
-{
-       return (i915_gem_request_started(request) &&
-               __i915_spin_request(request, state, timeout_us));
-}
-
-int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
-
-static inline u32 i915_reset_counter(struct i915_gpu_error *error)
-{
-       return atomic_read(&error->reset_counter);
-}
-
-static inline bool __i915_reset_in_progress(u32 reset)
-{
-       return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
-}
-
-static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
-{
-       return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
-}
-
-static inline bool __i915_terminally_wedged(u32 reset)
-{
-       return unlikely(reset & I915_WEDGED);
-}
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-       return __i915_reset_in_progress(i915_reset_counter(error));
+       return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
 }
 
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-       return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
+       return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
 {
-       return __i915_terminally_wedged(i915_reset_counter(error));
+       return i915_reset_in_progress(error) | i915_terminally_wedged(error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-       return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
+       return READ_ONCE(error->reset_count);
 }
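/*
 * Usage sketch (illustrative): with the reset state now tracked as bits in
 * error->flags, a waiter can test it cheaply and bail out early;
 * example_reset_status is a hypothetical caller.
 */
static inline int example_reset_status(struct drm_i915_private *dev_priv)
{
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;
	return 0;
}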
 
-void i915_gem_reset(struct drm_device *dev);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+                                       unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
-                       struct drm_i915_gem_object *batch_obj,
-                       bool flush_caches);
-#define i915_add_request(req) \
-       __i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
-       __i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -3408,22 +3284,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
 int __must_check
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-                                             const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                                int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-                           int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+                          int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+                               int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);
@@ -3434,86 +3308,82 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
 
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-                                 const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
-       return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-                                 const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm);
-
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-                         const struct i915_ggtt_view *view);
+                    struct i915_address_space *vm,
+                    const struct i915_ggtt_view *view);
 
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-                                      const struct i915_ggtt_view *view);
-
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-       return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
+                                 struct i915_address_space *vm,
+                                 const struct i915_ggtt_view *view);
 
-/* Some GGTT VM helpers */
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
        return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+                       const struct i915_ggtt_view *view)
+{
+       return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
+}
 
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+                           const struct i915_ggtt_view *view)
 {
-       return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
 }
 
-unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
+/* i915_gem_fence.c */
+int __must_check i915_vma_get_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
 
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
-                     uint32_t alignment,
-                     unsigned flags)
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence():
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       return i915_gem_object_pin(obj, &ggtt->base,
-                                  alignment, flags | PIN_GLOBAL);
+       if (vma->fence) {
+               vma->fence->pin_count++;
+               return true;
+       } else {
+               return false;
+       }
 }
 
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-                                    const struct i915_ggtt_view *view);
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
 static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_vma_unpin_fence(struct i915_vma *vma)
 {
-       i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+       if (vma->fence) {
+               GEM_BUG_ON(vma->fence->pin_count <= 0);
+               vma->fence->pin_count--;
+       }
 }
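/*
 * Usage sketch (illustrative): synchronize the fence register state, pin it
 * for the lifetime of the scanout programming, then drop the pin; "vma" is
 * a hypothetical scanout vma.
 */
static inline int example_fenced_scanout(struct i915_vma *vma)
{
	int ret;

	ret = i915_vma_get_fence(vma);
	if (ret)
		return ret;

	if (i915_vma_pin_fence(vma)) {
		/* ... program a fenced (tiled) scanout ... */
		i915_vma_unpin_fence(vma);
	}

	return 0;
}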
 
-/* i915_gem_fence.c */
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
-
 void i915_gem_restore_fences(struct drm_device *dev);
 
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -3524,10 +3394,10 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
-void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3548,12 +3418,14 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
        return ctx;
 }
 
-static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
 {
        kref_get(&ctx->ref);
+       return ctx;
 }
 
-static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 {
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        kref_put(&ctx->ref, i915_gem_context_free);
@@ -3576,13 +3448,10 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file);
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-                                         struct i915_address_space *vm,
-                                         int min_size,
-                                         unsigned alignment,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+                                         u64 min_size, u64 alignment,
                                          unsigned cache_level,
-                                         unsigned long start,
-                                         unsigned long end,
+                                         u64 start, u64 end,
                                          unsigned flags);
 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
@@ -3590,6 +3459,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+       wmb();
        if (INTEL_GEN(dev_priv) < 6)
                intel_gtt_chipset_flush();
 }
@@ -3634,28 +3504,21 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-               obj->tiling_mode != I915_TILING_NONE;
+               i915_gem_object_is_tiled(obj);
 }
 
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_device *dev);
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
 #else
 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
-static inline void intel_display_crc_init(struct drm_device *dev) {}
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
@@ -3684,23 +3547,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
-int i915_parse_cmds(struct intel_engine_cs *engine,
-                   struct drm_i915_gem_object *batch_obj,
-                   struct drm_i915_gem_object *shadow_batch_obj,
-                   u32 batch_start_offset,
-                   u32 batch_len,
-                   bool is_master);
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+                           struct drm_i915_gem_object *batch_obj,
+                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u32 batch_start_offset,
+                           u32 batch_len,
+                           bool is_master);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
 /* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_device *dev_priv);
-void i915_teardown_sysfs(struct drm_device *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *dev_priv);
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
 
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
@@ -3800,7 +3663,6 @@ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
 
-extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
@@ -3878,9 +3740,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
  * act upon the intermediate value, possibly leading to corruption and
- * machine death. You have been warned.
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * dev_priv->uncore.funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
  */
-#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
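/*
 * Illustrative sketch of the retry idea behind I915_READ64_2x32 (the macro
 * body is elided by this hunk; example_read64_2x32 and rd32 are
 * hypothetical): re-read the upper half until it is stable, so a carry
 * between the two 32-bit reads cannot yield a torn 64-bit value.
 */
static inline u64 example_read64_2x32(u32 (*rd32)(u32 reg),
				      u32 lower_reg, u32 upper_reg)
{
	u32 lower, upper, old_upper;

	upper = rd32(upper_reg);
	do {
		old_upper = upper;
		lower = rd32(lower_reg);
		upper = rd32(upper_reg);
	} while (upper != old_upper);

	return (u64)upper << 32 | lower;
}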
@@ -3923,7 +3792,7 @@ __raw_write(64, q)
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * criticial sections inside IRQ handlers where forcewake is explicitly
+ * critical sections inside IRQ handlers where forcewake is explicitly
  * controlled.
  * Think twice, and think again, before using these.
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
@@ -3995,7 +3864,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
 }
-static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
+
+static inline bool
+__i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
 
@@ -4017,7 +3888,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
         * is woken.
         */
        if (engine->irq_seqno_barrier &&
-           READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+           rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
            cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
                struct task_struct *tsk;
 
@@ -4042,7 +3913,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
                 * irq_posted == false but we are still running).
                 */
                rcu_read_lock();
-               tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+               tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
                if (tsk && tsk != current)
                        /* Note that if the bottom-half is changed as we
                         * are sending the wake-up, the new bottom-half will
@@ -4057,18 +3928,35 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
                        return true;
        }
 
-       /* We need to check whether any gpu reset happened in between
-        * the request being submitted and now. If a reset has occurred,
-        * the seqno will have been advance past ours and our request
-        * is complete. If we are in the process of handling a reset,
-        * the request is effectively complete as the rendering will
-        * be discarded, but we need to return in order to drop the
-        * struct_mutex.
-        */
-       if (i915_reset_in_progress(&req->i915->gpu_error))
-               return true;
-
        return false;
 }
 
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+
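/*
 * Usage sketch (illustrative): i915_memcpy_from_wc() is assumed here to
 * return false when the accelerated path is unavailable, so callers keep a
 * plain memcpy() fallback; example_copy_from_wc is hypothetical.
 */
static inline void example_copy_from_wc(void *dst, const void *src,
					unsigned long len)
{
	if (!i915_memcpy_from_wc(dst, src, len))
		memcpy(dst, src, len);
}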
+/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
+
+#define ptr_mask_bits(ptr) ({                                          \
+       unsigned long __v = (unsigned long)(ptr);                       \
+       (typeof(ptr))(__v & PAGE_MASK);                                 \
+})
+
+#define ptr_unpack_bits(ptr, bits) ({                                  \
+       unsigned long __v = (unsigned long)(ptr);                       \
+       (bits) = __v & ~PAGE_MASK;                                      \
+       (typeof(ptr))(__v & PAGE_MASK);                                 \
+})
+
+#define ptr_pack_bits(ptr, bits)                                       \
+       ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+
+#define fetch_and_zero(ptr) ({                                         \
+       typeof(*ptr) __T = *(ptr);                                      \
+       *(ptr) = (typeof(*ptr))0;                                       \
+       __T;                                                            \
+})
+
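/*
 * Usage sketch (illustrative): ptr_pack_bits()/ptr_unpack_bits() stash
 * small flags in the low, page-aligned bits of a pointer, and
 * fetch_and_zero() consumes a slot in a single expression; "slot" and both
 * helpers are hypothetical.
 */
static inline struct page *example_pack(struct page *page)
{
	unsigned long bits;
	struct page *packed = ptr_pack_bits(page, 0x2);

	WARN_ON(ptr_unpack_bits(packed, bits) != page || bits != 0x2);
	return ptr_mask_bits(packed);
}

static inline struct page *example_take(struct page **slot)
{
	return fetch_and_zero(slot);	/* returns *slot and zeroes it */
}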
 #endif
index 11681501d7b1314e9a996073d4bdba48226a22b0..2c81067589225a89762a4129fac7eeee0abebed9 100644
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
@@ -139,7 +138,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
        if (ret)
                return ret;
 
-       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
@@ -156,10 +154,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
-               if (vma->pin_count)
+               if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
-               if (vma->pin_count)
+               if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
@@ -281,23 +279,129 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .release = i915_gem_object_release_phys,
 };
 
-static int
-drop_pages(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-       struct i915_vma *vma, *next;
+       struct i915_vma *vma;
+       LIST_HEAD(still_in_list);
        int ret;
 
-       drm_gem_object_reference(&obj->base);
-       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-               if (i915_vma_unbind(vma))
-                       break;
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       ret = i915_gem_object_put_pages(obj);
-       drm_gem_object_unreference(&obj->base);
+       /* Closed vma are removed from the obj->vma_list - but they may
+        * still have an active binding on the object. To remove those we
+        * must wait for all rendering to complete to the object (as unbinding
+        * must anyway), and retire the requests.
+        */
+       ret = i915_gem_object_wait_rendering(obj, false);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests(to_i915(obj->base.dev));
+
+       while ((vma = list_first_entry_or_null(&obj->vma_list,
+                                              struct i915_vma,
+                                              obj_link))) {
+               list_move_tail(&vma->obj_link, &still_in_list);
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       break;
+       }
+       list_splice(&still_in_list, &obj->vma_list);
 
        return ret;
 }
 
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for just read access or read-write access
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+                              bool readonly)
+{
+       struct reservation_object *resv;
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
+
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       if (!readonly) {
+               active = obj->last_read;
+               active_mask = i915_gem_object_get_active(obj);
+       } else {
+               active_mask = 1;
+               active = &obj->last_write;
+       }
+
+       for_each_active(active_mask, idx) {
+               int ret;
+
+               ret = i915_gem_active_wait(&active[idx],
+                                          &obj->base.dev->struct_mutex);
+               if (ret)
+                       return ret;
+       }
+
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (resv) {
+               long err;
+
+               err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+                                                         MAX_SCHEDULE_TIMEOUT);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
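/*
 * Usage sketch (illustrative): callers quiesce rendering under struct_mutex
 * before touching the object from the CPU; readonly=true waits only for the
 * last writer. example_quiesce_for_read is a hypothetical caller.
 */
static inline int example_quiesce_for_read(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	return i915_gem_object_wait_rendering(obj, true);
}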
+/* A nonblocking variant of the above wait. Must be called prior to
+ * acquiring the mutex for the object, as the object state may change
+ * during this call. A reference must be held by the caller for the object.
+ */
+static __must_check int
+__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
+                       struct intel_rps_client *rps,
+                       bool readonly)
+{
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
+
+       active_mask = __I915_BO_ACTIVE(obj);
+       if (!active_mask)
+               return 0;
+
+       if (!readonly) {
+               active = obj->last_read;
+       } else {
+               active_mask = 1;
+               active = &obj->last_write;
+       }
+
+       for_each_active(active_mask, idx) {
+               int ret;
+
+               ret = i915_gem_active_wait_unlocked(&active[idx],
+                                                   I915_WAIT_INTERRUPTIBLE,
+                                                   NULL, rps);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+       struct drm_i915_file_private *fpriv = file->driver_priv;
+
+       return &fpriv->rps;
+}
+
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
@@ -318,7 +422,11 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (obj->base.filp == NULL)
                return -EINVAL;
 
-       ret = drop_pages(obj);
+       ret = i915_gem_object_unbind(obj);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_object_put_pages(obj);
        if (ret)
                return ret;
 
@@ -408,7 +516,7 @@ i915_gem_create(struct drm_file *file,
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;
 
@@ -502,33 +610,106 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  * flush the object from the CPU cache.
  */
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-                                   int *needs_clflush)
+                                   unsigned int *needs_clflush)
 {
        int ret;
 
        *needs_clflush = 0;
 
-       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
-               return -EINVAL;
+       if (!i915_gem_object_has_struct_page(obj))
+               return -ENODEV;
+
+       ret = i915_gem_object_wait_rendering(obj, true);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ret;
+
+       i915_gem_object_pin_pages(obj);
+
+       i915_gem_object_flush_gtt_write_domain(obj);
 
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-               /* If we're not in the cpu read domain, set ourself into the gtt
-                * read domain and manually flush cachelines (if required). This
-                * optimizes for the case when the gpu will dirty the data
-                * anyway again before the next pread happens. */
+       /* If we're not in the cpu read domain, set ourself into the gtt
+        * read domain and manually flush cachelines (if required). This
+        * optimizes for the case when the gpu will dirty the data
+        * anyway again before the next pread happens.
+        */
+       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
-               ret = i915_gem_object_wait_rendering(obj, true);
+
+       if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               ret = i915_gem_object_set_to_cpu_domain(obj, false);
                if (ret)
-                       return ret;
+                       goto err_unpin;
+
+               *needs_clflush = 0;
        }
 
+       /* return with the pages pinned */
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
+       return ret;
+}
+
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+                                    unsigned int *needs_clflush)
+{
+       int ret;
+
+       *needs_clflush = 0;
+       if (!i915_gem_object_has_struct_page(obj))
+               return -ENODEV;
+
+       ret = i915_gem_object_wait_rendering(obj, false);
+       if (ret)
+               return ret;
+
        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;
 
        i915_gem_object_pin_pages(obj);
 
+       i915_gem_object_flush_gtt_write_domain(obj);
+
+       /* If we're not in the cpu write domain, set ourself into the
+        * gtt write domain and manually flush cachelines (as required).
+        * This optimizes for the case when the gpu will use the data
+        * right away and we therefore have to clflush anyway.
+        */
+       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+               *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+
+       /* Same trick applies to invalidate partially written cachelines read
+        * before writing.
+        */
+       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+               *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
+                                                        obj->cache_level);
+
+       if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               ret = i915_gem_object_set_to_cpu_domain(obj, true);
+               if (ret)
+                       goto err_unpin;
+
+               *needs_clflush = 0;
+       }
+
+       if ((*needs_clflush & CLFLUSH_AFTER) == 0)
+               obj->cache_dirty = true;
+
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+       obj->dirty = 1;
+       /* return with the pages pinned */
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
        return ret;
 }
 
@@ -638,14 +819,24 @@ i915_gem_gtt_pread(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_vma *vma;
        struct drm_mm_node node;
        char __user *user_data;
        uint64_t remain;
        uint64_t offset;
        int ret;
 
-       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
-       if (ret) {
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (!IS_ERR(vma)) {
+               node.start = i915_ggtt_offset(vma);
+               node.allocated = false;
+               ret = i915_vma_put_fence(vma);
+               if (ret) {
+                       i915_vma_unpin(vma);
+                       vma = ERR_PTR(ret);
+               }
+       }
+       if (IS_ERR(vma)) {
                ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
                if (ret)
                        goto out;
@@ -657,12 +848,6 @@ i915_gem_gtt_pread(struct drm_device *dev,
                }
 
                i915_gem_object_pin_pages(obj);
-       } else {
-               node.start = i915_gem_obj_ggtt_offset(obj);
-               node.allocated = false;
-               ret = i915_gem_object_put_fence(obj);
-               if (ret)
-                       goto out_unpin;
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -707,7 +892,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
                 * and write to user memory which may result into page
                 * faults, and so we cannot perform this under struct_mutex.
                 */
-               if (slow_user_access(ggtt->mappable, page_base,
+               if (slow_user_access(&ggtt->mappable, page_base,
                                     page_offset, user_data,
                                     page_length, false)) {
                        ret = -EFAULT;
@@ -739,7 +924,7 @@ out_unpin:
                i915_gem_object_unpin_pages(obj);
                remove_mappable_node(&node);
        } else {
-               i915_gem_object_ggtt_unpin(obj);
+               i915_vma_unpin(vma);
        }
 out:
        return ret;
@@ -760,19 +945,14 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
-       if (!i915_gem_object_has_struct_page(obj))
-               return -ENODEV;
-
-       user_data = u64_to_user_ptr(args->data_ptr);
-       remain = args->size;
-
-       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;
 
+       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+       user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
+       remain = args->size;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
@@ -828,7 +1008,7 @@ next_page:
        }
 
 out:
-       i915_gem_object_unpin_pages(obj);
+       i915_gem_obj_finish_shmem_access(obj);
 
        return ret;
 }
@@ -857,36 +1037,44 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
 
        /* Bounds check source.  */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
-               goto out;
+               goto err;
        }
 
        trace_i915_gem_object_pread(obj, args->offset, args->size);
 
+       ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+       if (ret)
+               goto err;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto err;
+
        ret = i915_gem_shmem_pread(dev, obj, args, file);
 
        /* pread for non shmem backed objects */
-       if (ret == -EFAULT || ret == -ENODEV)
+       if (ret == -EFAULT || ret == -ENODEV) {
+               intel_runtime_pm_get(to_i915(dev));
                ret = i915_gem_gtt_pread(dev, obj, args->size,
                                        args->offset, args->data_ptr);
+               intel_runtime_pm_put(to_i915(dev));
+       }
 
-out:
-       drm_gem_object_unreference(&obj->base);
-unlock:
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+
+err:
+       i915_gem_object_put_unlocked(obj);
        return ret;
 }
 
@@ -916,7 +1104,7 @@ fast_user_write(struct io_mapping *mapping,
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
- * @dev: drm device pointer
+ * @i915: i915 device private data
  * @obj: i915 gem object
  * @args: pwrite arguments structure
  * @file: drm file pointer
@@ -929,17 +1117,28 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 {
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_device *dev = obj->base.dev;
+       struct i915_vma *vma;
        struct drm_mm_node node;
        uint64_t remain, offset;
        char __user *user_data;
        int ret;
        bool hit_slow_path = false;
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (i915_gem_object_is_tiled(obj))
                return -EFAULT;
 
-       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-       if (ret) {
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                      PIN_MAPPABLE | PIN_NONBLOCK);
+       if (!IS_ERR(vma)) {
+               node.start = i915_ggtt_offset(vma);
+               node.allocated = false;
+               ret = i915_vma_put_fence(vma);
+               if (ret) {
+                       i915_vma_unpin(vma);
+                       vma = ERR_PTR(ret);
+               }
+       }
+       if (IS_ERR(vma)) {
                ret = insert_mappable_node(i915, &node, PAGE_SIZE);
                if (ret)
                        goto out;
@@ -951,19 +1150,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                }
 
                i915_gem_object_pin_pages(obj);
-       } else {
-               node.start = i915_gem_obj_ggtt_offset(obj);
-               node.allocated = false;
-               ret = i915_gem_object_put_fence(obj);
-               if (ret)
-                       goto out_unpin;
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;
 
-       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        obj->dirty = true;
 
        user_data = u64_to_user_ptr(args->data_ptr);
@@ -995,11 +1188,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (fast_user_write(ggtt->mappable, page_base,
+               if (fast_user_write(&ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        hit_slow_path = true;
                        mutex_unlock(&dev->struct_mutex);
-                       if (slow_user_access(ggtt->mappable,
+                       if (slow_user_access(&ggtt->mappable,
                                             page_base,
                                             page_offset, user_data,
                                             page_length, true)) {
@@ -1030,7 +1223,7 @@ out_flush:
                }
        }
 
-       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 out_unpin:
        if (node.allocated) {
                wmb();
@@ -1040,7 +1233,7 @@ out_unpin:
                i915_gem_object_unpin_pages(obj);
                remove_mappable_node(&node);
        } else {
-               i915_gem_object_ggtt_unpin(obj);
+               i915_vma_unpin(vma);
        }
 out:
        return ret;
@@ -1123,41 +1316,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
-       int needs_clflush_after = 0;
-       int needs_clflush_before = 0;
+       unsigned int needs_clflush;
        struct sg_page_iter sg_iter;
 
-       user_data = u64_to_user_ptr(args->data_ptr);
-       remain = args->size;
-
-       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-               /* If we're not in the cpu write domain, set ourself into the gtt
-                * write domain and manually flush cachelines (if required). This
-                * optimizes for the case when the gpu will use the data
-                * right away and we therefore have to clflush anyway. */
-               needs_clflush_after = cpu_write_needs_clflush(obj);
-               ret = i915_gem_object_wait_rendering(obj, false);
-               if (ret)
-                       return ret;
-       }
-       /* Same trick applies to invalidate partially written cachelines read
-        * before writing. */
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
-               needs_clflush_before =
-                       !cpu_cache_is_coherent(dev, obj->cache_level);
-
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
        if (ret)
                return ret;
 
-       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-
-       i915_gem_object_pin_pages(obj);
-
+       obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+       user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
-       obj->dirty = 1;
+       remain = args->size;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
@@ -1181,7 +1350,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire page. */
-               partial_cacheline_write = needs_clflush_before &&
+               partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));
 
@@ -1191,7 +1360,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
-                                       needs_clflush_after);
+                                       needs_clflush & CLFLUSH_AFTER);
                if (ret == 0)
                        goto next_page;
 
@@ -1200,7 +1369,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
-                                       needs_clflush_after);
+                                       needs_clflush & CLFLUSH_AFTER);
 
                mutex_lock(&dev->struct_mutex);
 
@@ -1214,7 +1383,7 @@ next_page:
        }
 
 out:
-       i915_gem_object_unpin_pages(obj);
+       i915_gem_obj_finish_shmem_access(obj);
 
        if (hit_slowpath) {
                /*
@@ -1222,17 +1391,15 @@ out:
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
-               if (!needs_clflush_after &&
+               if (!(needs_clflush & CLFLUSH_AFTER) &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
-                               needs_clflush_after = true;
+                               needs_clflush |= CLFLUSH_AFTER;
                }
        }
 
-       if (needs_clflush_after)
+       if (needs_clflush & CLFLUSH_AFTER)
                i915_gem_chipset_flush(to_i915(dev));
-       else
-               obj->cache_dirty = true;
 
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
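
The needs_clflush rework above folds the two separate before/after booleans into a single flags word tested with CLFLUSH_BEFORE and CLFLUSH_AFTER. A minimal standalone sketch of that flag-word pattern (plain C; the FLUSH_* names are hypothetical stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's CLFLUSH_BEFORE/CLFLUSH_AFTER. */
#define FLUSH_BEFORE (1u << 0)
#define FLUSH_AFTER  (1u << 1)

static unsigned int prepare_write(bool coherent_reads, bool coherent_writes)
{
	unsigned int flags = 0;

	if (!coherent_reads)		/* invalidate stale cachelines first */
		flags |= FLUSH_BEFORE;
	if (!coherent_writes)		/* flush our writes out afterwards */
		flags |= FLUSH_AFTER;
	return flags;
}

int main(void)
{
	unsigned int flags = prepare_write(false, true);

	printf("before=%d after=%d\n",
	       !!(flags & FLUSH_BEFORE), !!(flags & FLUSH_AFTER));
	return 0;
}
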
@@ -1270,27 +1437,29 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                        return -EFAULT;
        }
 
-       intel_runtime_pm_get(dev_priv);
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               goto put_rpm;
-
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
 
        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
-               goto out;
+               goto err;
        }
 
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+       ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+       if (ret)
+               goto err;
+
+       intel_runtime_pm_get(dev_priv);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto err_rpm;
+
        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
@@ -1306,508 +1475,31 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                 * textures). Fall back to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT) {
+       if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
-               else if (i915_gem_object_has_struct_page(obj))
-                       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
                else
-                       ret = -ENODEV;
+                       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
        }
 
-out:
-       drm_gem_object_unreference(&obj->base);
-unlock:
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
-put_rpm:
        intel_runtime_pm_put(dev_priv);
 
        return ret;
-}
-
-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
-       if (__i915_terminally_wedged(reset_counter))
-               return -EIO;
-
-       if (__i915_reset_in_progress(reset_counter)) {
-               /* Non-interruptible callers can't handle -EAGAIN, hence return
-                * -EIO unconditionally for these. */
-               if (!interruptible)
-                       return -EIO;
-
-               return -EAGAIN;
-       }
 
-       return 0;
+err_rpm:
+       intel_runtime_pm_put(dev_priv);
+err:
+       i915_gem_object_put_unlocked(obj);
+       return ret;
 }
 
-static unsigned long local_clock_us(unsigned *cpu)
+static inline enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 {
-       unsigned long t;
-
-       /* Cheaply and approximately convert from nanoseconds to microseconds.
-        * The result and subsequent calculations are also defined in the same
-        * approximate microseconds units. The principal source of timing
-        * error here is from the simple truncation.
-        *
-        * Note that local_clock() is only defined w.r.t. the current CPU;
-        * the comparisons are no longer valid if we switch CPUs. Instead of
-        * blocking preemption for the entire busywait, we can detect the CPU
-        * switch and use that as indicator of system load and a reason to
-        * stop busywaiting, see busywait_stop().
-        */
-       *cpu = get_cpu();
-       t = local_clock() >> 10;
-       put_cpu();
-
-       return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
-       unsigned this_cpu;
-
-       if (time_after(local_clock_us(&this_cpu), timeout))
-               return true;
-
-       return this_cpu != cpu;
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
-                        int state, unsigned long timeout_us)
-{
-       unsigned cpu;
-
-       /* When waiting for high frequency requests, e.g. during synchronous
-        * rendering split between the CPU and GPU, the finite amount of time
-        * required to set up the irq and wait upon it limits the response
-        * rate. By busywaiting on the request completion for a short while we
-        * can service the high frequency waits as quickly as possible. However,
-        * if it is a slow request, we want to sleep as quickly as possible.
-        * The tradeoff between waiting and sleeping is roughly the time it
-        * takes to sleep on a request, on the order of a microsecond.
-        */
-
-       timeout_us += local_clock_us(&cpu);
-       do {
-               if (i915_gem_request_completed(req))
-                       return true;
-
-               if (signal_pending_state(state, current))
-                       break;
-
-               if (busywait_stop(timeout_us, cpu))
-                       break;
-
-               cpu_relax_lowlatency();
-       } while (!need_resched());
-
-       return false;
-}
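
The spinner removed above bounds its busy-wait by both a time budget and a CPU-migration check, treating a migration as a hint of system load. A user-space analogue of the same pattern, assuming a caller-supplied completed() predicate (illustrative only, not kernel code):

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <time.h>

/* Caller-supplied completion test (hypothetical). */
extern bool completed(void);

static unsigned long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

/* Spin for at most timeout_us, bailing out early if we migrate to
 * another CPU - mirroring busywait_stop() above.
 */
static bool spin_wait(unsigned long timeout_us)
{
	unsigned long deadline = now_us() + timeout_us;
	int cpu = sched_getcpu();

	for (;;) {
		if (completed())
			return true;
		if (now_us() > deadline || sched_getcpu() != cpu)
			return false;
	}
}
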
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: RPS client
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the allotted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps)
-{
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(reset);
-       struct intel_wait wait;
-       unsigned long timeout_remain;
-       s64 before = 0; /* Only to silence a compiler warning. */
-       int ret = 0;
-
-       might_sleep();
-
-       if (list_empty(&req->list))
-               return 0;
-
-       if (i915_gem_request_completed(req))
-               return 0;
-
-       timeout_remain = MAX_SCHEDULE_TIMEOUT;
-       if (timeout) {
-               if (WARN_ON(*timeout < 0))
-                       return -EINVAL;
-
-               if (*timeout == 0)
-                       return -ETIME;
-
-               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-
-               /*
-                * Record current time in case interrupted by signal, or wedged.
-                */
-               before = ktime_get_raw_ns();
-       }
-
-       trace_i915_gem_request_wait_begin(req);
-
-       /* This client is about to stall waiting for the GPU. In many cases
-        * this is undesirable and limits the throughput of the system, as
-        * many clients cannot continue processing user input/output whilst
-        * blocked. RPS autotuning may take tens of milliseconds to respond
-        * to the GPU load and thus incurs additional latency for the client.
-        * We can circumvent that by promoting the GPU frequency to maximum
-        * before we wait. This makes the GPU throttle up much more quickly
-        * (good for benchmarks and user experience, e.g. window animations),
-        * but at a cost of spending more power processing the workload
-        * (bad for battery). Not all clients even want their results
-        * immediately and for them we should just let the GPU select its own
-        * frequency to maximise efficiency. To prevent a single client from
-        * forcing the clocks too high for the whole system, we only allow
-        * each client to waitboost once in a busy period.
-        */
-       if (INTEL_INFO(req->i915)->gen >= 6)
-               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
-
-       /* Optimistic spin for the next ~jiffie before touching IRQs */
-       if (i915_spin_request(req, state, 5))
-               goto complete;
-
-       set_current_state(state);
-       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
-       intel_wait_init(&wait, req->seqno);
-       if (intel_engine_add_wait(req->engine, &wait))
-               /* In order to check that we haven't missed the interrupt
-                * as we enabled it, we need to kick ourselves to do a
-                * coherent check on the seqno before we sleep.
-                */
-               goto wakeup;
-
-       for (;;) {
-               if (signal_pending_state(state, current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-
-               timeout_remain = io_schedule_timeout(timeout_remain);
-               if (timeout_remain == 0) {
-                       ret = -ETIME;
-                       break;
-               }
-
-               if (intel_wait_complete(&wait))
-                       break;
-
-               set_current_state(state);
-
-wakeup:
-               /* Carefully check if the request is complete, giving time
-                * for the seqno to be visible following the interrupt.
-                * We also have to check in case we are kicked by the GPU
-                * reset in order to drop the struct_mutex.
-                */
-               if (__i915_request_irq_complete(req))
-                       break;
-
-               /* Only spin if we know the GPU is processing this request */
-               if (i915_spin_request(req, state, 2))
-                       break;
-       }
-       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
-       intel_engine_remove_wait(req->engine, &wait);
-       __set_current_state(TASK_RUNNING);
-complete:
-       trace_i915_gem_request_wait_end(req);
-
-       if (timeout) {
-               s64 tres = *timeout - (ktime_get_raw_ns() - before);
-
-               *timeout = tres < 0 ? 0 : tres;
-
-               /*
-                * Apparently ktime isn't accurate enough and occasionally has a
-                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-                * things up to make the test happy. We allow up to 1 jiffy.
-                *
-                * This is a regression from the timespec->ktime conversion.
-                */
-               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-                       *timeout = 0;
-       }
-
-       if (rps && req->seqno == req->engine->last_submitted_seqno) {
-               /* The GPU is now idle and this client has stalled.
-                * Since no other client has submitted a request in the
-                * meantime, assume that this client is the only one
-                * supplying work to the GPU but is unable to keep that
-                * work supplied because it is waiting. Since the GPU is
-                * then never kept fully busy, RPS autoclocking will
-                * keep the clocks relatively low, causing further delays.
-                * Compensate by giving the synchronous client credit for
-                * a waitboost next time.
-                */
-               spin_lock(&req->i915->rps.client_lock);
-               list_del_init(&rps->link);
-               spin_unlock(&req->i915->rps.client_lock);
-       }
-
-       return ret;
-}
-
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file)
-{
-       struct drm_i915_file_private *file_priv;
-
-       WARN_ON(!req || !file || req->file_priv);
-
-       if (!req || !file)
-               return -EINVAL;
-
-       if (req->file_priv)
-               return -EINVAL;
-
-       file_priv = file->driver_priv;
-
-       spin_lock(&file_priv->mm.lock);
-       req->file_priv = file_priv;
-       list_add_tail(&req->client_list, &file_priv->mm.request_list);
-       spin_unlock(&file_priv->mm.lock);
-
-       req->pid = get_pid(task_pid(current));
-
-       return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_file_private *file_priv = request->file_priv;
-
-       if (!file_priv)
-               return;
-
-       spin_lock(&file_priv->mm.lock);
-       list_del(&request->client_list);
-       request->file_priv = NULL;
-       spin_unlock(&file_priv->mm.lock);
-
-       put_pid(request->pid);
-       request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
-       trace_i915_gem_request_retire(request);
-
-       /* We know the GPU must have read the request to have
-        * sent us the seqno + interrupt, so use the position
-        * of tail of the request to update the last known position
-        * of the GPU head.
-        *
-        * Note this requires that we are always called in request
-        * completion order.
-        */
-       request->ringbuf->last_retired_head = request->postfix;
-
-       list_del_init(&request->list);
-       i915_gem_request_remove_from_client(request);
-
-       if (request->previous_context) {
-               if (i915.enable_execlists)
-                       intel_lr_context_unpin(request->previous_context,
-                                              request->engine);
-       }
-
-       i915_gem_context_unreference(request->ctx);
-       i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_gem_request *tmp;
-
-       lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-       if (list_empty(&req->list))
-               return;
-
-       do {
-               tmp = list_first_entry(&engine->request_list,
-                                      typeof(*tmp), list);
-
-               i915_gem_request_retire(tmp);
-       } while (tmp != req);
-
-       WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- * @req: request to wait on
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
-       struct drm_i915_private *dev_priv = req->i915;
-       bool interruptible;
-       int ret;
-
-       interruptible = dev_priv->mm.interruptible;
-
-       BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
-
-       ret = __i915_wait_request(req, interruptible, NULL, NULL);
-       if (ret)
-               return ret;
-
-       /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (!i915_reset_in_progress(&dev_priv->gpu_error))
-               __i915_gem_request_retire__upto(req);
-
-       return 0;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for read access or write
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                              bool readonly)
-{
-       int ret, i;
-
-       if (!obj->active)
-               return 0;
-
-       if (readonly) {
-               if (obj->last_write_req != NULL) {
-                       ret = i915_wait_request(obj->last_write_req);
-                       if (ret)
-                               return ret;
-
-                       i = obj->last_write_req->engine->id;
-                       if (obj->last_read_req[i] == obj->last_write_req)
-                               i915_gem_object_retire__read(obj, i);
-                       else
-                               i915_gem_object_retire__write(obj);
-               }
-       } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       if (obj->last_read_req[i] == NULL)
-                               continue;
-
-                       ret = i915_wait_request(obj->last_read_req[i]);
-                       if (ret)
-                               return ret;
-
-                       i915_gem_object_retire__read(obj, i);
-               }
-               GEM_BUG_ON(obj->active);
-       }
-
-       return 0;
-}
-
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-                              struct drm_i915_gem_request *req)
-{
-       int ring = req->engine->id;
-
-       if (obj->last_read_req[ring] == req)
-               i915_gem_object_retire__read(obj, ring);
-       else if (obj->last_write_req == req)
-               i915_gem_object_retire__write(obj);
-
-       if (!i915_reset_in_progress(&req->i915->gpu_error))
-               __i915_gem_request_retire__upto(req);
-}
-
-/* A nonblocking variant of the above wait. This is a highly dangerous routine
- * as the object state may change during this call.
- */
-static __must_check int
-i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-                                           struct intel_rps_client *rps,
-                                           bool readonly)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-       int ret, i, n = 0;
-
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-       BUG_ON(!dev_priv->mm.interruptible);
-
-       if (!obj->active)
-               return 0;
-
-       if (readonly) {
-               struct drm_i915_gem_request *req;
-
-               req = obj->last_write_req;
-               if (req == NULL)
-                       return 0;
-
-               requests[n++] = i915_gem_request_reference(req);
-       } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       struct drm_i915_gem_request *req;
-
-                       req = obj->last_read_req[i];
-                       if (req == NULL)
-                               continue;
-
-                       requests[n++] = i915_gem_request_reference(req);
-               }
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-       ret = 0;
-       for (i = 0; ret == 0 && i < n; i++)
-               ret = __i915_wait_request(requests[i], true, NULL, rps);
-       mutex_lock(&dev->struct_mutex);
-
-       for (i = 0; i < n; i++) {
-               if (ret == 0)
-                       i915_gem_object_retire_request(obj, requests[i]);
-               i915_gem_request_unreference(requests[i]);
-       }
-
-       return ret;
-}
-
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
-       struct drm_i915_file_private *fpriv = file->driver_priv;
-       return &fpriv->rps;
-}
-
-static enum fb_op_origin
-write_origin(struct drm_i915_gem_object *obj, unsigned domain)
-{
-       return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
-              ORIGIN_GTT : ORIGIN_CPU;
+       return (domain == I915_GEM_DOMAIN_GTT ?
+               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
 }
 
 /**
@@ -1828,10 +1520,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        int ret;
 
        /* Only handle setting domains to types used by the CPU. */
-       if (write_domain & I915_GEM_GPU_DOMAINS)
-               return -EINVAL;
-
-       if (read_domains & I915_GEM_GPU_DOMAINS)
+       if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
                return -EINVAL;
 
        /* Having something in the write domain implies it's in the read
@@ -1840,25 +1529,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
 
        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj,
-                                                         to_rps_client(file),
-                                                         !write_domain);
+       ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
+       if (ret)
+               goto err;
+
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               goto unref;
+               goto err;
 
        if (read_domains & I915_GEM_DOMAIN_GTT)
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1868,11 +1553,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (write_domain != 0)
                intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
-unref:
-       drm_gem_object_unreference(&obj->base);
-unlock:
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
+
+err:
+       i915_gem_object_put_unlocked(obj);
+       return ret;
 }
 
 /**
@@ -1887,26 +1574,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;
-       int ret = 0;
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
+       int err = 0;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
 
        /* Pinned buffers may be scanout, so flush the cache */
-       if (obj->pin_display)
-               i915_gem_object_flush_cpu_write_domain(obj);
+       if (READ_ONCE(obj->pin_display)) {
+               err = i915_mutex_lock_interruptible(dev);
+               if (!err) {
+                       i915_gem_object_flush_cpu_write_domain(obj);
+                       mutex_unlock(&dev->struct_mutex);
+               }
+       }
 
-       drm_gem_object_unreference(&obj->base);
-unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       i915_gem_object_put_unlocked(obj);
+       return err;
 }
 
 /**
@@ -1934,7 +1618,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
        struct drm_i915_gem_mmap *args = data;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        unsigned long addr;
 
        if (args->flags & ~(I915_MMAP_WC))
@@ -1943,19 +1627,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;
 
-       obj = drm_gem_object_lookup(file, args->handle);
-       if (obj == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
        /* prime objects have no backing filp to GEM mmap
         * pages from.
         */
-       if (!obj->filp) {
-               drm_gem_object_unreference_unlocked(obj);
+       if (!obj->base.filp) {
+               i915_gem_object_put_unlocked(obj);
                return -EINVAL;
        }
 
-       addr = vm_mmap(obj->filp, 0, args->size,
+       addr = vm_mmap(obj->base.filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        if (args->flags & I915_MMAP_WC) {
@@ -1963,7 +1647,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                struct vm_area_struct *vma;
 
                if (down_write_killable(&mm->mmap_sem)) {
-                       drm_gem_object_unreference_unlocked(obj);
+                       i915_gem_object_put_unlocked(obj);
                        return -EINTR;
                }
                vma = find_vma(mm, addr);
@@ -1975,9 +1659,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                up_write(&mm->mmap_sem);
 
                /* This may race, but that's ok, it only gets set */
-               WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
+               WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
        }
-       drm_gem_object_unreference_unlocked(obj);
+       i915_gem_object_put_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;
 
@@ -1986,9 +1670,69 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
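
The I915_MMAP_WC flag validated above belongs to the DRM_IOCTL_I915_GEM_MMAP uAPI. A sketch of the user-space side requesting a write-combining CPU mapping (error handling elided; include paths vary with the libdrm installation):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Map a GEM object with a write-combining CPU view; fd is an open
 * i915 DRM fd and handle an existing GEM handle (both assumed).
 */
static void *gem_mmap_wc(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.size = size;
	arg.flags = I915_MMAP_WC;	/* rejected if the CPU lacks PAT */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}
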
 
+static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
+{
+       u64 size;
+
+       size = i915_gem_object_get_stride(obj);
+       size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
+
+       return size >> PAGE_SHIFT;
+}
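
As a worked example of tile_row_pages(): a Y-tiled object with a 4096-byte stride spans 4096 * 32 = 128 KiB per tile row, i.e. 32 pages, while the same stride X-tiled (8 rows per tile) spans 8 pages. The partial-view code below uses this so a chunk always covers whole tile rows.
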
+
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
+ *     be aligned and suitable for fencing, and still fit into the available
+ *     mappable space left by the pinned display objects. A classic problem
+ *     we called the page-fault-of-doom where we would ping-pong between
+ *     two objects that could not fit inside the GTT and so the memcpy
+ *     would page one object in at the expense of the other between every
+ *     single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ *     object is too large for the available space (or simply too large
+ *     for the mappable aperture!), a view is created instead and faulted
+ *     into userspace. (This view is aligned and sized appropriately for
+ *     fenced access.)
+ *
+ * Restrictions:
+ *
+ *  * snoopable objects cannot be accessed via the GTT, as this can cause
+ *    machine hangs on some architectures and corruption on others. An attempt
+ *    to service a GTT page fault from a snoopable object will generate a
+ *    SIGBUS.
+ *
+ *  * the object must be able to fit into RAM (physical memory, though not
+ *    limited to the mappable aperture).
+ *
+ * Caveats:
+ *
+ *  * a new GTT page fault will synchronize rendering from the GPU and flush
+ *    all data to system memory. Subsequent access will not be synchronized.
+ *
+ *  * all mappings are revoked on runtime device suspend.
+ *
+ *  * there are only 8, 16 or 32 fence registers to share between all users
+ *    (older machines require a fence register for display and blitter access
+ *    as well). Contention of the fence registers will cause the previous users
+ *    to be unmapped and any new access will generate new page faults.
+ *
+ *  * running out of memory while servicing a fault may generate a SIGBUS,
+ *    rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+       return 1;
+}
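
Userspace can query this feature level through the I915_PARAM_MMAP_GTT_VERSION getparam. A sketch of that query (assumes an open i915 DRM fd; include paths vary with the libdrm installation):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Return the GTT mmap feature level, or 0 if the kernel predates
 * the parameter.
 */
static int gtt_mmap_version(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	gp.param = I915_PARAM_MMAP_GTT_VERSION;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}
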
+
 /**
  * i915_gem_fault - fault a page into the GTT
- * @vma: VMA in question
+ * @area: CPU VMA in question
  * @vmf: fault info
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -2001,122 +1745,120 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  * from the GTT and/or fence registers to make room.  So performance may
  * suffer if the GTT working set is large or there are few fence registers
  * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
-       struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+       struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct i915_ggtt_view view = i915_ggtt_view_normal;
-       pgoff_t page_offset;
-       unsigned long pfn;
-       int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
-
-       intel_runtime_pm_get(dev_priv);
+       struct i915_vma *vma;
+       pgoff_t page_offset;
+       unsigned int flags;
+       int ret;
 
        /* We don't use vmf->pgoff since that has the fake offset */
-       page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+       page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
                PAGE_SHIFT;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               goto out;
-
        trace_i915_gem_object_fault(obj, page_offset, true, write);
 
        /* Try to flush the object off the GPU first without holding the lock.
-        * Upon reacquiring the lock, we will perform our sanity checks and then
+        * Upon acquiring the lock, we will perform our sanity checks and then
         * repeat the flush holding the lock in the normal manner to catch cases
         * where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+       ret = __unsafe_wait_rendering(obj, NULL, !write);
        if (ret)
-               goto unlock;
+               goto err;
+
+       intel_runtime_pm_get(dev_priv);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto err_rpm;
 
        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
                ret = -EFAULT;
-               goto unlock;
+               goto err_unlock;
        }
 
-       /* Use a partial view if the object is bigger than the aperture. */
-       if (obj->base.size >= ggtt->mappable_end &&
-           obj->tiling_mode == I915_TILING_NONE) {
-               static const unsigned int chunk_size = 256; // 1 MiB
+       /* If the object is smaller than a couple of partial vmas, it is
+        * not worth creating only a single partial vma - we may as well
+        * clear enough space for the full object.
+        */
+       flags = PIN_MAPPABLE;
+       if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
+               flags |= PIN_NONBLOCK | PIN_NONFAULT;
+
+       /* Now pin it into the GTT as needed */
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+       if (IS_ERR(vma)) {
+               struct i915_ggtt_view view;
+               unsigned int chunk_size;
+
+               /* Use a partial view if it is bigger than available space */
+               chunk_size = MIN_CHUNK_PAGES;
+               if (i915_gem_object_is_tiled(obj))
+                       chunk_size = max(chunk_size, tile_row_pages(obj));
 
                memset(&view, 0, sizeof(view));
                view.type = I915_GGTT_VIEW_PARTIAL;
                view.params.partial.offset = rounddown(page_offset, chunk_size);
                view.params.partial.size =
-                       min_t(unsigned int,
-                             chunk_size,
-                             (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+                       min_t(unsigned int, chunk_size,
+                             (area->vm_end - area->vm_start) / PAGE_SIZE -
                              view.params.partial.offset);
-       }
 
-       /* Now pin it into the GTT if needed */
-       ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
-       if (ret)
-               goto unlock;
+               /* If the partial covers the entire object, just create a
+                * normal VMA.
+                */
+               if (chunk_size >= obj->base.size >> PAGE_SHIFT)
+                       view.type = I915_GGTT_VIEW_NORMAL;
+
+               /* Userspace is now writing through an untracked VMA, abandon
+                * all hope that the hardware is able to track future writes.
+                */
+               obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
+               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+       }
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unlock;
+       }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
-               goto unpin;
+               goto err_unpin;
 
-       ret = i915_gem_object_get_fence(obj);
+       ret = i915_vma_get_fence(vma);
        if (ret)
-               goto unpin;
+               goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       pfn = ggtt->mappable_base +
-               i915_gem_obj_ggtt_offset_view(obj, &view);
-       pfn >>= PAGE_SHIFT;
-
-       if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
-               /* Overriding existing pages in partial view does not cause
-                * us any trouble as TLBs are still valid because the fault
-                * is due to userspace losing part of the mapping or never
-                * having accessed it before (at this partials' range).
-                */
-               unsigned long base = vma->vm_start +
-                                    (view.params.partial.offset << PAGE_SHIFT);
-               unsigned int i;
-
-               for (i = 0; i < view.params.partial.size; i++) {
-                       ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
-                       if (ret)
-                               break;
-               }
-
-               obj->fault_mappable = true;
-       } else {
-               if (!obj->fault_mappable) {
-                       unsigned long size = min_t(unsigned long,
-                                                  vma->vm_end - vma->vm_start,
-                                                  obj->base.size);
-                       int i;
-
-                       for (i = 0; i < size >> PAGE_SHIFT; i++) {
-                               ret = vm_insert_pfn(vma,
-                                                   (unsigned long)vma->vm_start + i * PAGE_SIZE,
-                                                   pfn + i);
-                               if (ret)
-                                       break;
-                       }
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+                              (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->mappable);
+       if (ret)
+               goto err_unpin;
 
-                       obj->fault_mappable = true;
-               } else
-                       ret = vm_insert_pfn(vma,
-                                           (unsigned long)vmf->virtual_address,
-                                           pfn + page_offset);
-       }
-unpin:
-       i915_gem_object_ggtt_unpin_view(obj, &view);
-unlock:
+       obj->fault_mappable = true;
+err_unpin:
+       __i915_vma_unpin(vma);
+err_unlock:
        mutex_unlock(&dev->struct_mutex);
-out:
+err_rpm:
+       intel_runtime_pm_put(dev_priv);
+err:
        switch (ret) {
        case -EIO:
                /*
@@ -2157,8 +1899,6 @@ out:
                ret = VM_FAULT_SIGBUS;
                break;
        }
-
-       intel_runtime_pm_put(dev_priv);
        return ret;
 }
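
For reference, the partial view built earlier in this fault path picks a chunk-aligned window around the faulting page and clamps it to the remaining pages. A standalone sketch of that window computation (illustrative; all names hypothetical):

/* Clamp a chunk-aligned window around the faulting page so the view
 * never extends past the end of the mapping (sizes in pages).
 */
struct window { unsigned long offset, size; };

static struct window partial_window(unsigned long fault_page,
				    unsigned long chunk,
				    unsigned long total_pages)
{
	struct window w;

	w.offset = fault_page - (fault_page % chunk);	/* rounddown */
	w.size = total_pages - w.offset;
	if (w.size > chunk)
		w.size = chunk;
	return w;
}
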
 
@@ -2212,46 +1952,58 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
                i915_gem_release_mmap(obj);
 }
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev_priv: i915 device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required global GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
+                          u64 size, int tiling_mode)
 {
-       uint32_t gtt_size;
+       u64 ggtt_size;
 
-       if (INTEL_INFO(dev)->gen >= 4 ||
+       GEM_BUG_ON(size == 0);
+
+       if (INTEL_GEN(dev_priv) >= 4 ||
            tiling_mode == I915_TILING_NONE)
                return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (IS_GEN3(dev))
-               gtt_size = 1024*1024;
+       if (IS_GEN3(dev_priv))
+               ggtt_size = 1024*1024;
        else
-               gtt_size = 512*1024;
+               ggtt_size = 512*1024;
 
-       while (gtt_size < size)
-               gtt_size <<= 1;
+       while (ggtt_size < size)
+               ggtt_size <<= 1;
 
-       return gtt_size;
+       return ggtt_size;
 }
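
A worked example of the doubling loop above: on gen3 a 1.5 MiB tiled object starts from the 1 MiB minimum and doubles once, so the required fence region is 2 MiB. The rounding in isolation (a sketch, not the kernel helper):

/* Round size up to the next power-of-two multiple of the fence
 * granule (1 MiB on gen3, 512 KiB on older parts), as above.
 */
static unsigned long long fence_region(unsigned long long size,
				       unsigned long long granule)
{
	unsigned long long region = granule;

	while (region < size)
		region <<= 1;
	return region;
}

/* e.g. fence_region(1536 * 1024, 1024 * 1024) == 2 MiB */
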
 
 /**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @dev: drm device
+ * i915_gem_get_ggtt_alignment - return required global GTT alignment
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
- * @fenced: is fenced alignemned required or not
+ * @fenced: is fenced alignment required or not
  *
- * Return the required GTT alignment for an object, taking into account
+ * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-                          int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+                               int tiling_mode, bool fenced)
 {
+       GEM_BUG_ON(size == 0);
+
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+       if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
            tiling_mode == I915_TILING_NONE)
                return 4096;
 
@@ -2259,42 +2011,34 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       return i915_gem_get_gtt_size(dev, size, tiling_mode);
+       return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int ret;
-
-       dev_priv->mm.shrinker_no_lock_stealing = true;
+       int err;
 
-       ret = drm_gem_create_mmap_offset(&obj->base);
-       if (ret != -ENOSPC)
-               goto out;
+       err = drm_gem_create_mmap_offset(&obj->base);
+       if (!err)
+               return 0;
 
-       /* Badly fragmented mmap space? The only way we can recover
-        * space is by destroying unwanted objects. We can't randomly release
-        * mmap_offsets as userspace expects them to be persistent for the
-        * lifetime of the objects. The closest we can get is to release the
-        * offsets on purgeable objects by truncating them and marking them
-        * purged, which prevents userspace from ever using that object again.
+       /* We can idle the GPU locklessly to flush stale objects, but in order
+        * to claim that space for ourselves, we need to take the big
+        * struct_mutex to free the requests+objects and allocate our slot.
         */
-       i915_gem_shrink(dev_priv,
-                       obj->base.size >> PAGE_SHIFT,
-                       I915_SHRINK_BOUND |
-                       I915_SHRINK_UNBOUND |
-                       I915_SHRINK_PURGEABLE);
-       ret = drm_gem_create_mmap_offset(&obj->base);
-       if (ret != -ENOSPC)
-               goto out;
+       err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+       if (err)
+               return err;
 
-       i915_gem_shrink_all(dev_priv);
-       ret = drm_gem_create_mmap_offset(&obj->base);
-out:
-       dev_priv->mm.shrinker_no_lock_stealing = false;
+       err = i915_mutex_lock_interruptible(&dev_priv->drm);
+       if (!err) {
+               i915_gem_retire_requests(dev_priv);
+               err = drm_gem_create_mmap_offset(&obj->base);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+       }
 
-       return ret;
+       return err;
 }
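
The rework above replaces the shrinker-driven retries with a single try / idle / retry-under-lock sequence. The shape of that pattern, abstracted (all names hypothetical):

extern int try_alloc(void);		/* cheap first attempt */
extern int wait_for_idle(void);		/* flush stale users, locklessly */
extern int lock(void);			/* the heavyweight mutex */
extern void unlock(void);
extern void reclaim(void);		/* free retired objects */

static int alloc_with_retry(void)
{
	int err = try_alloc();
	if (!err)
		return 0;

	err = wait_for_idle();		/* idling needs no lock... */
	if (err)
		return err;

	err = lock();			/* ...claiming the space does */
	if (!err) {
		reclaim();
		err = try_alloc();
		unlock();
	}
	return err;
}
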
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2311,32 +2055,15 @@ i915_gem_mmap_gtt(struct drm_file *file,
        struct drm_i915_gem_object *obj;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(file, handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
-
-       if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
-               ret = -EFAULT;
-               goto out;
-       }
+       obj = i915_gem_object_lookup(file, handle);
+       if (!obj)
+               return -ENOENT;
 
        ret = i915_gem_object_create_mmap_offset(obj);
-       if (ret)
-               goto out;
-
-       *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+       if (ret == 0)
+               *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
-out:
-       drm_gem_object_unreference(&obj->base);
-unlock:
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_object_put_unlocked(obj);
        return ret;
 }
 
@@ -2454,7 +2181,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages_pin_count)
                return -EBUSY;
 
-       BUG_ON(i915_gem_obj_bound_any(obj));
+       GEM_BUG_ON(obj->bind_count);
 
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
@@ -2462,10 +2189,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        list_del(&obj->global_list);
 
        if (obj->mapping) {
-               if (is_vmalloc_addr(obj->mapping))
-                       vunmap(obj->mapping);
+               void *ptr;
+
+               ptr = ptr_mask_bits(obj->mapping);
+               if (is_vmalloc_addr(ptr))
+                       vunmap(ptr);
                else
-                       kunmap(kmap_to_page(obj->mapping));
+                       kunmap(kmap_to_page(ptr));
+
                obj->mapping = NULL;
        }
 
@@ -2574,7 +2305,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
 
-       if (obj->tiling_mode != I915_TILING_NONE &&
+       if (i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                i915_gem_object_pin_pages(obj);
 
@@ -2638,7 +2369,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+                                enum i915_map_type type)
 {
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
        struct sg_table *sgt = obj->pages;
@@ -2647,10 +2379,11 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
        struct page *stack_pages[32];
        struct page **pages = stack_pages;
        unsigned long i = 0;
+       pgprot_t pgprot;
        void *addr;
 
        /* A single page can always be kmapped */
-       if (n_pages == 1)
+       if (n_pages == 1 && type == I915_MAP_WB)
                return kmap(sg_page(sgt->sgl));
 
        if (n_pages > ARRAY_SIZE(stack_pages)) {
@@ -2666,7 +2399,15 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
        /* Check that we have the expected number of pages */
        GEM_BUG_ON(i != n_pages);
 
-       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+       switch (type) {
+       case I915_MAP_WB:
+               pgprot = PAGE_KERNEL;
+               break;
+       case I915_MAP_WC:
+               pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+               break;
+       }
+       addr = vmap(pages, n_pages, 0, pgprot);
 
        if (pages != stack_pages)
                drm_free_large(pages);
@@ -2675,276 +2416,89 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 }
 
 /* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+                             enum i915_map_type type)
 {
+       enum i915_map_type has_type;
+       bool pinned;
+       void *ptr;
        int ret;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
+       GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ERR_PTR(ret);
 
        i915_gem_object_pin_pages(obj);
+       pinned = obj->pages_pin_count > 1;
 
-       if (!obj->mapping) {
-               obj->mapping = i915_gem_object_map(obj);
-               if (!obj->mapping) {
-                       i915_gem_object_unpin_pages(obj);
-                       return ERR_PTR(-ENOMEM);
+       ptr = ptr_unpack_bits(obj->mapping, has_type);
+       if (ptr && has_type != type) {
+               if (pinned) {
+                       ret = -EBUSY;
+                       goto err;
                }
-       }
 
-       return obj->mapping;
-}
-
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct drm_i915_gem_request *req)
-{
-       struct drm_i915_gem_object *obj = vma->obj;
-       struct intel_engine_cs *engine;
-
-       engine = i915_gem_request_get_engine(req);
-
-       /* Add a reference if we're newly entering the active list. */
-       if (obj->active == 0)
-               drm_gem_object_reference(&obj->base);
-       obj->active |= intel_engine_flag(engine);
-
-       list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
-       i915_gem_request_assign(&obj->last_read_req[engine->id], req);
-
-       list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
-{
-       GEM_BUG_ON(obj->last_write_req == NULL);
-       GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
-
-       i915_gem_request_assign(&obj->last_write_req, NULL);
-       intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
-{
-       struct i915_vma *vma;
-
-       GEM_BUG_ON(obj->last_read_req[ring] == NULL);
-       GEM_BUG_ON(!(obj->active & (1 << ring)));
-
-       list_del_init(&obj->engine_list[ring]);
-       i915_gem_request_assign(&obj->last_read_req[ring], NULL);
-
-       if (obj->last_write_req && obj->last_write_req->engine->id == ring)
-               i915_gem_object_retire__write(obj);
-
-       obj->active &= ~(1 << ring);
-       if (obj->active)
-               return;
-
-       /* Bump our place on the bound list to keep it roughly in LRU order
-        * so that we don't steal from recently used but inactive objects
-        * (unless we are forced to ofc!)
-        */
-       list_move_tail(&obj->global_list,
-                      &to_i915(obj->base.dev)->mm.bound_list);
-
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (!list_empty(&vma->vm_link))
-                       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-       }
-
-       i915_gem_request_assign(&obj->last_fenced_req, NULL);
-       drm_gem_object_unreference(&obj->base);
-}
-
-static int
-i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
-{
-       struct intel_engine_cs *engine;
-       int ret;
-
-       /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine);
-               if (ret)
-                       return ret;
-       }
-       i915_gem_retire_requests(dev_priv);
+               if (is_vmalloc_addr(ptr))
+                       vunmap(ptr);
+               else
+                       kunmap(kmap_to_page(ptr));
 
-       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-               while (intel_kick_waiters(dev_priv) ||
-                      intel_kick_signalers(dev_priv))
-                       yield();
+               ptr = obj->mapping = NULL;
        }
 
-       /* Finally reset hw state */
-       for_each_engine(engine, dev_priv)
-               intel_ring_init_seqno(engine, seqno);
-
-       return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
-
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
-       if (ret)
-               return ret;
-
-       /* Carefully set the last_seqno value so that wrap
-        * detection still works
-        */
-       dev_priv->next_seqno = seqno;
-       dev_priv->last_seqno = seqno - 1;
-       if (dev_priv->last_seqno == 0)
-               dev_priv->last_seqno--;
-
-       return 0;
-}
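
The wrap handling removed above relies on i915_seqno_passed(), which compares seqnos via signed subtraction so the ordering test survives a u32 wraparound. The idiom in isolation:

#include <stdbool.h>
#include <stdint.h>

/* True if seq1 is at or after seq2; correct across a u32 wrap as
 * long as the two values stay within 2^31 of each other.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
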
-
-int
-i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
-{
-       /* reserve 0 for non-seqno */
-       if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev_priv, 0);
-               if (ret)
-                       return ret;
+       if (!ptr) {
+               ptr = i915_gem_object_map(obj, type);
+               if (!ptr) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
 
-               dev_priv->next_seqno = 1;
+               obj->mapping = ptr_pack_bits(ptr, type);
        }
 
-       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
-       return 0;
-}
-
-static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       dev_priv->gt.active_engines |= intel_engine_flag(engine);
-       if (dev_priv->gt.awake)
-               return;
-
-       intel_runtime_pm_get_noresume(dev_priv);
-       dev_priv->gt.awake = true;
+       return ptr;
 
-       i915_update_gfx_val(dev_priv);
-       if (INTEL_GEN(dev_priv) >= 6)
-               gen6_rps_busy(dev_priv);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->gt.retire_work,
-                          round_jiffies_up_relative(HZ));
+err:
+       i915_gem_object_unpin_pages(obj);
+       return ERR_PTR(ret);
 }
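
The ptr_pack_bits()/ptr_unpack_bits() helpers used in pin_map above stash the mapping type in the low bits of the page-aligned mapping pointer, so a single field records both pointer and type. A generic sketch of that packing, assuming the pointer's alignment leaves the low bits free (names hypothetical):

#include <assert.h>
#include <stdint.h>

#define TYPE_MASK 0x3ul	/* room for a small enum such as WB/WC */

static void *pack_ptr(void *ptr, unsigned int type)
{
	assert(((uintptr_t)ptr & TYPE_MASK) == 0);	/* needs alignment */
	return (void *)((uintptr_t)ptr | type);
}

static void *unpack_ptr(void *packed, unsigned int *type)
{
	*type = (uintptr_t)packed & TYPE_MASK;
	return (void *)((uintptr_t)packed & ~TYPE_MASK);
}
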
 
-/*
- * NB: This function is not allowed to fail. Doing so would mean the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
-                       struct drm_i915_gem_object *obj,
-                       bool flush_caches)
+static void
+i915_gem_object_retire__write(struct i915_gem_active *active,
+                             struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *engine;
-       struct intel_ringbuffer *ringbuf;
-       u32 request_start;
-       u32 reserved_tail;
-       int ret;
-
-       if (WARN_ON(request == NULL))
-               return;
-
-       engine = request->engine;
-       ringbuf = request->ringbuf;
-
-       /*
-        * To ensure that this call will not fail, space for its emissions
-        * should already have been reserved in the ring buffer. Let the ring
-        * know that it is time to use that space up.
-        */
-       request_start = intel_ring_get_tail(ringbuf);
-       reserved_tail = request->reserved_space;
-       request->reserved_space = 0;
+       struct drm_i915_gem_object *obj =
+               container_of(active, struct drm_i915_gem_object, last_write);
 
-       /*
-        * Emit any outstanding flushes - execbuf can fail to emit the flush
-        * after having emitted the batchbuffer command. Hence we need to fix
-        * things up similar to emitting the lazy request. The difference here
-        * is that the flush _must_ happen before the next request, no matter
-        * what.
-        */
-       if (flush_caches) {
-               if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(request);
-               else
-                       ret = intel_ring_flush_all_caches(request);
-               /* Not allowed to fail! */
-               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
-       }
+       intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
 
-       trace_i915_gem_request_add(request);
+static void
+i915_gem_object_retire__read(struct i915_gem_active *active,
+                            struct drm_i915_gem_request *request)
+{
+       int idx = request->engine->id;
+       struct drm_i915_gem_object *obj =
+               container_of(active, struct drm_i915_gem_object, last_read[idx]);
 
-       request->head = request_start;
+       GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
 
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
+       i915_gem_object_clear_active(obj, idx);
+       if (i915_gem_object_is_active(obj))
+               return;
 
-       /* Seal the request and mark it as pending execution. Note that
-        * we may inspect this state, without holding any locks, during
-        * hangcheck. Hence we apply the barrier to ensure that we do not
-        * see a more recent value in the hws than we are tracking.
-        */
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       smp_store_mb(engine->last_submitted_seqno, request->seqno);
-       list_add_tail(&request->list, &engine->request_list);
-
-       /* Record the position of the start of the request so that
-        * should we detect the updated seqno part-way through the
-        * GPU processing the request, we never over-estimate the
-        * position of the head.
+       /* Bump our place on the bound list to keep it roughly in LRU order
+        * so that we don't steal from recently used but inactive objects
+        * (unless we are forced to, of course!)
         */
-       request->postfix = intel_ring_get_tail(ringbuf);
-
-       if (i915.enable_execlists)
-               ret = engine->emit_request(request);
-       else {
-               ret = engine->add_request(request);
-
-               request->tail = intel_ring_get_tail(ringbuf);
-       }
-       /* Not allowed to fail! */
-       WARN(ret, "emit|add_request failed: %d!\n", ret);
-       /* Sanity check that the reserved size was large enough. */
-       ret = intel_ring_get_tail(ringbuf) - request_start;
-       if (ret < 0)
-               ret += ringbuf->size;
-       WARN_ONCE(ret > reserved_tail,
-                 "Not enough space reserved (%d bytes) "
-                 "for adding the request (%d bytes)\n",
-                 reserved_tail, ret);
+       if (obj->bind_count)
+               list_move_tail(&obj->global_list,
+                              &request->i915->mm.bound_list);
 
-       i915_gem_mark_busy(engine);
+       i915_gem_object_put(obj);
 }
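
i915_gem_object_retire__read() clears this engine's bit in the object's active mask and only drops the reference once every engine has retired its last read. A self-contained sketch of that bitmask bookkeeping (all names here are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct gem_object_sim {
            unsigned int active;    /* one bit per engine with an outstanding read */
    };

    static void mark_active(struct gem_object_sim *obj, int engine)
    {
            obj->active |= 1u << engine;
    }

    static bool retire_read(struct gem_object_sim *obj, int engine)
    {
            obj->active &= ~(1u << engine);
            return obj->active == 0;        /* true once the object is fully idle */
    }

    int main(void)
    {
            struct gem_object_sim obj = { 0 };

            mark_active(&obj, 0);
            mark_active(&obj, 2);
            printf("idle after engine 0 retires: %d\n", retire_read(&obj, 0));
            printf("idle after engine 2 retires: %d\n", retire_read(&obj, 2));
            return 0;
    }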
 
 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2978,101 +2532,6 @@ static void i915_set_reset_status(struct i915_gem_context *ctx,
        }
 }
 
-void i915_gem_request_free(struct kref *req_ref)
-{
-       struct drm_i915_gem_request *req = container_of(req_ref,
-                                                typeof(*req), ref);
-       kmem_cache_free(req->i915->requests, req);
-}
-
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-                        struct i915_gem_context *ctx,
-                        struct drm_i915_gem_request **req_out)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       struct drm_i915_gem_request *req;
-       int ret;
-
-       if (!req_out)
-               return -EINVAL;
-
-       *req_out = NULL;
-
-       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-        * and restart.
-        */
-       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
-       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-       if (req == NULL)
-               return -ENOMEM;
-
-       ret = i915_gem_get_seqno(engine->i915, &req->seqno);
-       if (ret)
-               goto err;
-
-       kref_init(&req->ref);
-       req->i915 = dev_priv;
-       req->engine = engine;
-       req->ctx  = ctx;
-       i915_gem_context_reference(req->ctx);
-
-       /*
-        * Reserve space in the ring buffer for all the commands required to
-        * eventually emit this request. This is to guarantee that the
-        * i915_add_request() call can't fail. Note that the reserve may need
-        * to be redone if the request is not actually submitted straight
-        * away, e.g. because a GPU scheduler has deferred it.
-        */
-       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret)
-               goto err_ctx;
-
-       *req_out = req;
-       return 0;
-
-err_ctx:
-       i915_gem_context_unreference(ctx);
-err:
-       kmem_cache_free(dev_priv->requests, req);
-       return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx)
-{
-       struct drm_i915_gem_request *req;
-       int err;
-
-       if (ctx == NULL)
-               ctx = engine->i915->kernel_context;
-       err = __i915_gem_request_alloc(engine, ctx, &req);
-       return err ? ERR_PTR(err) : req;
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
@@ -3086,182 +2545,139 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
         * extra delay for a recent interrupt is pointless. Hence, we do
         * not need an engine->irq_seqno_barrier() before the seqno reads.
         */
-       list_for_each_entry(request, &engine->request_list, list) {
+       list_for_each_entry(request, &engine->request_list, link) {
                if (i915_gem_request_completed(request))
                        continue;
 
+               if (!i915_sw_fence_done(&request->submit))
+                       break;
+
                return request;
        }
 
        return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void reset_request(struct drm_i915_gem_request *request)
+{
+       void *vaddr = request->ring->vaddr;
+       u32 head;
+
+       /* As this request likely depends on state from the lost
+        * context, clear out all the user operations, leaving the
+        * breadcrumb at the end (so we get the fence notifications).
+        */
+       head = request->head;
+       if (request->postfix < head) {
+               memset(vaddr + head, 0, request->ring->size - head);
+               head = 0;
+       }
+       memset(vaddr + head, 0, request->postfix - head);
+}
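
reset_request() zeroes only the user payload of the hung request, preserving the breadcrumb, and splits the memset in two when the request wraps past the end of the ring. The wrap handling in isolation, as a runnable sketch with an assumed 16-byte ring:

    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 16

    /* Clear [head, postfix) in a circular buffer, handling wraparound. */
    static void clear_range(char *ring, unsigned int head, unsigned int postfix)
    {
            if (postfix < head) {           /* the range wraps past the end */
                    memset(ring + head, 0, RING_SIZE - head);
                    head = 0;
            }
            memset(ring + head, 0, postfix - head);
    }

    int main(void)
    {
            char ring[RING_SIZE];

            memset(ring, 'x', sizeof(ring));
            clear_range(ring, 12, 4);       /* wraps: clears [12,16) then [0,4) */
            for (unsigned int i = 0; i < RING_SIZE; i++)
                    putchar(ring[i] ? ring[i] : '.');
            putchar('\n');
            return 0;
    }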
+
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
+       struct i915_gem_context *incomplete_ctx;
        bool ring_hung;
 
+       /* Ensure the irq handler finishes and is not run again. */
+       tasklet_kill(&engine->irq_tasklet);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+
        request = i915_gem_find_active_request(engine);
-       if (request == NULL)
+       if (!request)
                return;
 
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
-
        i915_set_reset_status(request->ctx, ring_hung);
-       list_for_each_entry_continue(request, &engine->request_list, list)
-               i915_set_reset_status(request->ctx, false);
-}
-
-static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
-{
-       struct intel_ringbuffer *buffer;
-
-       while (!list_empty(&engine->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&engine->active_list,
-                                      struct drm_i915_gem_object,
-                                      engine_list[engine->id]);
-
-               i915_gem_object_retire__read(obj, engine->id);
-       }
-
-       /*
-        * Clear the execlists queue up before freeing the requests, as those
-        * are the ones that keep the context and ringbuffer backing objects
-        * pinned in place.
-        */
+       if (!ring_hung)
+               return;
 
-       if (i915.enable_execlists) {
-               /* Ensure irq handler finishes or is cancelled. */
-               tasklet_kill(&engine->irq_tasklet);
+       DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+                        engine->name, request->fence.seqno);
 
-               intel_execlists_cancel_requests(engine);
-       }
+       /* Setup the CS to resume from the breadcrumb of the hung request */
+       engine->reset_hw(engine, request);
 
-       /*
-        * We must free the requests after all the corresponding objects have
-        * been moved off active lists. Which is the same order as the normal
-        * retire_requests function does. This is important if object hold
-        * implicit references on things like e.g. ppgtt address spaces through
-        * the request.
+       /* Users of the default context do not rely on logical state
+        * preserved between batches. They have to emit full state on
+        * every batch and so it is safe to execute queued requests following
+        * the hang.
+        *
+        * Other contexts preserve state, which is now corrupt, so we want
+        * to skip all queued requests that reference the corrupt context.
         */
-       while (!list_empty(&engine->request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&engine->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
-
-               i915_gem_request_retire(request);
-       }
+       incomplete_ctx = request->ctx;
+       if (i915_gem_context_is_default(incomplete_ctx))
+               return;
 
-       /* Having flushed all requests from all queues, we know that all
-        * ringbuffers must now be empty. However, since we do not reclaim
-        * all space when retiring the request (to prevent HEADs colliding
-        * with rapid ringbuffer wraparound) the amount of available space
-        * upon reset is less than when we start. Do one more pass over
-        * all the ringbuffers to reset last_retired_head.
-        */
-       list_for_each_entry(buffer, &engine->buffers, link) {
-               buffer->last_retired_head = buffer->tail;
-               intel_ring_update_space(buffer);
-       }
+       list_for_each_entry_continue(request, &engine->request_list, link)
+               if (request->ctx == incomplete_ctx)
+                       reset_request(request);
 
-       intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+       engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
-void i915_gem_reset(struct drm_device *dev)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
-       /*
-        * Before we free the objects from the requests, we need to inspect
-        * them for finding the guilty party. As the requests only borrow
-        * their reference to the objects, the inspection must be done first.
-        */
-       for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(engine);
+       i915_gem_retire_requests(dev_priv);
 
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_cleanup(engine);
-
-       i915_gem_context_reset(dev);
+               i915_gem_reset_engine(engine);
+       mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
-       i915_gem_restore_fences(dev);
+       i915_gem_restore_fences(&dev_priv->drm);
+}
 
-       WARN_ON(i915_verify_lists(dev));
+static void nop_submit_request(struct drm_i915_gem_request *request)
+{
 }
 
-/**
- * This function clears the request list as sequence numbers are passed.
- * @engine: engine to retire requests on
- */
-void
-i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
 {
-       WARN_ON(i915_verify_lists(engine->dev));
+       engine->submit_request = nop_submit_request;
 
-       /* Retire requests first as we use it above for the early return.
-        * If we retire requests last, we may use a later seqno and so clear
-        * the requests lists without clearing the active list, leading to
-        * confusion.
+       /* Mark all pending requests as complete so that any concurrent
+        * (lockless) lookup doesn't try to wait upon the request as we
+        * reset it.
         */
-       while (!list_empty(&engine->request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&engine->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
-
-               if (!i915_gem_request_completed(request))
-                       break;
-
-               i915_gem_request_retire(request);
-       }
+       intel_engine_init_seqno(engine, engine->last_submitted_seqno);
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate,
-        * before we free the context associated with the requests.
+       /*
+        * Clear the execlists queue up before freeing the requests, as those
+        * are the ones that keep the context and ringbuffer backing objects
+        * pinned in place.
         */
-       while (!list_empty(&engine->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&engine->active_list,
-                                      struct drm_i915_gem_object,
-                                      engine_list[engine->id]);
 
-               if (!list_empty(&obj->last_read_req[engine->id]->list))
-                       break;
-
-               i915_gem_object_retire__read(obj, engine->id);
+       if (i915.enable_execlists) {
+               spin_lock(&engine->execlist_lock);
+               INIT_LIST_HEAD(&engine->execlist_queue);
+               i915_gem_request_put(engine->execlist_port[0].request);
+               i915_gem_request_put(engine->execlist_port[1].request);
+               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+               spin_unlock(&engine->execlist_lock);
        }
 
-       WARN_ON(i915_verify_lists(engine->dev));
+       engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
-       if (dev_priv->gt.active_engines == 0)
-               return;
-
-       GEM_BUG_ON(!dev_priv->gt.awake);
-
-       for_each_engine(engine, dev_priv) {
-               i915_gem_retire_requests_ring(engine);
-               if (list_empty(&engine->request_list))
-                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-       }
+       i915_gem_context_lost(dev_priv);
+       for_each_engine(engine, dev_priv)
+               i915_gem_cleanup_engine(engine);
+       mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
-       if (dev_priv->gt.active_engines == 0)
-               queue_delayed_work(dev_priv->wq,
-                                  &dev_priv->gt.idle_work,
-                                  msecs_to_jiffies(100));
+       i915_gem_retire_requests(dev_priv);
 }
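
Wedging the device swaps every engine's submit_request for nop_submit_request so that new work is silently dropped while outstanding requests are marked complete, letting waiters terminate instead of hanging. A sketch of that function-pointer swap pattern (engine_sim and friends are stand-ins, not kernel types):

    #include <stdio.h>

    struct engine_sim;
    typedef void (*submit_fn)(struct engine_sim *);

    struct engine_sim {
            const char *name;
            submit_fn submit_request;
    };

    static void real_submit(struct engine_sim *e)
    {
            printf("%s: submitting to hardware\n", e->name);
    }

    static void nop_submit(struct engine_sim *e)
    {
            /* Wedged: swallow the request; callers see it as completed. */
            (void)e;
    }

    static void set_wedged(struct engine_sim *engines, int count)
    {
            for (int i = 0; i < count; i++)
                    engines[i].submit_request = nop_submit;
    }

    int main(void)
    {
            struct engine_sim engines[] = {
                    { "rcs", real_submit },
                    { "bcs", real_submit },
            };

            engines[0].submit_request(&engines[0]);
            set_wedged(engines, 2);
            engines[0].submit_request(&engines[0]); /* silently dropped */
            return 0;
    }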
 
 static void
@@ -3281,10 +2697,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
         * We do not need to do this test under locking as in the worst-case
         * we queue the retire worker once too often.
         */
-       if (READ_ONCE(dev_priv->gt.awake))
+       if (READ_ONCE(dev_priv->gt.awake)) {
+               i915_queue_hangcheck(dev_priv);
                queue_delayed_work(dev_priv->wq,
                                   &dev_priv->gt.retire_work,
                                   round_jiffies_up_relative(HZ));
+       }
 }
 
 static void
@@ -3294,7 +2712,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
                container_of(work, typeof(*dev_priv), gt.idle_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
-       unsigned int stuck_engines;
        bool rearm_hangcheck;
 
        if (!READ_ONCE(dev_priv->gt.awake))
@@ -3307,311 +2724,106 @@ i915_gem_idle_work_handler(struct work_struct *work)
                cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 
        if (!mutex_trylock(&dev->struct_mutex)) {
-               /* Currently busy, come back later */
-               mod_delayed_work(dev_priv->wq,
-                                &dev_priv->gt.idle_work,
-                                msecs_to_jiffies(50));
-               goto out_rearm;
-       }
-
-       if (dev_priv->gt.active_engines)
-               goto out_unlock;
-
-       for_each_engine(engine, dev_priv)
-               i915_gem_batch_pool_fini(&engine->batch_pool);
-
-       GEM_BUG_ON(!dev_priv->gt.awake);
-       dev_priv->gt.awake = false;
-       rearm_hangcheck = false;
-
-       stuck_engines = intel_kick_waiters(dev_priv);
-       if (unlikely(stuck_engines)) {
-               DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
-               dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 6)
-               gen6_rps_idle(dev_priv);
-       intel_runtime_pm_put(dev_priv);
-out_unlock:
-       mutex_unlock(&dev->struct_mutex);
-
-out_rearm:
-       if (rearm_hangcheck) {
-               GEM_BUG_ON(!dev_priv->gt.awake);
-               i915_queue_hangcheck(dev_priv);
-       }
-}
-
-/**
- * Ensures that an object will eventually get non-busy by flushing any required
- * write domains, emitting any outstanding lazy request and retiring and
- * completed requests.
- * @obj: object to flush
- */
-static int
-i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
-{
-       int i;
-
-       if (!obj->active)
-               return 0;
-
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               struct drm_i915_gem_request *req;
-
-               req = obj->last_read_req[i];
-               if (req == NULL)
-                       continue;
-
-               if (i915_gem_request_completed(req))
-                       i915_gem_object_retire__read(obj, i);
-       }
-
-       return 0;
-}
-
-/**
- * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- *
- * Returns 0 if successful, else an error is returned with the remaining time in
- * the timeout parameter.
- *  -ETIME: object is still busy after timeout
- *  -ERESTARTSYS: signal interrupted the wait
- *  -ENONENT: object doesn't exist
- * Also possible, but rare:
- *  -EAGAIN: GPU wedged
- *  -ENOMEM: damn
- *  -ENODEV: Internal IRQ fail
- *  -E?: The add request failed
- *
- * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
- * non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter * race condition exists in the busy
- * ioctl
- */
-int
-i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
-{
-       struct drm_i915_gem_wait *args = data;
-       struct drm_i915_gem_object *obj;
-       struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-       int i, n = 0;
-       int ret;
-
-       if (args->flags != 0)
-               return -EINVAL;
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
-       if (&obj->base == NULL) {
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
-       }
-
-       /* Need to make sure the object gets inactive eventually. */
-       ret = i915_gem_object_flush_active(obj);
-       if (ret)
-               goto out;
-
-       if (!obj->active)
-               goto out;
-
-       /* Do this after OLR check to make sure we make forward progress polling
-        * on this IOCTL with a timeout == 0 (like busy ioctl)
-        */
-       if (args->timeout_ns == 0) {
-               ret = -ETIME;
-               goto out;
-       }
-
-       drm_gem_object_unreference(&obj->base);
-
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (obj->last_read_req[i] == NULL)
-                       continue;
-
-               req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-
-       for (i = 0; i < n; i++) {
-               if (ret == 0)
-                       ret = __i915_wait_request(req[i], true,
-                                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-                                                 to_rps_client(file));
-               i915_gem_request_unreference(req[i]);
-       }
-       return ret;
-
-out:
-       drm_gem_object_unreference(&obj->base);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-static int
-__i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                      struct intel_engine_cs *to,
-                      struct drm_i915_gem_request *from_req,
-                      struct drm_i915_gem_request **to_req)
-{
-       struct intel_engine_cs *from;
-       int ret;
-
-       from = i915_gem_request_get_engine(from_req);
-       if (to == from)
-               return 0;
-
-       if (i915_gem_request_completed(from_req))
-               return 0;
-
-       if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               ret = __i915_wait_request(from_req,
-                                         i915->mm.interruptible,
-                                         NULL,
-                                         &i915->rps.semaphores);
-               if (ret)
-                       return ret;
-
-               i915_gem_object_retire_request(obj, from_req);
-       } else {
-               int idx = intel_ring_sync_index(from, to);
-               u32 seqno = i915_gem_request_get_seqno(from_req);
-
-               WARN_ON(!to_req);
-
-               if (seqno <= from->semaphore.sync_seqno[idx])
-                       return 0;
+               /* Currently busy, come back later */
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->gt.idle_work,
+                                msecs_to_jiffies(50));
+               goto out_rearm;
+       }
 
-               if (*to_req == NULL) {
-                       struct drm_i915_gem_request *req;
+       if (dev_priv->gt.active_engines)
+               goto out_unlock;
 
-                       req = i915_gem_request_alloc(to, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
+       for_each_engine(engine, dev_priv)
+               i915_gem_batch_pool_fini(&engine->batch_pool);
 
-                       *to_req = req;
-               }
+       GEM_BUG_ON(!dev_priv->gt.awake);
+       dev_priv->gt.awake = false;
+       rearm_hangcheck = false;
 
-               trace_i915_gem_ring_sync_to(*to_req, from, from_req);
-               ret = to->semaphore.sync_to(*to_req, from, seqno);
-               if (ret)
-                       return ret;
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
+       intel_runtime_pm_put(dev_priv);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
 
-               /* We use last_read_req because sync_to()
-                * might have just caused seqno wrap under
-                * the radar.
-                */
-               from->semaphore.sync_seqno[idx] =
-                       i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+out_rearm:
+       if (rearm_hangcheck) {
+               GEM_BUG_ON(!dev_priv->gt.awake);
+               i915_queue_hangcheck(dev_priv);
        }
+}
 
-       return 0;
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+       struct drm_i915_gem_object *obj = to_intel_bo(gem);
+       struct drm_i915_file_private *fpriv = file->driver_priv;
+       struct i915_vma *vma, *vn;
+
+       mutex_lock(&obj->base.dev->struct_mutex);
+       list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
+               if (vma->vm->file == fpriv)
+                       i915_vma_close(vma);
+       mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 /**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- *          This will be allocated and returned if a request is
- *          required but not passed in.
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- *   request must wait for it to complete (either CPU or in hw, requests
- *   on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- *   request must wait for outstanding read requests to complete.
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ *  -ETIME: object is still busy after timeout
+ *  -ERESTARTSYS: signal interrupted the wait
+ *  -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ *  -EAGAIN: GPU wedged
+ *  -ENOMEM: damn
+ *  -ENODEV: Internal IRQ fail
+ *  -E?: The add request failed
  *
- * Returns 0 if successful, else propagates up the lower layer error.
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the busy
+ * ioctl.
  */
 int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_engine_cs *to,
-                    struct drm_i915_gem_request **to_req)
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       const bool readonly = obj->base.pending_write_domain == 0;
-       struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-       int ret, i, n;
+       struct drm_i915_gem_wait *args = data;
+       struct intel_rps_client *rps = to_rps_client(file);
+       struct drm_i915_gem_object *obj;
+       unsigned long active;
+       int idx, ret = 0;
 
-       if (!obj->active)
-               return 0;
+       if (args->flags != 0)
+               return -EINVAL;
 
-       if (to == NULL)
-               return i915_gem_object_wait_rendering(obj, readonly);
+       obj = i915_gem_object_lookup(file, args->bo_handle);
+       if (!obj)
+               return -ENOENT;
 
-       n = 0;
-       if (readonly) {
-               if (obj->last_write_req)
-                       req[n++] = obj->last_write_req;
-       } else {
-               for (i = 0; i < I915_NUM_ENGINES; i++)
-                       if (obj->last_read_req[i])
-                               req[n++] = obj->last_read_req[i];
-       }
-       for (i = 0; i < n; i++) {
-               ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+       active = __I915_BO_ACTIVE(obj);
+       for_each_active(active, idx) {
+               s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
+               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+                                                   I915_WAIT_INTERRUPTIBLE,
+                                                   timeout, rps);
                if (ret)
-                       return ret;
+                       break;
        }
 
-       return 0;
-}
-
-static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
-{
-       u32 old_write_domain, old_read_domains;
-
-       /* Force a pagefault for domain tracking on next user access */
-       i915_gem_release_mmap(obj);
-
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
-               return;
-
-       old_read_domains = obj->base.read_domains;
-       old_write_domain = obj->base.write_domain;
-
-       obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
-       obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
-
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           old_write_domain);
+       i915_gem_object_put_unlocked(obj);
+       return ret;
 }
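
The rewritten wait ioctl snapshots __I915_BO_ACTIVE(obj) and then waits on the last read from each engine whose bit is set; for_each_active() visits only those set bits. A userspace approximation of that iteration using ffs(), with a hypothetical macro name:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* Visit each set bit in @mask, clearing it as we go (cf. for_each_active). */
    #define for_each_set_bit_destructive(mask, idx) \
            for (; (mask) && ((idx) = ffs(mask) - 1, 1); (mask) &= ~(1u << (idx)))

    int main(void)
    {
            unsigned int active = (1u << 0) | (1u << 3);    /* engines 0 and 3 busy */
            int idx;

            for_each_set_bit_destructive(active, idx)
                    printf("waiting on last read from engine %d\n", idx);
            return 0;
    }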
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
-       GEM_BUG_ON(vma->pin_count);
+       GEM_BUG_ON(i915_vma_is_pinned(vma));
 
        if (vma->iomap == NULL)
                return;
@@ -3620,65 +2832,83 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
        vma->iomap = NULL;
 }
 
-static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
+int i915_vma_unbind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       unsigned long active;
        int ret;
 
-       if (list_empty(&vma->obj_link))
-               return 0;
-
-       if (!drm_mm_node_allocated(&vma->node)) {
-               i915_gem_vma_destroy(vma);
-               return 0;
-       }
-
-       if (vma->pin_count)
-               return -EBUSY;
+       /* First wait upon any activity as retiring the request may
+        * have side-effects such as unpinning or even unbinding this vma.
+        */
+       active = i915_vma_get_active(vma);
+       if (active) {
+               int idx;
+
+               /* When a closed VMA is retired, it is unbound - eek.
+                * In order to prevent it from being recursively closed,
+                * take a pin on the vma so that the second unbind is
+                * aborted.
+                */
+               __i915_vma_pin(vma);
 
-       BUG_ON(obj->pages == NULL);
+               for_each_active(active, idx) {
+                       ret = i915_gem_active_retire(&vma->last_read[idx],
+                                                  &vma->vm->dev->struct_mutex);
+                       if (ret)
+                               break;
+               }
 
-       if (wait) {
-               ret = i915_gem_object_wait_rendering(obj, false);
+               __i915_vma_unpin(vma);
                if (ret)
                        return ret;
+
+               GEM_BUG_ON(i915_vma_is_active(vma));
        }
 
-       if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-               i915_gem_object_finish_gtt(obj);
+       if (i915_vma_is_pinned(vma))
+               return -EBUSY;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               goto destroy;
 
+       GEM_BUG_ON(obj->bind_count == 0);
+       GEM_BUG_ON(!obj->pages);
+
+       if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
-               ret = i915_gem_object_put_fence(obj);
+               ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;
 
+               /* Force a pagefault for domain tracking on next user access */
+               i915_gem_release_mmap(obj);
+
                __i915_vma_iounmap(vma);
+               vma->flags &= ~I915_VMA_CAN_FENCE;
        }
 
-       trace_i915_vma_unbind(vma);
-
-       vma->vm->unbind_vma(vma);
-       vma->bound = 0;
-
-       list_del_init(&vma->vm_link);
-       if (vma->is_ggtt) {
-               if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-                       obj->map_and_fenceable = false;
-               } else if (vma->ggtt_view.pages) {
-                       sg_free_table(vma->ggtt_view.pages);
-                       kfree(vma->ggtt_view.pages);
-               }
-               vma->ggtt_view.pages = NULL;
+       if (likely(!vma->vm->closed)) {
+               trace_i915_vma_unbind(vma);
+               vma->vm->unbind_vma(vma);
        }
+       vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
        drm_mm_remove_node(&vma->node);
-       i915_gem_vma_destroy(vma);
+       list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+       if (vma->pages != obj->pages) {
+               GEM_BUG_ON(!vma->pages);
+               sg_free_table(vma->pages);
+               kfree(vma->pages);
+       }
+       vma->pages = NULL;
 
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
-       if (list_empty(&obj->vma_list))
-               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       if (--obj->bind_count == 0)
+               list_move_tail(&obj->global_list,
+                              &to_i915(obj->base.dev)->mm.unbound_list);
 
        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
@@ -3686,36 +2916,28 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
         */
        i915_gem_object_unpin_pages(obj);
 
-       return 0;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
-       return __i915_vma_unbind(vma, true);
-}
+destroy:
+       if (unlikely(i915_vma_is_closed(vma)))
+               i915_vma_destroy(vma);
 
-int __i915_vma_unbind_no_wait(struct i915_vma *vma)
-{
-       return __i915_vma_unbind(vma, false);
+       return 0;
 }
 
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+                          unsigned int flags)
 {
        struct intel_engine_cs *engine;
        int ret;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
        for_each_engine(engine, dev_priv) {
                if (engine->last_context == NULL)
                        continue;
 
-               ret = intel_engine_idle(engine);
+               ret = intel_engine_idle(engine, flags);
                if (ret)
                        return ret;
        }
 
-       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
@@ -3753,128 +2975,87 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 }
 
 /**
- * Finds free space in the GTT aperture and binds the object or a view of it
- * there.
- * @obj: object to bind
- * @vm: address space to bind into
- * @ggtt_view: global gtt view if applicable
- * @alignment: requested alignment
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
  * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry, to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
-static struct i915_vma *
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-                          struct i915_address_space *vm,
-                          const struct i915_ggtt_view *ggtt_view,
-                          unsigned alignment,
-                          uint64_t flags)
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       u32 fence_alignment, unfenced_alignment;
-       u32 search_flag, alloc_flag;
+       struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+       struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
-       u64 size, fence_size;
-       struct i915_vma *vma;
        int ret;
 
-       if (i915_is_ggtt(vm)) {
-               u32 view_size;
-
-               if (WARN_ON(!ggtt_view))
-                       return ERR_PTR(-EINVAL);
-
-               view_size = i915_ggtt_view_size(obj, ggtt_view);
-
-               fence_size = i915_gem_get_gtt_size(dev,
-                                                  view_size,
-                                                  obj->tiling_mode);
-               fence_alignment = i915_gem_get_gtt_alignment(dev,
-                                                            view_size,
-                                                            obj->tiling_mode,
-                                                            true);
-               unfenced_alignment = i915_gem_get_gtt_alignment(dev,
-                                                               view_size,
-                                                               obj->tiling_mode,
-                                                               false);
-               size = flags & PIN_MAPPABLE ? fence_size : view_size;
-       } else {
-               fence_size = i915_gem_get_gtt_size(dev,
-                                                  obj->base.size,
-                                                  obj->tiling_mode);
-               fence_alignment = i915_gem_get_gtt_alignment(dev,
-                                                            obj->base.size,
-                                                            obj->tiling_mode,
-                                                            true);
-               unfenced_alignment =
-                       i915_gem_get_gtt_alignment(dev,
-                                                  obj->base.size,
-                                                  obj->tiling_mode,
-                                                  false);
-               size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-       }
+       GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+       GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+       size = max(size, vma->size);
+       if (flags & PIN_MAPPABLE)
+               size = i915_gem_get_ggtt_size(dev_priv, size,
+                                             i915_gem_object_get_tiling(obj));
+
+       alignment = max(max(alignment, vma->display_alignment),
+                       i915_gem_get_ggtt_alignment(dev_priv, size,
+                                                   i915_gem_object_get_tiling(obj),
+                                                   flags & PIN_MAPPABLE));
 
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-       end = vm->total;
+
+       end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
-               end = min_t(u64, end, ggtt->mappable_end);
+               end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
-       if (alignment == 0)
-               alignment = flags & PIN_MAPPABLE ? fence_alignment :
-                                               unfenced_alignment;
-       if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-               DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
-                         ggtt_view ? ggtt_view->type : 0,
-                         alignment);
-               return ERR_PTR(-EINVAL);
-       }
-
        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
-                         ggtt_view ? ggtt_view->type : 0,
-                         size,
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+                         size, obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
        }
 
        ret = i915_gem_object_get_pages(obj);
        if (ret)
-               return ERR_PTR(ret);
+               return ret;
 
        i915_gem_object_pin_pages(obj);
 
-       vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-                         i915_gem_obj_lookup_or_create_vma(obj, vm);
-
-       if (IS_ERR(vma))
-               goto err_unpin;
-
        if (flags & PIN_OFFSET_FIXED) {
-               uint64_t offset = flags & PIN_OFFSET_MASK;
-
-               if (offset & (alignment - 1) || offset + size > end) {
+               u64 offset = flags & PIN_OFFSET_MASK;
+               if (offset & (alignment - 1) || offset > end - size) {
                        ret = -EINVAL;
-                       goto err_free_vma;
+                       goto err_unpin;
                }
+
                vma->node.start = offset;
                vma->node.size = size;
                vma->node.color = obj->cache_level;
-               ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
                if (ret) {
                        ret = i915_gem_evict_for_vma(vma);
                        if (ret == 0)
-                               ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+                               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+                       if (ret)
+                               goto err_unpin;
                }
-               if (ret)
-                       goto err_free_vma;
        } else {
+               u32 search_flag, alloc_flag;
+
                if (flags & PIN_HIGH) {
                        search_flag = DRM_MM_SEARCH_BELOW;
                        alloc_flag = DRM_MM_CREATE_TOP;
@@ -3883,47 +3064,45 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                        alloc_flag = DRM_MM_CREATE_DEFAULT;
                }
 
+               /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+                * so we know that we always have a minimum alignment of 4096.
+                * The drm_mm range manager is optimised to return results
+                * with zero alignment, so where possible use the optimal
+                * path.
+                */
+               if (alignment <= 4096)
+                       alignment = 0;
+
 search_free:
-               ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+               ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+                                                         &vma->node,
                                                          size, alignment,
                                                          obj->cache_level,
                                                          start, end,
                                                          search_flag,
                                                          alloc_flag);
                if (ret) {
-                       ret = i915_gem_evict_something(dev, vm, size, alignment,
+                       ret = i915_gem_evict_something(vma->vm, size, alignment,
                                                       obj->cache_level,
                                                       start, end,
                                                       flags);
                        if (ret == 0)
                                goto search_free;
 
-                       goto err_free_vma;
+                       goto err_unpin;
                }
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
-               ret = -EINVAL;
-               goto err_remove_node;
-       }
-
-       trace_i915_vma_bind(vma, flags);
-       ret = i915_vma_bind(vma, obj->cache_level, flags);
-       if (ret)
-               goto err_remove_node;
+       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&vma->vm_link, &vm->inactive_list);
+       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       obj->bind_count++;
 
-       return vma;
+       return 0;
 
-err_remove_node:
-       drm_mm_remove_node(&vma->node);
-err_free_vma:
-       i915_gem_vma_destroy(vma);
-       vma = ERR_PTR(ret);
 err_unpin:
        i915_gem_object_unpin_pages(obj);
-       return vma;
+       return ret;
 }
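
i915_vma_insert() follows the classic allocate-evict-retry shape: try the range manager, and on failure evict something and jump back to search_free (the fixed-offset path does the same with i915_gem_evict_for_vma). The control flow reduced to a runnable sketch, where try_insert() and evict_something() are stand-ins rather than kernel APIs:

    #include <stdio.h>

    /* Stand-ins for the range allocator and the eviction pass. */
    static int try_insert(int attempt)
    {
            return attempt == 0 ? -1 : 0;   /* first try fails: space is full */
    }

    static int evict_something(void)
    {
            printf("evicting an idle vma to make room\n");
            return 0;
    }

    static int vma_insert(void)
    {
            int attempt = 0;
            int ret;

    search_free:
            ret = try_insert(attempt++);
            if (ret) {
                    ret = evict_something();
                    if (ret == 0)
                            goto search_free;       /* retry with the freed space */
                    return ret;                     /* nothing left to evict */
            }
            return 0;
    }

    int main(void)
    {
            printf("insert: %d\n", vma_insert());
            return 0;
    }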
 
 bool
@@ -3968,51 +3147,72 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
-       uint32_t old_write_domain;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
                return;
 
        /* No actual flushing is required for the GTT write domain.  Writes
-        * to it immediately go to main memory as far as we know, so there's
+        * to it "immediately" go to main memory as far as we know, so there's
         * no chipset flush.  It also doesn't land in render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
+        *
+        * We also have to wait a bit for the writes to land from the GTT.
+        * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+        * timing. This issue has only been observed when switching quickly
+        * between GTT writes and CPU reads from inside the kernel on recent hw,
+        * and it appears to only affect discrete GTT blocks (i.e. on LLC
+        * system agents we cannot reproduce this behaviour).
         */
        wmb();
+       if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
+               POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
 
-       old_write_domain = obj->base.write_domain;
-       obj->base.write_domain = 0;
-
-       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+       intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
 
+       obj->base.write_domain = 0;
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
-                                           old_write_domain);
+                                           I915_GEM_DOMAIN_GTT);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
-       uint32_t old_write_domain;
-
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
        if (i915_gem_clflush_object(obj, obj->pin_display))
                i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-       old_write_domain = obj->base.write_domain;
-       obj->base.write_domain = 0;
-
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
+       obj->base.write_domain = 0;
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
-                                           old_write_domain);
+                                           I915_GEM_DOMAIN_CPU);
+}
+
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!i915_vma_is_ggtt(vma))
+                       continue;
+
+               if (i915_vma_is_active(vma))
+                       continue;
+
+               if (!drm_mm_node_allocated(&vma->node))
+                       continue;
+
+               list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       }
 }
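
i915_gem_object_bump_inactive_ggtt() keeps the inactive list in rough LRU order by moving just-accessed VMAs to the tail, so eviction prefers cold entries. A minimal kernel-list-style sketch of that bump:

    #include <stdio.h>

    struct node {
            const char *name;
            struct node *prev, *next;
    };

    /* Minimal circular list with a sentinel head, kernel-list style. */
    static void list_init(struct node *head) { head->prev = head->next = head; }

    static void list_del(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    static void list_add_tail(struct node *n, struct node *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    /* "Bump" an entry: most recently used entries live at the tail. */
    static void list_move_tail(struct node *n, struct node *head)
    {
            list_del(n);
            list_add_tail(n, head);
    }

    int main(void)
    {
            struct node head, a = { "a" }, b = { "b" }, c = { "c" };

            list_init(&head);
            list_add_tail(&a, &head);
            list_add_tail(&b, &head);
            list_add_tail(&c, &head);
            list_move_tail(&a, &head);      /* "a" was just accessed */
            for (struct node *n = head.next; n != &head; n = n->next)
                    printf("%s ", n->name); /* prints: b c a */
            printf("\n");
            return 0;
    }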
 
 /**
@@ -4026,20 +3226,16 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t old_write_domain, old_read_domains;
-       struct i915_vma *vma;
        int ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
-               return 0;
-
        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+               return 0;
+
        /* Flush and acquire obj->pages so that we are coherent through
         * direct access in memory with previous cached writes through
         * shmemfs and that our cache domain tracking remains valid.
@@ -4080,10 +3276,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       vma = i915_gem_obj_to_ggtt(obj);
-       if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-               list_move_tail(&vma->vm_link,
-                              &ggtt->base.inactive_list);
+       i915_gem_object_bump_inactive_ggtt(obj);
 
        return 0;
 }
@@ -4106,9 +3299,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct i915_vma *vma, *next;
-       bool bound = false;
+       struct i915_vma *vma;
        int ret = 0;
 
        if (obj->cache_level == cache_level)
@@ -4119,21 +3310,28 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * catch the issue of the CS prefetch crossing page boundaries and
         * reading an invalid PTE on older architectures.
         */
-       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
-               if (vma->pin_count) {
+               if (i915_vma_is_pinned(vma)) {
                        DRM_DEBUG("can not change the cache level of pinned objects\n");
                        return -EBUSY;
                }
 
-               if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-                       ret = i915_vma_unbind(vma);
-                       if (ret)
-                               return ret;
-               } else
-                       bound = true;
+               if (i915_gem_valid_gtt_space(vma, cache_level))
+                       continue;
+
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       return ret;
+
+               /* As unbinding may affect other elements in the
+                * obj->vma_list (due to side-effects from retiring
+                * an active vma), play safe and restart the iterator.
+                */
+               goto restart;
        }
 
        /* We can reuse the existing drm_mm nodes but need to change the
@@ -4143,7 +3341,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * rewrite the PTE in the belief that doing so tramples upon less
         * state and so involves less work.
         */
-       if (bound) {
+       if (obj->bind_count) {
                /* Before we change the PTE, the GPU must not be accessing it.
                 * If we wait upon the object, we know that all the bound
                 * VMA are no longer active.
@@ -4152,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                if (ret)
                        return ret;
 
-               if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+               if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
                        /* Access to snoopable pages through the GTT is
                         * incoherent and on some machines causes a hard
                         * lockup. Relinquish the CPU mmaping to force
@@ -4169,9 +3367,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                         * dropped the fence as all snoopable access is
                         * supposed to be linear.
                         */
-                       ret = i915_gem_object_put_fence(obj);
-                       if (ret)
-                               return ret;
+                       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+                               ret = i915_vma_put_fence(vma);
+                               if (ret)
+                                       return ret;
+                       }
                } else {
                        /* We either have incoherent backing store and
                         * so no GTT access or the architecture is fully
@@ -4215,8 +3415,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
        switch (obj->cache_level) {
@@ -4234,7 +3434,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                break;
        }
 
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        return 0;
 }
 
@@ -4276,15 +3476,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto rpm_put;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
 
        ret = i915_gem_object_set_cache_level(obj, level);
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 rpm_put:
@@ -4298,11 +3498,12 @@ rpm_put:
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
-int
+struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view)
 {
+       struct i915_vma *vma;
        u32 old_read_domains, old_write_domain;
        int ret;
 
@@ -4322,19 +3523,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         */
        ret = i915_gem_object_set_cache_level(obj,
                                              HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
-       if (ret)
+       if (ret) {
+               vma = ERR_PTR(ret);
                goto err_unpin_display;
+       }
 
        /* As the user may map the buffer once pinned in the display plane
         * (e.g. libkms for the bootup splash), we have to ensure that we
-        * always use map_and_fenceable for all scanout buffers.
+        * always use map_and_fenceable for all scanout buffers. However,
+        * it may simply be too big to fit into the mappable aperture, in
+        * which case put it anyway and hope that userspace can cope (but
+        * always first try to preserve the existing ABI).
         */
-       ret = i915_gem_object_ggtt_pin(obj, view, alignment,
-                                      view->type == I915_GGTT_VIEW_NORMAL ?
-                                      PIN_MAPPABLE : 0);
-       if (ret)
+       vma = ERR_PTR(-ENOSPC);
+       if (view->type == I915_GGTT_VIEW_NORMAL)
+               vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+                                              PIN_MAPPABLE | PIN_NONBLOCK);
+       if (IS_ERR(vma))
+               vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+       if (IS_ERR(vma))
                goto err_unpin_display;
 
+       vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+       WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
+
        i915_gem_object_flush_cpu_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -4350,23 +3563,28 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                            old_read_domains,
                                            old_write_domain);
 
-       return 0;
+       return vma;
 
 err_unpin_display:
        obj->pin_display--;
-       return ret;
+       return vma;
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-                                        const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-       if (WARN_ON(obj->pin_display == 0))
+       if (WARN_ON(vma->obj->pin_display == 0))
                return;
 
-       i915_gem_object_ggtt_unpin_view(obj, view);
+       if (--vma->obj->pin_display == 0)
+               vma->display_alignment = 0;
 
-       obj->pin_display--;
+       /* Bump the LRU to try and avoid premature eviction whilst flipping */
+       if (!i915_vma_is_active(vma))
+               list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+       i915_vma_unpin(vma);
+       WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
 }
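
With this change the display-plane pin/unpin pair is keyed on the VMA rather than on an (object, view) tuple. A minimal caller-side sketch of the new contract (hypothetical caller; error handling abbreviated, and the scanout step only indicated):

    struct i915_vma *vma;

    vma = i915_gem_object_pin_to_display_plane(obj, alignment, view);
    if (IS_ERR(vma))
            return PTR_ERR(vma);

    /* ... program scanout from i915_ggtt_offset(vma) ... */

    i915_gem_object_unpin_from_display_plane(vma);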
 
 /**
@@ -4383,13 +3601,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
-               return 0;
-
        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return 0;
+
        i915_gem_object_flush_gtt_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -4464,28 +3682,31 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                target = request;
        }
        if (target)
-               i915_gem_request_reference(target);
+               i915_gem_request_get(target);
        spin_unlock(&file_priv->mm.lock);
 
        if (target == NULL)
                return 0;
 
-       ret = __i915_wait_request(target, true, NULL, NULL);
-       i915_gem_request_unreference(target);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+       i915_gem_request_put(target);
 
        return ret;
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-       struct drm_i915_gem_object *obj = vma->obj;
+       if (!drm_mm_node_allocated(&vma->node))
+               return false;
+
+       if (vma->node.size < size)
+               return true;
 
-       if (alignment &&
-           vma->node.start & (alignment - 1))
+       if (alignment && vma->node.start & (alignment - 1))
                return true;
 
-       if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+       if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;
 
        if (flags & PIN_OFFSET_BIAS &&
@@ -4502,135 +3723,208 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        bool mappable, fenceable;
        u32 fence_size, fence_alignment;
 
-       fence_size = i915_gem_get_gtt_size(obj->base.dev,
-                                          obj->base.size,
-                                          obj->tiling_mode);
-       fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-                                                    obj->base.size,
-                                                    obj->tiling_mode,
-                                                    true);
+       fence_size = i915_gem_get_ggtt_size(dev_priv,
+                                           vma->size,
+                                           i915_gem_object_get_tiling(obj));
+       fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+                                                     vma->size,
+                                                     i915_gem_object_get_tiling(obj),
+                                                     true);
 
        fenceable = (vma->node.size == fence_size &&
                     (vma->node.start & (fence_alignment - 1)) == 0);
 
        mappable = (vma->node.start + fence_size <=
-                   to_i915(obj->base.dev)->ggtt.mappable_end);
+                   dev_priv->ggtt.mappable_end);
 
-       obj->map_and_fenceable = mappable && fenceable;
+       if (mappable && fenceable)
+               vma->flags |= I915_VMA_CAN_FENCE;
+       else
+               vma->flags &= ~I915_VMA_CAN_FENCE;
 }
 
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
-                      struct i915_address_space *vm,
-                      const struct i915_ggtt_view *ggtt_view,
-                      uint32_t alignment,
-                      uint64_t flags)
+int __i915_vma_do_pin(struct i915_vma *vma,
+                     u64 size, u64 alignment, u64 flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_vma *vma;
-       unsigned bound;
+       unsigned int bound = vma->flags;
        int ret;
 
-       if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
-               return -ENODEV;
-
-       if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
-               return -EINVAL;
-
-       if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
-               return -EINVAL;
-
-       if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-               return -EINVAL;
-
-       vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
-                         i915_gem_obj_to_vma(obj, vm);
-
-       if (vma) {
-               if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-                       return -EBUSY;
+       GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+       GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-               if (i915_vma_misplaced(vma, alignment, flags)) {
-                       WARN(vma->pin_count,
-                            "bo is already pinned in %s with incorrect alignment:"
-                            " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
-                            " obj->map_and_fenceable=%d\n",
-                            ggtt_view ? "ggtt" : "ppgtt",
-                            upper_32_bits(vma->node.start),
-                            lower_32_bits(vma->node.start),
-                            alignment,
-                            !!(flags & PIN_MAPPABLE),
-                            obj->map_and_fenceable);
-                       ret = i915_vma_unbind(vma);
-                       if (ret)
-                               return ret;
-
-                       vma = NULL;
-               }
+       if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+               ret = -EBUSY;
+               goto err;
        }
 
-       bound = vma ? vma->bound : 0;
-       if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-               vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
-                                                flags);
-               if (IS_ERR(vma))
-                       return PTR_ERR(vma);
-       } else {
-               ret = i915_vma_bind(vma, obj->cache_level, flags);
+       if ((bound & I915_VMA_BIND_MASK) == 0) {
+               ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
-                       return ret;
+                       goto err;
        }
 
-       if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
-           (bound ^ vma->bound) & GLOBAL_BIND) {
+       ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+       if (ret)
+               goto err;
+
+       if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);
-               WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-       }
 
-       vma->pin_count++;
+       GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;
-}
 
-int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm,
-                   uint32_t alignment,
-                   uint64_t flags)
-{
-       return i915_gem_object_do_pin(obj, vm,
-                                     i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-                                     alignment, flags);
+err:
+       __i915_vma_unpin(vma);
+       return ret;
 }
 
-int
+struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
-                        uint32_t alignment,
-                        uint64_t flags)
+                        u64 size,
+                        u64 alignment,
+                        u64 flags)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+       struct i915_vma *vma;
+       int ret;
+
+       vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
+       if (IS_ERR(vma))
+               return vma;
+
+       if (i915_vma_misplaced(vma, size, alignment, flags)) {
+               if (flags & PIN_NONBLOCK &&
+                   (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
+                       return ERR_PTR(-ENOSPC);
+
+               WARN(i915_vma_is_pinned(vma),
+                    "bo is already pinned in ggtt with incorrect alignment:"
+                    " offset=%08x, req.alignment=%llx,"
+                    " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
+                    i915_ggtt_offset(vma), alignment,
+                    !!(flags & PIN_MAPPABLE),
+                    i915_vma_is_map_and_fenceable(vma));
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+
+       ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+       if (ret)
+               return ERR_PTR(ret);
 
-       BUG_ON(!view);
+       return vma;
+}
 
-       return i915_gem_object_do_pin(obj, &ggtt->base, view,
-                                     alignment, flags | PIN_GLOBAL);
+static __always_inline unsigned int __busy_read_flag(unsigned int id)
+{
+       /* Note that we could alias engines in the execbuf API, but
+        * that would be very unwise as it prevents userspace from
+        * exercising fine control over engine selection. Ahem.
+        *
+        * This should be something like EXEC_MAX_ENGINE instead of
+        * I915_NUM_ENGINES.
+        */
+       BUILD_BUG_ON(I915_NUM_ENGINES > 16);
+       return 0x10000 << id;
 }
 
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-                               const struct i915_ggtt_view *view)
+static __always_inline unsigned int __busy_write_id(unsigned int id)
 {
-       struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+       /* The uABI guarantees an active writer is also amongst the read
+        * engines. This would be true if we accessed the activity tracking
+        * under the lock, but as we perform the lookup of the object and
+        * its activity locklessly we can not guarantee that the last_write
+        * being active implies that we have set the same engine flag from
+        * last_read - hence we always set both read and write busy for
+        * last_write.
+        */
+       return id | __busy_read_flag(id);
+}
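
Worked out numerically from the two definitions above, for an engine whose exec_id is 2:

    unsigned int rd = __busy_read_flag(2);  /* 0x10000 << 2 == 0x00040000 */
    unsigned int wr = __busy_write_id(2);   /* 2 | 0x00040000 == 0x00040002 */

so the top 16 bits carry one read flag per engine, the low word carries the writer's exec_id, and a writer always also appears in the read set.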
+
+static __always_inline unsigned int
+__busy_set_if_active(const struct i915_gem_active *active,
+                    unsigned int (*flag)(unsigned int id))
+{
+       struct drm_i915_gem_request *request;
+
+       request = rcu_dereference(active->request);
+       if (!request || i915_gem_request_completed(request))
+               return 0;
+
+        * This is racy. See __i915_gem_active_get_rcu() for a detailed
+        * discussion of how to handle the race correctly, but for reporting
+        * the busy state we err on the side of potentially reporting the
+        * wrong engine as being busy (but we guarantee that the result
+        * is at least self-consistent).
+        *
+        * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
+        * whilst we are inspecting it, even though we hold the RCU read lock.
+        * This means that there is a small window for the engine and/or the
+        * seqno to have been overwritten. The seqno will always be in the
+        * future compared to the intended one, and so we know that if that
+        * seqno is idle (on whatever engine) our request is idle and the
+        * return 0 above is correct.
+        *
+        * The issue is that if the engine is switched, it is just as likely
+        * to report that it is busy (but since the switch happened, we know
+        * the request should be idle). So there is a small chance that a busy
+        * result is actually the wrong engine.
+        *
+        * So why don't we care?
+        *
+        * For starters, the busy ioctl is a heuristic that is by definition
+        * racy. Even with perfect serialisation in the driver, the hardware
+        * state is constantly advancing - the state we report to the user
+        * is stale.
+        *
+        * The critical information for the busy-ioctl is whether the object
+        * is idle as userspace relies on that to detect whether its next
+        * access will stall, or if it has missed submitting commands to
+        * the hardware allowing the GPU to stall. We never generate a
+        * false-positive for idleness, thus busy-ioctl is reliable at the
+        * most fundamental level, and we maintain the guarantee that a
+        * busy object left to itself will eventually become idle (and stay
+        * idle!).
+        *
+        * We allow ourselves the leeway of potentially misreporting the busy
+        * state because that is an optimisation heuristic that is constantly
+        * in flux. Being quickly able to detect the busy/idle state is much
+        * more important than accurate logging of exactly which engines were
+        * busy.
+        *
+        * For accuracy in reporting the engine, we could use
+        *
+        *      result = 0;
+        *      request = __i915_gem_active_get_rcu(active);
+        *      if (request) {
+        *              if (!i915_gem_request_completed(request))
+        *                      result = flag(request->engine->exec_id);
+        *              i915_gem_request_put(request);
+        *      }
+        *
+        * but that still remains susceptible to both hardware and userspace
+        * races. So we accept making the result of that race slightly worse,
+        * given the rarity of the race and its low impact on the result.
+        */
+       return flag(READ_ONCE(request->engine->exec_id));
+}
 
-       WARN_ON(vma->pin_count == 0);
-       WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
+static __always_inline unsigned int
+busy_check_reader(const struct i915_gem_active *active)
+{
+       return __busy_set_if_active(active, __busy_read_flag);
+}
 
-       --vma->pin_count;
+static __always_inline unsigned int
+busy_check_writer(const struct i915_gem_active *active)
+{
+       return __busy_set_if_active(active, __busy_write_id);
 }
 
 int
@@ -4639,47 +3933,64 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_busy *args = data;
        struct drm_i915_gem_object *obj;
-       int ret;
+       unsigned long active;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       args->busy = 0;
+       active = __I915_BO_ACTIVE(obj);
+       if (active) {
+               int idx;
 
-       /* Count all active objects as busy, even if they are currently not used
-        * by the gpu. Users of this interface expect objects to eventually
-        * become non-busy without any further actions, therefore emit any
-        * necessary flushes here.
-        */
-       ret = i915_gem_object_flush_active(obj);
-       if (ret)
-               goto unref;
+               /* Yes, the lookups are intentionally racy.
+                *
+                * First, we cannot simply rely on __I915_BO_ACTIVE. We have
+                * to regard the value as stale and as our ABI guarantees
+                * forward progress, we confirm the status of each active
+                * request with the hardware.
+                *
+                * Even though we guard the pointer lookup by RCU, that only
+                * guarantees that the pointer and its contents remain
+                * dereferencable and does *not* mean that the request we
+                * have is the same as the one being tracked by the object.
+                *
+                * Consider that we lookup the request just as it is being
+                * retired and freed. We take a local copy of the pointer,
+                * but before we add its engine into the busy set, the other
+                * thread reallocates it and assigns it to a task on another
+                * engine with a fresh and incomplete seqno. Guarding against
+                * that requires careful serialisation and reference counting,
+                * i.e. using __i915_gem_active_get_request_rcu(). We don't,
+                * instead we expect that if the result is busy, which engines
+                * are busy is not completely reliable - we only guarantee
+                * that the object was busy.
+                */
+               rcu_read_lock();
 
-       args->busy = 0;
-       if (obj->active) {
-               int i;
+               for_each_active(active, idx)
+                       args->busy |= busy_check_reader(&obj->last_read[idx]);
 
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       struct drm_i915_gem_request *req;
+               /* For ABI sanity, we only care that the write engine is in
+                * the set of read engines. This should be ensured by the
+                * ordering of setting last_read/last_write in
+                * i915_vma_move_to_active(), and then in reverse in retire.
+                * However, for good measure, we always report the last_write
+                * request as a busy read as well as being a busy write.
+                *
+                * We don't care that the set of active read/write engines
+                * may change during construction of the result, as it is
+                * equally liable to change before userspace can inspect
+                * the result.
+                */
+               args->busy |= busy_check_writer(&obj->last_write);
 
-                       req = obj->last_read_req[i];
-                       if (req)
-                               args->busy |= 1 << (16 + req->engine->exec_id);
-               }
-               if (obj->last_write_req)
-                       args->busy |= obj->last_write_req->engine->exec_id;
+               rcu_read_unlock();
        }
 
-unref:
-       drm_gem_object_unreference(&obj->base);
-unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       i915_gem_object_put_unlocked(obj);
+       return 0;
 }
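
For reference, a hedged sketch of how userspace might decode the resulting bitmask (a hypothetical helper built on libdrm; header names may vary by install). The field layout follows from the flag helpers above: the low 16 bits carry the exec_id of the last writer, the upper bits carry one flag per busy read engine:

    #include <stdbool.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static bool gem_object_is_idle(int fd, uint32_t handle)
    {
            struct drm_i915_gem_busy busy = { .handle = handle };
            uint16_t writer, readers;

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
                    return false;                   /* treat errors as busy */

            writer = busy.busy & 0xffff;            /* exec_id of the last writer, 0 if none */
            readers = busy.busy >> 16;              /* one flag per engine with an active reader */
            (void)writer;
            (void)readers;

            return busy.busy == 0;                  /* only idleness is guaranteed accurate */
    }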
 
 int
@@ -4710,19 +4021,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file_priv, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
 
-       if (i915_gem_obj_is_pinned(obj)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        if (obj->pages &&
-           obj->tiling_mode != I915_TILING_NONE &&
+           i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->madv == I915_MADV_WILLNEED)
                        i915_gem_object_unpin_pages(obj);
@@ -4739,8 +4045,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
        args->retained = obj->madv != __I915_MADV_PURGED;
 
-out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -4753,14 +4058,17 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
        INIT_LIST_HEAD(&obj->global_list);
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               INIT_LIST_HEAD(&obj->engine_list[i]);
+               init_request_active(&obj->last_read[i],
+                                   i915_gem_object_retire__read);
+       init_request_active(&obj->last_write,
+                           i915_gem_object_retire__write);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 
        obj->ops = ops;
 
-       obj->fence_reg = I915_FENCE_REG_NONE;
+       obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
        obj->madv = I915_MADV_WILLNEED;
 
        i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
@@ -4865,33 +4173,31 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        trace_i915_gem_object_destroy(obj);
 
+       /* All file-owned VMA should have been released by this point through
+        * i915_gem_close_object(), or earlier by i915_gem_context_close().
+        * However, the object may also be bound into the global GTT (e.g.
+        * older GPUs without per-process support, or for direct access through
+        * the GTT either for the user or for scanout). Those VMA still need to
+        * be unbound now.
+        */
        list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-               int ret;
-
-               vma->pin_count = 0;
-               ret = i915_vma_unbind(vma);
-               if (WARN_ON(ret == -ERESTARTSYS)) {
-                       bool was_interruptible;
-
-                       was_interruptible = dev_priv->mm.interruptible;
-                       dev_priv->mm.interruptible = false;
-
-                       WARN_ON(i915_vma_unbind(vma));
-
-                       dev_priv->mm.interruptible = was_interruptible;
-               }
+               GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+               GEM_BUG_ON(i915_vma_is_active(vma));
+               vma->flags &= ~I915_VMA_PIN_MASK;
+               i915_vma_close(vma);
        }
+       GEM_BUG_ON(obj->bind_count);
 
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)
                i915_gem_object_unpin_pages(obj);
 
-       WARN_ON(obj->frontbuffer_bits);
+       WARN_ON(atomic_read(&obj->frontbuffer_bits));
 
        if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
-           obj->tiling_mode != I915_TILING_NONE)
+           i915_gem_object_is_tiled(obj))
                i915_gem_object_unpin_pages(obj);
 
        if (WARN_ON(obj->pages_pin_count))
@@ -4899,7 +4205,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
-       i915_gem_object_free_mmap_offset(obj);
 
        BUG_ON(obj->pages);
 
@@ -4918,71 +4223,35 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
-                   vma->vm == vm)
-                       return vma;
-       }
-       return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-                                          const struct i915_ggtt_view *view)
-{
-       struct i915_vma *vma;
-
-       GEM_BUG_ON(!view);
-
-       list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
-                       return vma;
-       return NULL;
-}
-
-void i915_gem_vma_destroy(struct i915_vma *vma)
-{
-       WARN_ON(vma->node.allocated);
-
-       /* Keep the vma as a placeholder in the execbuffer reservation lists */
-       if (!list_empty(&vma->exec_list))
-               return;
-
-       if (!vma->is_ggtt)
-               i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
-       list_del(&vma->obj_link);
-
-       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-static void
-i915_gem_stop_engines(struct drm_device *dev)
+int i915_gem_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine;
-
-       for_each_engine(engine, dev_priv)
-               dev_priv->gt.stop_engine(engine);
-}
+       int ret;
 
-int
-i915_gem_suspend(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret = 0;
+       intel_suspend_gt_powersave(dev_priv);
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_wait_for_idle(dev_priv);
+
+       /* We have to flush all the executing contexts to main memory so
+        * that they can be saved in the hibernation image. To ensure the last
+        * context image is coherent, we have to switch away from it. That
+        * leaves the dev_priv->kernel_context still active when
+        * we actually suspend, and its image in memory may not match the GPU
+        * state. Fortunately, the kernel_context is disposable and we do
+        * not rely on its state.
+        */
+       ret = i915_gem_switch_to_kernel_context(dev_priv);
+       if (ret)
+               goto err;
+
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
        if (ret)
                goto err;
 
        i915_gem_retire_requests(dev_priv);
 
-       i915_gem_stop_engines(dev);
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
@@ -5002,6 +4271,22 @@ err:
        return ret;
 }
 
+void i915_gem_resume(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_restore_gtt_mappings(dev);
+
+       /* As we didn't flush the kernel context before suspend, we cannot
+        * guarantee that the context image is complete. So let's just reset
+        * it and start again.
+        */
+       dev_priv->gt.resume(dev_priv);
+
+       mutex_unlock(&dev->struct_mutex);
+}
+
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5054,53 +4339,6 @@ static void init_unused_rings(struct drm_device *dev)
        }
 }
 
-int i915_gem_init_engines(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
-
-       ret = intel_init_render_ring_buffer(dev);
-       if (ret)
-               return ret;
-
-       if (HAS_BSD(dev)) {
-               ret = intel_init_bsd_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_render_ring;
-       }
-
-       if (HAS_BLT(dev)) {
-               ret = intel_init_blt_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_bsd_ring;
-       }
-
-       if (HAS_VEBOX(dev)) {
-               ret = intel_init_vebox_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_blt_ring;
-       }
-
-       if (HAS_BSD2(dev)) {
-               ret = intel_init_bsd2_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_vebox_ring;
-       }
-
-       return 0;
-
-cleanup_vebox_ring:
-       intel_cleanup_engine(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-       intel_cleanup_engine(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-       intel_cleanup_engine(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-       intel_cleanup_engine(&dev_priv->engine[RCS]);
-
-       return ret;
-}
-
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
@@ -5167,6 +4405,27 @@ out:
        return ret;
 }
 
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+{
+       if (INTEL_INFO(dev_priv)->gen < 6)
+               return false;
+
+       /* TODO: make semaphores and Execlists play nicely together */
+       if (i915.enable_execlists)
+               return false;
+
+       if (value >= 0)
+               return value;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+               return false;
+#endif
+
+       return true;
+}
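
A sketch of the intended call site, which sits outside this hunk (assumed from the function's tristate-parameter shape): the module parameter is resolved once during driver load so later code can test a plain boolean:

    /* e.g. during driver init (call site assumed, not shown in this diff) */
    i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);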
+
 int i915_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5175,15 +4434,11 @@ int i915_gem_init(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
-               dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-               dev_priv->gt.init_engines = i915_gem_init_engines;
-               dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-               dev_priv->gt.stop_engine = intel_stop_engine;
+               dev_priv->gt.resume = intel_legacy_submission_resume;
+               dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        } else {
-               dev_priv->gt.execbuf_submit = intel_execlists_submission;
-               dev_priv->gt.init_engines = intel_logical_rings_init;
+               dev_priv->gt.resume = intel_lr_context_resume;
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
-               dev_priv->gt.stop_engine = intel_logical_ring_stop;
        }
 
        /* This is just a security blanket to placate dragons.
@@ -5195,24 +4450,27 @@ int i915_gem_init(struct drm_device *dev)
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        i915_gem_init_userptr(dev_priv);
-       i915_gem_init_ggtt(dev);
+
+       ret = i915_gem_init_ggtt(dev_priv);
+       if (ret)
+               goto out_unlock;
 
        ret = i915_gem_context_init(dev);
        if (ret)
                goto out_unlock;
 
-       ret = dev_priv->gt.init_engines(dev);
+       ret = intel_engines_init(dev);
        if (ret)
                goto out_unlock;
 
        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
-               /* Allow ring initialisation to fail by marking the GPU as
+               /* Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               i915_gem_set_wedged(dev_priv);
                ret = 0;
        }
 
@@ -5236,7 +4494,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 static void
 init_engine_lists(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&engine->active_list);
        INIT_LIST_HEAD(&engine->request_list);
 }
 
@@ -5244,6 +4501,7 @@ void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
+       int i;
 
        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
@@ -5259,6 +4517,13 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
        /* Initialize fence registers to zero */
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
+               struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+
+               fence->i915 = dev_priv;
+               fence->id = i;
+               list_add_tail(&fence->link, &dev_priv->mm.fence_list);
+       }
        i915_gem_restore_fences(dev);
 
        i915_gem_detect_bit_6_swizzle(dev);
@@ -5283,18 +4548,17 @@ i915_gem_load_init(struct drm_device *dev)
        dev_priv->requests =
                kmem_cache_create("i915_gem_request",
                                  sizeof(struct drm_i915_gem_request), 0,
-                                 SLAB_HWCACHE_ALIGN,
+                                 SLAB_HWCACHE_ALIGN |
+                                 SLAB_RECLAIM_ACCOUNT |
+                                 SLAB_DESTROY_BY_RCU,
                                  NULL);
 
-       INIT_LIST_HEAD(&dev_priv->vm_list);
        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        for (i = 0; i < I915_NUM_ENGINES; i++)
                init_engine_lists(&dev_priv->engine[i]);
-       for (i = 0; i < I915_MAX_NUM_FENCES; i++)
-               INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5304,13 +4568,13 @@ i915_gem_load_init(struct drm_device *dev)
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
        dev_priv->mm.interruptible = true;
 
-       mutex_init(&dev_priv->fb_tracking.lock);
+       atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+
+       spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
@@ -5320,11 +4584,19 @@ void i915_gem_load_cleanup(struct drm_device *dev)
        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
        kmem_cache_destroy(dev_priv->objects);
+
+       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+       rcu_barrier();
 }
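
The barrier is needed because SLAB_DESTROY_BY_RCU (set on the request cache above) defers only the freeing of whole slab pages, not of individual objects. An illustrative teardown ordering, not additional driver code:

    kmem_cache_free(dev_priv->requests, rq);    /* the slot may be reused immediately */
    /* ... */
    rcu_barrier();                              /* wait out readers and deferred page frees */
    kmem_cache_destroy(dev_priv->requests);     /* only now is teardown safe */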
 
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj;
+       struct list_head *phases[] = {
+               &dev_priv->mm.unbound_list,
+               &dev_priv->mm.bound_list,
+               NULL
+       }, **p;
 
        /* Called just before we write the hibernation image.
         *
@@ -5335,16 +4607,18 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
         *
         * To make sure the hibernation image contains the latest state,
         * we update that state just before writing out the image.
+        *
+        * To try to reduce the hibernation image size, we manually shrink
+        * the objects as well.
         */
 
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       }
+       i915_gem_shrink_all(dev_priv);
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       for (p = phases; *p; p++) {
+               list_for_each_entry(obj, *p, global_list) {
+                       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+                       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+               }
        }
 
        return 0;
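
The NULL-terminated phases[] array is an idiom for running one loop body over several lists; the same pattern reappears in i915_ppgtt_close() later in this series. In miniature (illustrative only; the list names and helper are hypothetical):

    struct list_head *phases[] = { &first_list, &second_list, NULL }, **p;
    struct drm_i915_gem_object *obj;

    for (p = phases; *p; p++)
            list_for_each_entry(obj, *p, global_list)
                    mark_cpu_domains(obj);      /* same treatment for every phase */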
@@ -5353,21 +4627,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_gem_request *request;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
-       while (!list_empty(&file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
-               list_del(&request->client_list);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list)
                request->file_priv = NULL;
-       }
        spin_unlock(&file_priv->mm.lock);
 
        if (!list_empty(&file_priv->rps.link)) {
@@ -5396,7 +4664,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
-       file_priv->bsd_ring = -1;
+       file_priv->bsd_engine = -1;
 
        ret = i915_gem_context_open(dev, file);
        if (ret)
@@ -5418,118 +4686,24 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
 {
+       /* Control of individual bits within the mask is guarded by
+        * the owning plane->mutex, i.e. we can never see concurrent
+        * manipulation of individual bits. But since the bitfield as a whole
+        * is updated using RMW, we need to use atomics in order to update
+        * the bits.
+        */
+       BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+                    sizeof(atomic_t) * BITS_PER_BYTE);
+
        if (old) {
-               WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
-               WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
-               old->frontbuffer_bits &= ~frontbuffer_bits;
+               WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+               atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
        }
 
        if (new) {
-               WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
-               WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
-               new->frontbuffer_bits |= frontbuffer_bits;
-       }
-}
-
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm)
-{
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_vma *vma;
-
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-       list_for_each_entry(vma, &o->vma_list, obj_link) {
-               if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
-                       return vma->node.start;
-       }
-
-       WARN(1, "%s vma for this object not found.\n",
-            i915_is_ggtt(vm) ? "global" : "ppgtt");
-       return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-                                 const struct i915_ggtt_view *view)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
-                       return vma->node.start;
-
-       WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
-       return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &o->vma_list, obj_link) {
-               if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
-                       return true;
+               WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+               atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
        }
-
-       return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-                                 const struct i915_ggtt_view *view)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->is_ggtt &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view) &&
-                   drm_mm_node_allocated(&vma->node))
-                       return true;
-
-       return false;
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (drm_mm_node_allocated(&vma->node))
-                       return true;
-
-       return false;
-}
-
-unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
-{
-       struct i915_vma *vma;
-
-       GEM_BUG_ON(list_empty(&o->vma_list));
-
-       list_for_each_entry(vma, &o->vma_list, obj_link) {
-               if (vma->is_ggtt &&
-                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-                       return vma->node.size;
-       }
-
-       return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->pin_count > 0)
-                       return true;
-
-       return false;
 }
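
A sketch of a typical caller (hypothetical; the flip paths are outside this hunk): on a page flip, the plane's frontbuffer bit migrates atomically from the outgoing object to the incoming one:

    /* hypothetical flip-path call site */
    i915_gem_track_fb(old_fb_obj, new_fb_obj,
                      INTEL_FRONTBUFFER_PRIMARY(pipe));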
 
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
@@ -5584,6 +4758,6 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        return obj;
 
 fail:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        return ERR_PTR(ret);
 }
index 3752d5daa4b2171cc9f3f45f312e7985407ae381..ed989596d9a38249c87ac223c71285abb5d512f5 100644 (file)
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
  * @pool: the batch buffer pool
  */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
                              struct i915_gem_batch_pool *pool)
 {
        int n;
 
-       pool->dev = dev;
+       pool->engine = engine;
 
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,18 +65,17 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
        int n;
 
-       WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+       lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-               while (!list_empty(&pool->cache_list[n])) {
-                       struct drm_i915_gem_object *obj =
-                               list_first_entry(&pool->cache_list[n],
-                                                struct drm_i915_gem_object,
-                                                batch_pool_link);
-
-                       list_del(&obj->batch_pool_link);
-                       drm_gem_object_unreference(&obj->base);
-               }
+               struct drm_i915_gem_object *obj, *next;
+
+               list_for_each_entry_safe(obj, next,
+                                        &pool->cache_list[n],
+                                        batch_pool_link)
+                       i915_gem_object_put(obj);
+
+               INIT_LIST_HEAD(&pool->cache_list[n]);
        }
 }
 
@@ -102,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        struct list_head *list;
        int n;
 
-       WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+       lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
        /* Compute a power-of-two bucket, but throw everything greater than
          * 16KiB into the same bucket: i.e. the buckets hold objects of
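
One plausible realisation of that bucketing, shown for orientation only since the computation itself falls outside this hunk: take log2 of the page count and clamp to the last list:

    n = fls(size >> PAGE_SHIFT) - 1;                /* 4KiB -> 0, 8KiB -> 1, 16KiB -> 2 */
    if (n >= ARRAY_SIZE(pool->cache_list))
            n = ARRAY_SIZE(pool->cache_list) - 1;   /* everything larger shares the last bucket */
    list = &pool->cache_list[n];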
@@ -115,13 +114,14 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 
        list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
                /* The batches are strictly LRU ordered */
-               if (tmp->active)
+               if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+                                            &tmp->base.dev->struct_mutex))
                        break;
 
                /* While we're looping, do some clean up */
                if (tmp->madv == __I915_MADV_PURGED) {
                        list_del(&tmp->batch_pool_link);
-                       drm_gem_object_unreference(&tmp->base);
+                       i915_gem_object_put(tmp);
                        continue;
                }
 
@@ -134,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        if (obj == NULL) {
                int ret;
 
-               obj = i915_gem_object_create(pool->dev, size);
+               obj = i915_gem_object_create(&pool->engine->i915->drm, size);
                if (IS_ERR(obj))
                        return obj;
 
index 848e90703eed6895668f103be8e172ced7e89f03..10d5ac4c00d385b7cb68bc701c67d64e54d1c55c 100644 (file)
 
 #include "i915_drv.h"
 
+struct intel_engine_cs;
+
 struct i915_gem_batch_pool {
-       struct drm_device *dev;
+       struct intel_engine_cs *engine;
        struct list_head cache_list[4];
 };
 
 /* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
                              struct i915_gem_batch_pool *pool);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
index 3c97f0e7a003758e6d60f3a4f65b040c91d3a316..df10f4e95736be96184027a7fd632523b493d1cc 100644 (file)
@@ -134,21 +134,6 @@ static int get_context_size(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static void i915_gem_context_clean(struct i915_gem_context *ctx)
-{
-       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-       struct i915_vma *vma, *next;
-
-       if (!ppgtt)
-               return;
-
-       list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-                                vm_link) {
-               if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
-                       break;
-       }
-}
-
 void i915_gem_context_free(struct kref *ctx_ref)
 {
        struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -156,13 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        trace_i915_context_free(ctx);
-
-       /*
-        * This context is going away and we need to remove all VMAs still
-        * around. This is to handle imported shared objects for which
-        * destructor did not run when their handles were closed.
-        */
-       i915_gem_context_clean(ctx);
+       GEM_BUG_ON(!ctx->closed);
 
        i915_ppgtt_put(ctx->ppgtt);
 
@@ -173,12 +152,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
                        continue;
 
                WARN_ON(ce->pin_count);
-               if (ce->ringbuf)
-                       intel_ringbuffer_free(ce->ringbuf);
+               if (ce->ring)
+                       intel_ring_free(ce->ring);
 
-               drm_gem_object_unreference(&ce->state->base);
+               i915_vma_put(ce->state);
        }
 
+       put_pid(ctx->pid);
        list_del(&ctx->link);
 
        ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -216,7 +196,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
                /* Failure shouldn't ever happen this early */
                if (WARN_ON(ret)) {
-                       drm_gem_object_unreference(&obj->base);
+                       i915_gem_object_put(obj);
                        return ERR_PTR(ret);
                }
        }
@@ -224,6 +204,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        return obj;
 }
 
+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+       struct list_head *phases[] = {
+               &vm->active_list,
+               &vm->inactive_list,
+               &vm->unbound_list,
+               NULL,
+       }, **phase;
+
+       GEM_BUG_ON(vm->closed);
+       vm->closed = true;
+
+       for (phase = phases; *phase; phase++) {
+               struct i915_vma *vma, *vn;
+
+               list_for_each_entry_safe(vma, vn, *phase, vm_link)
+                       if (!i915_vma_is_closed(vma))
+                               i915_vma_close(vma);
+       }
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+       GEM_BUG_ON(ctx->closed);
+       ctx->closed = true;
+       if (ctx->ppgtt)
+               i915_ppgtt_close(&ctx->ppgtt->base);
+       ctx->file_priv = ERR_PTR(-EBADF);
+       i915_gem_context_put(ctx);
+}
+
 static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 {
        int ret;
@@ -271,13 +282,24 @@ __create_hw_context(struct drm_device *dev,
        ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
        if (dev_priv->hw_context_size) {
-               struct drm_i915_gem_object *obj =
-                               i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+               struct drm_i915_gem_object *obj;
+               struct i915_vma *vma;
+
+               obj = i915_gem_alloc_context_obj(dev,
+                                                dev_priv->hw_context_size);
                if (IS_ERR(obj)) {
                        ret = PTR_ERR(obj);
                        goto err_out;
                }
-               ctx->engine[RCS].state = obj;
+
+               vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+               if (IS_ERR(vma)) {
+                       i915_gem_object_put(obj);
+                       ret = PTR_ERR(vma);
+                       goto err_out;
+               }
+
+               ctx->engine[RCS].state = vma;
        }
 
        /* Default context will never have a file_priv */
@@ -290,6 +312,9 @@ __create_hw_context(struct drm_device *dev,
                ret = DEFAULT_CONTEXT_HANDLE;
 
        ctx->file_priv = file_priv;
+       if (file_priv)
+               ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
        ctx->user_handle = ret;
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
@@ -305,7 +330,7 @@ __create_hw_context(struct drm_device *dev,
        return ctx;
 
 err_out:
-       i915_gem_context_unreference(ctx);
+       context_close(ctx);
        return ERR_PTR(ret);
 }
 
@@ -327,13 +352,14 @@ i915_gem_create_context(struct drm_device *dev,
                return ctx;
 
        if (USES_FULL_PPGTT(dev)) {
-               struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+               struct i915_hw_ppgtt *ppgtt =
+                       i915_ppgtt_create(to_i915(dev), file_priv);
 
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        idr_remove(&file_priv->context_idr, ctx->user_handle);
-                       i915_gem_context_unreference(ctx);
+                       context_close(ctx);
                        return ERR_CAST(ppgtt);
                }
 
@@ -388,28 +414,12 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
                struct intel_context *ce = &ctx->engine[engine->id];
 
                if (ce->state)
-                       i915_gem_object_ggtt_unpin(ce->state);
+                       i915_vma_unpin(ce->state);
 
-               i915_gem_context_unreference(ctx);
+               i915_gem_context_put(ctx);
        }
 }
 
-void i915_gem_context_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       lockdep_assert_held(&dev->struct_mutex);
-
-       if (i915.enable_execlists) {
-               struct i915_gem_context *ctx;
-
-               list_for_each_entry(ctx, &dev_priv->context_list, link)
-                       intel_lr_context_reset(dev_priv, ctx);
-       }
-
-       i915_gem_context_lost(dev_priv);
-}
-
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -504,7 +514,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 
        lockdep_assert_held(&dev->struct_mutex);
 
-       i915_gem_context_unreference(dctx);
+       context_close(dctx);
        dev_priv->kernel_context = NULL;
 
        ida_destroy(&dev_priv->context_hw_ida);
@@ -514,8 +524,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 {
        struct i915_gem_context *ctx = p;
 
-       ctx->file_priv = ERR_PTR(-EBADF);
-       i915_gem_context_unreference(ctx);
+       context_close(ctx);
        return 0;
 }
 
@@ -552,12 +561,13 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
        struct drm_i915_private *dev_priv = req->i915;
+       struct intel_ring *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
-               i915_semaphore_is_enabled(dev_priv) ?
-               hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
+               i915.semaphores ?
+               INTEL_INFO(dev_priv)->num_rings - 1 :
                0;
        int len, ret;
 
@@ -567,7 +577,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(dev_priv)) {
-               ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+               ret = engine->emit_flush(req, EMIT_INVALIDATE);
                if (ret)
                        return ret;
        }
@@ -589,64 +599,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
        if (INTEL_GEN(dev_priv) >= 7) {
-               intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+               intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
                if (num_rings) {
                        struct intel_engine_cs *signaller;
 
-                       intel_ring_emit(engine,
+                       intel_ring_emit(ring,
                                        MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_engine(signaller, dev_priv) {
                                if (signaller == engine)
                                        continue;
 
-                               intel_ring_emit_reg(engine,
+                               intel_ring_emit_reg(ring,
                                                    RING_PSMI_CTL(signaller->mmio_base));
-                               intel_ring_emit(engine,
+                               intel_ring_emit(ring,
                                                _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
                }
        }
 
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_emit(engine, MI_SET_CONTEXT);
-       intel_ring_emit(engine,
-                       i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-                       flags);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(ring, MI_SET_CONTEXT);
+       intel_ring_emit(ring,
+                       i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
         */
-       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_emit(ring, MI_NOOP);
 
        if (INTEL_GEN(dev_priv) >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
                        i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-                       intel_ring_emit(engine,
+                       intel_ring_emit(ring,
                                        MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_engine(signaller, dev_priv) {
                                if (signaller == engine)
                                        continue;
 
                                last_reg = RING_PSMI_CTL(signaller->mmio_base);
-                               intel_ring_emit_reg(engine, last_reg);
-                               intel_ring_emit(engine,
+                               intel_ring_emit_reg(ring, last_reg);
+                               intel_ring_emit(ring,
                                                _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
 
                        /* Insert a delay before the next switch! */
-                       intel_ring_emit(engine,
+                       intel_ring_emit(ring,
                                        MI_STORE_REGISTER_MEM |
                                        MI_SRM_LRM_GLOBAL_GTT);
-                       intel_ring_emit_reg(engine, last_reg);
-                       intel_ring_emit(engine, engine->scratch.gtt_offset);
-                       intel_ring_emit(engine, MI_NOOP);
+                       intel_ring_emit_reg(ring, last_reg);
+                       intel_ring_emit(ring,
+                                       i915_ggtt_offset(engine->scratch));
+                       intel_ring_emit(ring, MI_NOOP);
                }
-               intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+               intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
        }
 
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
        return ret;
 }
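
MI_SET_CONTEXT now takes the context image's address via i915_ggtt_offset() on the vma rather than a per-object GGTT lookup. A sketch of what that accessor plausibly reduces to (an assumption, not the in-tree definition):

    /* Assumed shape of i915_ggtt_offset(): with state tracked as a vma,
     * the GGTT offset is simply the drm_mm node start, valid while the
     * vma remains bound. */
    static inline u32 sketch_ggtt_offset(const struct i915_vma *vma)
    {
            return lower_32_bits(vma->node.start);
    }
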
@@ -654,7 +664,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
        u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int i, ret;
 
        if (!remap_info)
@@ -669,13 +679,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
         * here because no other code should access these registers other than
         * at initialization time.
         */
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
        for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
+               intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+               intel_ring_emit(ring, remap_info[i]);
        }
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
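
remap_l3() emits a single MI_LOAD_REGISTER_IMM packet: one header dword declaring the number of writes, then one (register, value) dword pair per write, closed with MI_NOOP to keep the stream qword-aligned. The general shape, with emit()/emit_reg() standing in for intel_ring_emit()/intel_ring_emit_reg():

    emit(ring, MI_LOAD_REGISTER_IMM(n));
    for (i = 0; i < n; i++) {
            emit_reg(ring, regs[i]);        /* destination register */
            emit(ring, values[i]);          /* immediate to load */
    }
    emit(ring, MI_NOOP);                    /* keep qword alignment */
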
@@ -744,6 +754,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        struct i915_gem_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
        struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+       struct i915_vma *vma = to->engine[RCS].state;
        struct i915_gem_context *from;
        u32 hw_flags;
        int ret, i;
@@ -751,10 +762,15 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        if (skip_rcs_switch(ppgtt, engine, to))
                return 0;
 
+       /* Clear this page out of any CPU caches for coherent swap-in/out. */
+       if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+               ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+               if (ret)
+                       return ret;
+       }
+
        /* Trying to pin first makes error handling easier. */
-       ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-                                   to->ggtt_alignment,
-                                   0);
+       ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
        if (ret)
                return ret;
 
@@ -767,18 +783,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         */
        from = engine->last_context;
 
-       /*
-        * Clear this page out of any CPU caches for coherent swap-in/out. Note
-        * that thanks to write = false in this call and us not setting any gpu
-        * write domains when putting a context object onto the active list
-        * (when switching away from it), this won't block.
-        *
-        * XXX: We need a real interface to do this instead of trickery.
-        */
-       ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
-       if (ret)
-               goto unpin_out;
-
        if (needs_pd_load_pre(ppgtt, engine, to)) {
                /* Older GENs and non render rings still want the load first,
                 * "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -787,7 +791,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                trace_switch_mm(engine, to);
                ret = ppgtt->switch_mm(ppgtt, req);
                if (ret)
-                       goto unpin_out;
+                       goto err;
        }
 
        if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -804,7 +808,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
                ret = mi_set_context(req, hw_flags);
                if (ret)
-                       goto unpin_out;
+                       goto err;
        }
 
        /* The backing object for the context is done after switching to the
@@ -814,8 +818,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -823,14 +825,12 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               from->engine[RCS].state->dirty = 1;
-
-               /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_ggtt_unpin(from->engine[RCS].state);
-               i915_gem_context_unreference(from);
+               i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+               /* state is kept alive until the next request */
+               i915_vma_unpin(from->engine[RCS].state);
+               i915_gem_context_put(from);
        }
-       i915_gem_context_reference(to);
-       engine->last_context = to;
+       engine->last_context = i915_gem_context_get(to);
 
        /* GEN8 does *not* require an explicit reload if the PDPs have been
         * setup, and we do not wish to move them.
@@ -872,8 +872,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
        return 0;
 
-unpin_out:
-       i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+       i915_vma_unpin(vma);
        return ret;
 }
 
@@ -894,8 +894,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
 
-       WARN_ON(i915.enable_execlists);
        lockdep_assert_held(&req->i915->drm.struct_mutex);
+       if (i915.enable_execlists)
+               return 0;
 
        if (!req->ctx->engine[engine->id].state) {
                struct i915_gem_context *to = req->ctx;
@@ -914,10 +915,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
                }
 
                if (to != engine->last_context) {
-                       i915_gem_context_reference(to);
                        if (engine->last_context)
-                               i915_gem_context_unreference(engine->last_context);
-                       engine->last_context = to;
+                               i915_gem_context_put(engine->last_context);
+                       engine->last_context = i915_gem_context_get(to);
                }
 
                return 0;
@@ -926,6 +926,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
        return do_rcs_switch(req);
 }
 
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
+       for_each_engine(engine, dev_priv) {
+               struct drm_i915_gem_request *req;
+               int ret;
+
+               if (engine->last_context == NULL)
+                       continue;
+
+               if (engine->last_context == dev_priv->kernel_context)
+                       continue;
+
+               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
+
+               ret = i915_switch_context(req);
+               i915_add_request_no_flush(req);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
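
This helper replaces the eviction code's private switch_to_pinned_context() (removed later in this patch). A caller is expected to follow it with a locked wait so user contexts actually lose their last-context pins; usage sketch mirroring i915_gem_evict_something() below:

    ret = i915_gem_switch_to_kernel_context(dev_priv);
    if (ret)
            return ret;

    ret = i915_gem_wait_for_idle(dev_priv,
                                 I915_WAIT_INTERRUPTIBLE |
                                 I915_WAIT_LOCKED);
    if (ret)
            return ret;

    i915_gem_retire_requests(dev_priv);
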
+
 static bool contexts_enabled(struct drm_device *dev)
 {
        return i915.enable_execlists || to_i915(dev)->hw_context_size;
@@ -985,7 +1012,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        }
 
        idr_remove(&file_priv->context_idr, ctx->user_handle);
-       i915_gem_context_unreference(ctx);
+       context_close(ctx);
        mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
deleted file mode 100644 (file)
index a565164..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-
-#if WATCH_LISTS
-int
-i915_verify_lists(struct drm_device *dev)
-{
-       static int warned;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *engine;
-       int err = 0;
-
-       if (warned)
-               return 0;
-
-       for_each_engine(engine, dev_priv) {
-               list_for_each_entry(obj, &engine->active_list,
-                                   engine_list[engine->id]) {
-                       if (obj->base.dev != dev ||
-                           !atomic_read(&obj->base.refcount.refcount)) {
-                               DRM_ERROR("%s: freed active obj %p\n",
-                                         engine->name, obj);
-                               err++;
-                               break;
-                       } else if (!obj->active ||
-                                  obj->last_read_req[engine->id] == NULL) {
-                               DRM_ERROR("%s: invalid active obj %p\n",
-                                         engine->name, obj);
-                               err++;
-                       } else if (obj->base.write_domain) {
-                               DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-                                         engine->name,
-                                         obj, obj->base.write_domain);
-                               err++;
-                       }
-               }
-       }
-
-       return warned = err;
-}
-#endif /* WATCH_LIST */
index 80bbe43a2e92a5b363d46d784fda9accd6f8d3f9..10265bb356041adc86988568f188f1fe60cb6058 100644 (file)
  * Authors:
  *     Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -115,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
        if (ret)
                return ERR_PTR(ret);
 
-       addr = i915_gem_object_pin_map(obj);
+       addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        mutex_unlock(&dev->struct_mutex);
 
        return addr;
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+                         struct dma_buf *dma_buf)
+{
+       struct reservation_object *resv = dma_buf->resv;
+       struct drm_i915_gem_request *req;
+       unsigned long active;
+       int idx;
+
+       active = __I915_BO_ACTIVE(obj);
+       if (!active)
+               return;
+
+       /* Serialise with execbuf to prevent concurrent fence-loops */
+       mutex_lock(&obj->base.dev->struct_mutex);
+
+       /* Mark the object for future fences before racily adding old fences */
+       obj->base.dma_buf = dma_buf;
+
+       ww_mutex_lock(&resv->lock, NULL);
+
+       for_each_active(active, idx) {
+               req = i915_gem_active_get(&obj->last_read[idx],
+                                         &obj->base.dev->struct_mutex);
+               if (!req)
+                       continue;
+
+               if (reservation_object_reserve_shared(resv) == 0)
+                       reservation_object_add_shared_fence(resv, &req->fence);
+
+               i915_gem_request_put(req);
+       }
+
+       req = i915_gem_active_get(&obj->last_write,
+                                 &obj->base.dev->struct_mutex);
+       if (req) {
+               reservation_object_add_excl_fence(resv, &req->fence);
+               i915_gem_request_put(req);
+       }
+
+       ww_mutex_unlock(&resv->lock);
+       mutex_unlock(&obj->base.dev->struct_mutex);
+}
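
Once export_fences() has attached the object's fences, an importer can synchronise against them without knowing anything about i915. A hypothetical consumer-side sketch, assuming the reservation_object API of this kernel generation:

    /* Wait for all shared and exclusive fences attached above.
     * Returns remaining timeout, 0 on timeout, or a negative errno. */
    long timeout = reservation_object_wait_timeout_rcu(dma_buf->resv,
                                                       true, /* wait_all */
                                                       true, /* interruptible */
                                                       MAX_SCHEDULE_TIMEOUT);
    if (timeout < 0)
            return timeout; /* e.g. -ERESTARTSYS */
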
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+       struct dma_buf *dma_buf;
 
        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
 
-
        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }
 
-       return dma_buf_export(&exp_info);
+       dma_buf = dma_buf_export(&exp_info);
+       if (IS_ERR(dma_buf))
+               return dma_buf;
+
+       export_fences(obj, dma_buf);
+       return dma_buf;
 }
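
From userspace this export path is reached through the PRIME ioctls. A hedged libdrm sketch (handle names are illustrative):

    #include <stdint.h>
    #include <xf86drm.h>

    /* Turn a GEM handle into a shareable dma-buf fd, which lands in
     * i915_gem_prime_export() above. */
    int export_bo(int drm_fd, uint32_t gem_handle)
    {
            int prime_fd;

            if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
                    return -1;
            return prime_fd; /* pass over a socket, import elsewhere */
    }
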
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -278,8 +330,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                         * Importing dmabuf exported from our own gem increases
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
-                       drm_gem_object_reference(&obj->base);
-                       return &obj->base;
+                       return &i915_gem_object_get(obj)->base;
                }
        }
 
@@ -300,6 +351,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
 
+       /* We use GTT as shorthand for a coherent domain, one that is
+        * neither in the GPU cache nor in the CPU cache, where all
+        * writes are immediately visible in memory. (That's not strictly
+        * true, but it's close! There are internal buffers such as the
+        * write-combined buffer or a delay through the chipset for GTT
+        * writes that do require us to treat GTT as a separate cache domain.)
+        */
+       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+       obj->base.write_domain = 0;
+
        return &obj->base;
 
 fail_detach:
index 3c1280ec7ff648979ea96a17325a5aa6924e688d..5b6f81c1dbca44786cf3243eb56494e128041e8d 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+static bool
+gpu_is_idle(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
 
-       if (i915.enable_execlists)
-               return 0;
-
        for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *req;
-               int ret;
-
-               if (engine->last_context == NULL)
-                       continue;
-
-               if (engine->last_context == dev_priv->kernel_context)
-                       continue;
-
-               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
-               if (IS_ERR(req))
-                       return PTR_ERR(req);
-
-               ret = i915_switch_context(req);
-               i915_add_request_no_flush(req);
-               if (ret)
-                       return ret;
+               if (intel_engine_is_active(engine))
+                       return false;
        }
 
-       return 0;
+       return true;
 }
 
-
 static bool
-mark_free(struct i915_vma *vma, struct list_head *unwind)
+mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 {
-       if (vma->pin_count)
+       if (i915_vma_is_pinned(vma))
                return false;
 
        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;
 
+       if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+               return false;
+
        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
 }
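
mark_free() feeds the drm_mm eviction-scan protocol: candidate blocks are added speculatively until a large-enough hole forms, and every scanned block must be removed again before any other operation on that drm_mm. A condensed sketch of the protocol (variable names assumed):

    drm_mm_init_scan(&vm->mm, size, alignment, cache_level);

    found = false;
    list_for_each_entry(vma, &vm->inactive_list, vm_link) {
            list_add(&vma->exec_list, &scan_list);
            if (drm_mm_scan_add_block(&vma->node)) {
                    found = true;   /* a suitable hole now exists */
                    break;
            }
    }

    /* Removal is mandatory and reports which blocks form the hole. */
    list_for_each_entry_safe(vma, next, &scan_list, exec_list)
            if (!drm_mm_scan_remove_block(&vma->node) || !found)
                    list_del_init(&vma->exec_list);
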
 
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
- * @dev: drm_device
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
@@ -102,42 +86,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  * memory in e.g. the shrinker.
  */
 int
-i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
-                        int min_size, unsigned alignment, unsigned cache_level,
-                        unsigned long start, unsigned long end,
+i915_gem_evict_something(struct i915_address_space *vm,
+                        u64 min_size, u64 alignment,
+                        unsigned cache_level,
+                        u64 start, u64 end,
                         unsigned flags)
 {
-       struct list_head eviction_list, unwind_list;
-       struct i915_vma *vma;
-       int ret = 0;
-       int pass = 0;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct list_head eviction_list;
+       struct list_head *phases[] = {
+               &vm->inactive_list,
+               &vm->active_list,
+               NULL,
+       }, **phase;
+       struct i915_vma *vma, *next;
+       int ret;
 
-       trace_i915_gem_evict(dev, min_size, alignment, flags);
+       trace_i915_gem_evict(vm, min_size, alignment, flags);
 
        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
-        * retirement order. The next objects to retire are those on the (per
-        * ring) active list that do not have an outstanding flush. Once the
-        * hardware reports completion (the seqno is updated after the
-        * batchbuffer has been finished) the clean buffer objects would
-        * be retired to the inactive list. Any dirty objects would be added
-        * to the tail of the flushing list. So after processing the clean
-        * active objects we need to emit a MI_FLUSH to retire the flushing
-        * list, hence the retirement order of the flushing list is in
-        * advance of the dirty objects on the active lists.
+        * retirement order. The next objects to retire are those in flight,
+        * on the active list, again in retirement order.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
-        *   2. Clean active objects
-        *   3. Flushing list
-        *   4. Dirty active objects.
+        *   2. Active objects (will stall on unbinding)
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */
-
-       INIT_LIST_HEAD(&unwind_list);
        if (start != 0 || end != vm->total) {
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level,
@@ -145,96 +124,86 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
-search_again:
-       /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(vma, &vm->inactive_list, vm_link) {
-               if (mark_free(vma, &unwind_list))
-                       goto found;
-       }
-
        if (flags & PIN_NONBLOCK)
-               goto none;
+               phases[1] = NULL;
 
-       /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(vma, &vm->active_list, vm_link) {
-               if (mark_free(vma, &unwind_list))
-                       goto found;
-       }
+search_again:
+       INIT_LIST_HEAD(&eviction_list);
+       phase = phases;
+       do {
+               list_for_each_entry(vma, *phase, vm_link)
+                       if (mark_free(vma, flags, &eviction_list))
+                               goto found;
+       } while (*++phase);
 
-none:
        /* Nothing found, clean up and bail out! */
-       while (!list_empty(&unwind_list)) {
-               vma = list_first_entry(&unwind_list,
-                                      struct i915_vma,
-                                      exec_list);
+       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);
 
-               list_del_init(&vma->exec_list);
+               INIT_LIST_HEAD(&vma->exec_list);
        }
 
        /* Can we unpin some objects such as idle hw contents,
-        * or pending flips?
+        * or pending flips? But since only the GGTT has global entries
+        * such as scanouts, ringbuffers and contexts, we can skip the
+        * purge when inspecting per-process local address spaces.
         */
-       if (flags & PIN_NONBLOCK)
+       if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;
 
-       /* Only idle the GPU and repeat the search once */
-       if (pass++ == 0) {
-               struct drm_i915_private *dev_priv = to_i915(dev);
-
-               if (i915_is_ggtt(vm)) {
-                       ret = switch_to_pinned_context(dev_priv);
-                       if (ret)
-                               return ret;
-               }
-
-               ret = i915_gem_wait_for_idle(dev_priv);
-               if (ret)
-                       return ret;
-
-               i915_gem_retire_requests(dev_priv);
-               goto search_again;
+       if (gpu_is_idle(dev_priv)) {
+               /* If we still have pending pageflip completions, drop
+                * back to userspace to give our workqueues time to
+                * acquire our locks and unpin the old scanouts.
+                */
+               return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
        }
 
-       /* If we still have pending pageflip completions, drop
-        * back to userspace to give our workqueues time to
-        * acquire our locks and unpin the old scanouts.
+       /* Not everything in the GGTT is tracked via vma (otherwise we
+        * could evict as required with minimal stalling) so we are forced
+        * to idle the GPU and explicitly retire outstanding requests in
+        * the hopes that we can then remove contexts and the like only
+        * bound by their active reference.
         */
-       return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+       ret = i915_gem_switch_to_kernel_context(dev_priv);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests(dev_priv);
+       goto search_again;
 
 found:
        /* drm_mm doesn't allow any other operations while
-        * scanning, therefore store to be evicted objects on a
-        * temporary list. */
-       INIT_LIST_HEAD(&eviction_list);
-       while (!list_empty(&unwind_list)) {
-               vma = list_first_entry(&unwind_list,
-                                      struct i915_vma,
-                                      exec_list);
-               if (drm_mm_scan_remove_block(&vma->node)) {
-                       list_move(&vma->exec_list, &eviction_list);
-                       drm_gem_object_reference(&vma->obj->base);
-                       continue;
-               }
-               list_del_init(&vma->exec_list);
+        * scanning, therefore store to-be-evicted objects on a
+        * temporary list and take a reference for all before
+        * calling unbind (which may remove the active reference
+        * of any of our objects, thus corrupting the list).
+        */
+       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+               if (drm_mm_scan_remove_block(&vma->node))
+                       __i915_vma_pin(vma);
+               else
+                       list_del_init(&vma->exec_list);
        }
 
        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
-               struct drm_gem_object *obj;
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);
 
-               obj =  &vma->obj->base;
                list_del_init(&vma->exec_list);
+               __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
-
-               drm_gem_object_unreference(obj);
        }
-
        return ret;
 }
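
Caller-side, eviction typically backs an insertion retry loop: a failed node insert triggers one eviction pass, then the insert is retried. A sketch under that assumption, using the new signature:

    do {
            err = drm_mm_insert_node_in_range_generic(&vm->mm, &node,
                                                      size, alignment,
                                                      cache_level,
                                                      start, end,
                                                      DRM_MM_SEARCH_DEFAULT,
                                                      DRM_MM_CREATE_DEFAULT);
            if (err != -ENOSPC)
                    break;

            err = i915_gem_evict_something(vm, size, alignment,
                                           cache_level, start, end, flags);
    } while (err == 0);
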
 
@@ -256,8 +225,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
 
                vma = container_of(node, typeof(*vma), node);
 
-               if (vma->pin_count) {
-                       if (!vma->exec_entry || (vma->pin_count > 1))
+               if (i915_vma_is_pinned(vma)) {
+                       if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
                                /* Object is pinned for some other use */
                                return -EBUSY;
 
@@ -303,22 +272,23 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
                struct drm_i915_private *dev_priv = to_i915(vm->dev);
 
                if (i915_is_ggtt(vm)) {
-                       ret = switch_to_pinned_context(dev_priv);
+                       ret = i915_gem_switch_to_kernel_context(dev_priv);
                        if (ret)
                                return ret;
                }
 
-               ret = i915_gem_wait_for_idle(dev_priv);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        return ret;
 
                i915_gem_retire_requests(dev_priv);
-
                WARN_ON(!list_empty(&vm->active_list));
        }
 
        list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-               if (vma->pin_count == 0)
+               if (!i915_vma_is_pinned(vma))
                        WARN_ON(i915_vma_unbind(vma));
 
        return 0;
index 1978633e7549ad4e7c7fd0f0d4eacfe4fce229c9..33c85227643dbc04cc377ea374146ecd6e3804b5 100644 (file)
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
+#include "intel_frontbuffer.h"
+
+#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
-#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define  __EXEC_OBJECT_HAS_PIN         (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE       (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP       (1<<29)
+#define  __EXEC_OBJECT_NEEDS_BIAS      (1<<28)
+#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
+struct i915_execbuffer_params {
+       struct drm_device               *dev;
+       struct drm_file                 *file;
+       struct i915_vma                 *batch;
+       u32                             dispatch_flags;
+       u32                             args_batch_start_offset;
+       struct intel_engine_cs          *engine;
+       struct i915_gem_context         *ctx;
+       struct drm_i915_gem_request     *request;
+};
+
 struct eb_vmas {
+       struct drm_i915_private *i915;
        struct list_head vmas;
        int and;
        union {
@@ -51,7 +71,8 @@ struct eb_vmas {
 };
 
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+eb_create(struct drm_i915_private *i915,
+         struct drm_i915_gem_execbuffer2 *args)
 {
        struct eb_vmas *eb = NULL;
 
@@ -78,6 +99,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        } else
                eb->and = -args->buffer_count;
 
+       eb->i915 = i915;
        INIT_LIST_HEAD(&eb->vmas);
        return eb;
 }
@@ -89,6 +111,26 @@ eb_reset(struct eb_vmas *eb)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
+static struct i915_vma *
+eb_get_batch(struct eb_vmas *eb)
+{
+       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
+               vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+       return vma;
+}
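
The __EXEC_OBJECT_NEEDS_BIAS flag set here is consumed at reserve time, where it plausibly becomes a minimum-offset pin constraint; a sketch of that translation (the reserve-path code itself is not shown in this hunk):

    if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
            flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
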
+
 static int
 eb_lookup_vmas(struct eb_vmas *eb,
               struct drm_i915_gem_exec_object2 *exec,
@@ -122,7 +164,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        goto err;
                }
 
-               drm_gem_object_reference(&obj->base);
+               i915_gem_object_get(obj);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
        spin_unlock(&file->table_lock);
@@ -143,8 +185,8 @@ eb_lookup_vmas(struct eb_vmas *eb,
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-               if (IS_ERR(vma)) {
+               vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+               if (unlikely(IS_ERR(vma))) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
                        goto err;
@@ -175,7 +217,7 @@ err:
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
-               drm_gem_object_unreference(&obj->base);
+               i915_gem_object_put(obj);
        }
        /*
         * Objects already transferred to the vmas list will be unreferenced by
@@ -208,7 +250,6 @@ static void
 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry;
-       struct drm_i915_gem_object *obj = vma->obj;
 
        if (!drm_mm_node_allocated(&vma->node))
                return;
@@ -216,10 +257,10 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
        entry = vma->exec_entry;
 
        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
+               i915_vma_unpin_fence(vma);
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               vma->pin_count--;
+               __i915_vma_unpin(vma);
 
        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -234,13 +275,19 @@ static void eb_destroy(struct eb_vmas *eb)
                                       exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
-               drm_gem_object_unreference(&vma->obj->base);
+               i915_vma_put(vma);
        }
        kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
+       if (!i915_gem_object_has_struct_page(obj))
+               return false;
+
+       if (DBG_USE_CPU_RELOC)
+               return DBG_USE_CPU_RELOC > 0;
+
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                obj->cache_level != I915_CACHE_NONE);
@@ -265,144 +312,265 @@ static inline uint64_t gen8_noncanonical_addr(uint64_t address)
 }
 
 static inline uint64_t
-relocation_target(struct drm_i915_gem_relocation_entry *reloc,
+relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
                  uint64_t target_offset)
 {
        return gen8_canonical_addr((int)reloc->delta + target_offset);
 }
 
-static int
-relocate_entry_cpu(struct drm_i915_gem_object *obj,
-                  struct drm_i915_gem_relocation_entry *reloc,
-                  uint64_t target_offset)
+struct reloc_cache {
+       struct drm_i915_private *i915;
+       struct drm_mm_node node;
+       unsigned long vaddr;
+       unsigned int page;
+       bool use_64bit_reloc;
+};
+
+static void reloc_cache_init(struct reloc_cache *cache,
+                            struct drm_i915_private *i915)
 {
-       struct drm_device *dev = obj->base.dev;
-       uint32_t page_offset = offset_in_page(reloc->offset);
-       uint64_t delta = relocation_target(reloc, target_offset);
-       char *vaddr;
-       int ret;
+       cache->page = -1;
+       cache->vaddr = 0;
+       cache->i915 = i915;
+       cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+       cache->node.allocated = false;
+}
 
-       ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (ret)
-               return ret;
+static inline void *unmask_page(unsigned long p)
+{
+       return (void *)(uintptr_t)(p & PAGE_MASK);
+}
 
-       vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-                               reloc->offset >> PAGE_SHIFT));
-       *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
+static inline unsigned int unmask_flags(unsigned long p)
+{
+       return p & ~PAGE_MASK;
+}
+
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
+
+static void reloc_cache_fini(struct reloc_cache *cache)
+{
+       void *vaddr;
+
+       if (!cache->vaddr)
+               return;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+       vaddr = unmask_page(cache->vaddr);
+       if (cache->vaddr & KMAP) {
+               if (cache->vaddr & CLFLUSH_AFTER)
+                       mb();
 
-               if (page_offset == 0) {
-                       kunmap_atomic(vaddr);
-                       vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-                           (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+               kunmap_atomic(vaddr);
+               i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+       } else {
+               wmb();
+               io_mapping_unmap_atomic((void __iomem *)vaddr);
+               if (cache->node.allocated) {
+                       struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+                       ggtt->base.clear_range(&ggtt->base,
+                                              cache->node.start,
+                                              cache->node.size,
+                                              true);
+                       drm_mm_remove_node(&cache->node);
+               } else {
+                       i915_vma_unpin((struct i915_vma *)cache->node.mm);
                }
+       }
+}
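
The cache tags its mapping address by packing flags (KMAP plus any CLFLUSH_* bits) into the low bits left free by page alignment, which is why unmask_page()/unmask_flags() are simple masks. The invariant, spelled out:

    unsigned long tagged = (unsigned long)vaddr | KMAP | flushes;
    void *mapping        = (void *)(tagged & PAGE_MASK);  /* unmask_page()  */
    unsigned int flags   = tagged & ~PAGE_MASK;           /* unmask_flags() */
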
+
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+                       struct reloc_cache *cache,
+                       int page)
+{
+       void *vaddr;
+
+       if (cache->vaddr) {
+               kunmap_atomic(unmask_page(cache->vaddr));
+       } else {
+               unsigned int flushes;
+               int ret;
 
-               *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
+               ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+               BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+               cache->vaddr = flushes | KMAP;
+               cache->node.mm = (void *)obj;
+               if (flushes)
+                       mb();
        }
 
-       kunmap_atomic(vaddr);
+       vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+       cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+       cache->page = page;
 
-       return 0;
+       return vaddr;
 }
 
-static int
-relocate_entry_gtt(struct drm_i915_gem_object *obj,
-                  struct drm_i915_gem_relocation_entry *reloc,
-                  uint64_t target_offset)
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+                        struct reloc_cache *cache,
+                        int page)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       uint64_t delta = relocation_target(reloc, target_offset);
-       uint64_t offset;
-       void __iomem *reloc_page;
-       int ret;
+       struct i915_ggtt *ggtt = &cache->i915->ggtt;
+       unsigned long offset;
+       void *vaddr;
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-       if (ret)
-               return ret;
+       if (cache->node.allocated) {
+               wmb();
+               ggtt->base.insert_page(&ggtt->base,
+                                      i915_gem_object_get_dma_address(obj, page),
+                                      cache->node.start, I915_CACHE_NONE, 0);
+               cache->page = page;
+               return unmask_page(cache->vaddr);
+       }
 
-       ret = i915_gem_object_put_fence(obj);
-       if (ret)
-               return ret;
+       if (cache->vaddr) {
+               io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+       } else {
+               struct i915_vma *vma;
+               int ret;
 
-       /* Map the page containing the relocation we're going to perform.  */
-       offset = i915_gem_obj_ggtt_offset(obj);
-       offset += reloc->offset;
-       reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
-                                             offset & PAGE_MASK);
-       iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
-
-       if (INTEL_INFO(dev)->gen >= 8) {
-               offset += sizeof(uint32_t);
-
-               if (offset_in_page(offset) == 0) {
-                       io_mapping_unmap_atomic(reloc_page);
-                       reloc_page =
-                               io_mapping_map_atomic_wc(ggtt->mappable,
-                                                        offset);
+               if (use_cpu_reloc(obj))
+                       return NULL;
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, true);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                              PIN_MAPPABLE | PIN_NONBLOCK);
+               if (IS_ERR(vma)) {
+                       memset(&cache->node, 0, sizeof(cache->node));
+                       ret = drm_mm_insert_node_in_range_generic
+                               (&ggtt->base.mm, &cache->node,
+                                4096, 0, 0,
+                                0, ggtt->mappable_end,
+                                DRM_MM_SEARCH_DEFAULT,
+                                DRM_MM_CREATE_DEFAULT);
+                       if (ret)
+                               return ERR_PTR(ret);
+               } else {
+                       ret = i915_vma_put_fence(vma);
+                       if (ret) {
+                               i915_vma_unpin(vma);
+                               return ERR_PTR(ret);
+                       }
+
+                       cache->node.start = vma->node.start;
+                       cache->node.mm = (void *)vma;
                }
+       }
 
-               iowrite32(upper_32_bits(delta),
-                         reloc_page + offset_in_page(offset));
+       offset = cache->node.start;
+       if (cache->node.allocated) {
+               ggtt->base.insert_page(&ggtt->base,
+                                      i915_gem_object_get_dma_address(obj, page),
+                                      offset, I915_CACHE_NONE, 0);
+       } else {
+               offset += page << PAGE_SHIFT;
        }
 
-       io_mapping_unmap_atomic(reloc_page);
+       vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+       cache->page = page;
+       cache->vaddr = (unsigned long)vaddr;
 
-       return 0;
+       return vaddr;
 }
 
-static void
-clflush_write32(void *addr, uint32_t value)
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+                        struct reloc_cache *cache,
+                        int page)
 {
-       /* This is not a fast path, so KISS. */
-       drm_clflush_virt_range(addr, sizeof(uint32_t));
-       *(uint32_t *)addr = value;
-       drm_clflush_virt_range(addr, sizeof(uint32_t));
+       void *vaddr;
+
+       if (cache->page == page) {
+               vaddr = unmask_page(cache->vaddr);
+       } else {
+               vaddr = NULL;
+               if ((cache->vaddr & KMAP) == 0)
+                       vaddr = reloc_iomap(obj, cache, page);
+               if (!vaddr)
+                       vaddr = reloc_kmap(obj, cache, page);
+       }
+
+       return vaddr;
 }
 
-static int
-relocate_entry_clflush(struct drm_i915_gem_object *obj,
-                      struct drm_i915_gem_relocation_entry *reloc,
-                      uint64_t target_offset)
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
 {
-       struct drm_device *dev = obj->base.dev;
-       uint32_t page_offset = offset_in_page(reloc->offset);
-       uint64_t delta = relocation_target(reloc, target_offset);
-       char *vaddr;
-       int ret;
+       if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+               if (flushes & CLFLUSH_BEFORE) {
+                       clflushopt(addr);
+                       mb();
+               }
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-       if (ret)
-               return ret;
+               *addr = value;
 
-       vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-                               reloc->offset >> PAGE_SHIFT));
-       clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+               /* Writes to the same cacheline are serialised by the CPU
+                * (including clflush). On the write path, we only require
+                * that it hits memory in an orderly fashion and place
+                * mb barriers at the start and end of the relocation phase
+                * to ensure ordering of clflush wrt the system.
+                */
+               if (flushes & CLFLUSH_AFTER)
+                       clflushopt(addr);
+       } else
+               *addr = value;
+}
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+static int
+relocate_entry(struct drm_i915_gem_object *obj,
+              const struct drm_i915_gem_relocation_entry *reloc,
+              struct reloc_cache *cache,
+              u64 target_offset)
+{
+       u64 offset = reloc->offset;
+       bool wide = cache->use_64bit_reloc;
+       void *vaddr;
+
+       target_offset = relocation_target(reloc, target_offset);
+repeat:
+       vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       clflush_write32(vaddr + offset_in_page(offset),
+                       lower_32_bits(target_offset),
+                       cache->vaddr);
+
+       if (wide) {
+               offset += sizeof(u32);
+               target_offset >>= 32;
+               wide = false;
+               goto repeat;
+       }
 
-               if (page_offset == 0) {
-                       kunmap_atomic(vaddr);
-                       vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-                           (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
-               }
+       return 0;
+}
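
On gen8+ a relocation is 64 bits wide; the repeat loop writes it as two 32-bit halves so the upper half can land on a different page through the same cache. An equivalent straight-line form (write32() stands in for the cached store):

    write32(offset, lower_32_bits(target_offset));
    if (cache->use_64bit_reloc)
            write32(offset + sizeof(u32), upper_32_bits(target_offset));
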
 
-               clflush_write32(vaddr + page_offset, upper_32_bits(delta));
-       }
+static bool object_is_idle(struct drm_i915_gem_object *obj)
+{
+       unsigned long active = i915_gem_object_get_active(obj);
+       int idx;
 
-       kunmap_atomic(vaddr);
+       for_each_active(active, idx) {
+               if (!i915_gem_active_is_idle(&obj->last_read[idx],
+                                            &obj->base.dev->struct_mutex))
+                       return false;
+       }
 
-       return 0;
+       return true;
 }
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
-                                  struct drm_i915_gem_relocation_entry *reloc)
+                                  struct drm_i915_gem_relocation_entry *reloc,
+                                  struct reloc_cache *cache)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
@@ -465,7 +633,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset >
-               obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+                    obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
@@ -482,26 +650,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        }
 
        /* We can't wait for rendering with pagefaults disabled */
-       if (obj->active && pagefault_disabled())
+       if (pagefault_disabled() && !object_is_idle(obj))
                return -EFAULT;
 
-       if (use_cpu_reloc(obj))
-               ret = relocate_entry_cpu(obj, reloc, target_offset);
-       else if (obj->map_and_fenceable)
-               ret = relocate_entry_gtt(obj, reloc, target_offset);
-       else if (static_cpu_has(X86_FEATURE_CLFLUSH))
-               ret = relocate_entry_clflush(obj, reloc, target_offset);
-       else {
-               WARN_ONCE(1, "Impossible case in relocation handling\n");
-               ret = -ENODEV;
-       }
-
+       ret = relocate_entry(obj, reloc, cache, target_offset);
        if (ret)
                return ret;
 
        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;
-
        return 0;
 }
 
@@ -513,9 +670,11 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       int remain, ret;
+       struct reloc_cache cache;
+       int remain, ret = 0;
 
        user_relocs = u64_to_user_ptr(entry->relocs_ptr);
+       reloc_cache_init(&cache, eb->i915);
 
        remain = entry->relocation_count;
        while (remain) {
@@ -525,19 +684,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                        count = ARRAY_SIZE(stack_reloc);
                remain -= count;
 
-               if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
-                       return -EFAULT;
+               if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
 
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
                        if (ret)
-                               return ret;
+                               goto out;
 
                        if (r->presumed_offset != offset &&
-                           __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
-                               return -EFAULT;
+                           __put_user(r->presumed_offset,
+                                      &user_relocs->presumed_offset)) {
+                               ret = -EFAULT;
+                               goto out;
                        }
 
                        user_relocs++;
@@ -545,7 +708,9 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                } while (--count);
        }
 
-       return 0;
+out:
+       reloc_cache_fini(&cache);
+       return ret;
 #undef N_RELOC
 }
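
The loop above pulls user relocations onto the stack in bounded batches, keeping stack usage capped while the copy stays safe under disabled pagefaults. Its skeleton (error unwinding through the reloc cache elided):

    while (remain) {
            int count = min_t(int, remain, ARRAY_SIZE(stack_reloc));

            if (__copy_from_user_inatomic(stack_reloc, user_relocs,
                                          count * sizeof(stack_reloc[0])))
                    return -EFAULT; /* the real path unwinds via 'out' */

            remain -= count;
            /* ... relocate count entries, advancing user_relocs ... */
    }
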
 
@@ -555,15 +720,18 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
                                      struct drm_i915_gem_relocation_entry *relocs)
 {
        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       int i, ret;
+       struct reloc_cache cache;
+       int i, ret = 0;
 
+       reloc_cache_init(&cache, eb->i915);
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
                if (ret)
-                       return ret;
+                       break;
        }
+       reloc_cache_fini(&cache);
 
-       return 0;
+       return ret;
 }
 
 static int
@@ -626,23 +794,27 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                        flags |= PIN_HIGH;
        }
 
-       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
-       if ((ret == -ENOSPC  || ret == -E2BIG) &&
+       ret = i915_vma_pin(vma,
+                          entry->pad_to_size,
+                          entry->alignment,
+                          flags);
+       if ((ret == -ENOSPC || ret == -E2BIG) &&
            only_mappable_for_reloc(entry->flags))
-               ret = i915_gem_object_pin(obj, vma->vm,
-                                         entry->alignment,
-                                         flags & ~PIN_MAPPABLE);
+               ret = i915_vma_pin(vma,
+                                  entry->pad_to_size,
+                                  entry->alignment,
+                                  flags & ~PIN_MAPPABLE);
        if (ret)
                return ret;
 
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-               ret = i915_gem_object_get_fence(obj);
+               ret = i915_vma_get_fence(vma);
                if (ret)
                        return ret;
 
-               if (i915_gem_object_pin_fence(obj))
+               if (i915_vma_pin_fence(vma))
                        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
        }
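
Reservation first attempts i915_vma_pin() with the caller's full placement request; only when that fails with -ENOSPC or -E2BIG, and the mappable requirement was merely a relocation convenience, does it retry without PIN_MAPPABLE. A hedged sketch of that relax-and-retry shape (flag value and helper invented for illustration):

    #include <errno.h>

    #define PIN_MAPPABLE_SKETCH 0x1         /* illustrative flag bit */

    /* stand-in allocator: tight placements fail when mappable is demanded */
    static int try_pin(unsigned int flags)
    {
            return (flags & PIN_MAPPABLE_SKETCH) ? -ENOSPC : 0;
    }

    static int pin_with_fallback(unsigned int flags, int mappable_optional)
    {
            int ret = try_pin(flags);

            /* relax only the constraint the caller marked optional */
            if ((ret == -ENOSPC || ret == -E2BIG) && mappable_optional)
                    ret = try_pin(flags & ~PIN_MAPPABLE_SKETCH);
            return ret;
    }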
 
@@ -667,7 +839,7 @@ need_reloc_mappable(struct i915_vma *vma)
        if (entry->relocation_count == 0)
                return false;
 
-       if (!vma->is_ggtt)
+       if (!i915_vma_is_ggtt(vma))
                return false;
 
        /* See also use_cpu_reloc() */
@@ -684,14 +856,17 @@ static bool
 eb_vma_misplaced(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       struct drm_i915_gem_object *obj = vma->obj;
 
-       WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+       WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+               !i915_vma_is_ggtt(vma));
 
        if (entry->alignment &&
            vma->node.start & (entry->alignment - 1))
                return true;
 
+       if (vma->node.size < entry->pad_to_size)
+               return true;
+
        if (entry->flags & EXEC_OBJECT_PINNED &&
            vma->node.start != entry->offset)
                return true;
@@ -701,7 +876,8 @@ eb_vma_misplaced(struct i915_vma *vma)
                return true;
 
        /* avoid costly ping-pong once a batch bo ended up non-mappable */
-       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+           !i915_vma_is_map_and_fenceable(vma))
                return !only_mappable_for_reloc(entry->flags);
 
        if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
@@ -725,8 +901,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
        int retry;
 
-       i915_gem_retire_requests_ring(engine);
-
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
        INIT_LIST_HEAD(&ordered_vmas);
@@ -746,7 +920,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                       obj->tiling_mode != I915_TILING_NONE;
+                       i915_gem_object_is_tiled(obj);
                need_mappable = need_fence || need_reloc_mappable(vma);
 
                if (entry->flags & EXEC_OBJECT_PINNED)
@@ -843,7 +1017,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
-               drm_gem_object_unreference(&vma->obj->base);
+               i915_vma_put(vma);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -937,41 +1111,54 @@ err:
        return ret;
 }
 
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+       unsigned int mask;
+
+       mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+       mask <<= I915_BO_ACTIVE_SHIFT;
+
+       return mask;
+}
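
eb_other_engines() computes a mask of every engine except the request's own, shifted to where the object keeps its active-engine bits, so a single AND against obj->flags answers "is this object busy elsewhere?". The arithmetic in isolation, with an assumed field layout:

    #include <stdio.h>

    #define N_ENGINES       5
    #define ACTIVE_MASK     ((1u << N_ENGINES) - 1)
    #define ACTIVE_SHIFT    8       /* assumed bit position, for illustration */

    static unsigned int other_engines(unsigned int my_engine)
    {
            unsigned int mask = ~(1u << my_engine) & ACTIVE_MASK;

            return mask << ACTIVE_SHIFT;
    }

    int main(void)
    {
            /* engine 0: bits for engines 1..4, shifted into the flags field */
            printf("%#x\n", other_engines(0));      /* prints 0x1e00 */
            return 0;
    }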
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_engine_flag(req->engine);
+       const unsigned int other_rings = eb_other_engines(req);
        struct i915_vma *vma;
-       uint32_t flush_domains = 0;
-       bool flush_chipset = false;
        int ret;
 
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
+               struct reservation_object *resv;
 
-               if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, req->engine, &req);
+               if (obj->flags & other_rings) {
+                       ret = i915_gem_request_await_object
+                               (req, obj, obj->base.pending_write_domain);
                        if (ret)
                                return ret;
                }
 
-               if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-                       flush_chipset |= i915_gem_clflush_object(obj, false);
+               resv = i915_gem_object_get_dmabuf_resv(obj);
+               if (resv) {
+                       ret = i915_sw_fence_await_reservation
+                               (&req->submit, resv, &i915_fence_ops,
+                                obj->base.pending_write_domain, 10*HZ,
+                                GFP_KERNEL | __GFP_NOWARN);
+                       if (ret < 0)
+                               return ret;
+               }
 
-               flush_domains |= obj->base.write_domain;
+               if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+                       i915_gem_clflush_object(obj, false);
        }
 
-       if (flush_chipset)
-               i915_gem_chipset_flush(req->engine->i915);
-
-       if (flush_domains & I915_GEM_DOMAIN_GTT)
-               wmb();
+       /* Unconditionally flush any chipset caches (for streaming writes). */
+       i915_gem_chipset_flush(req->engine->i915);
 
-       /* Unconditionally invalidate gpu caches and ensure that we do flush
-        * any residual writes from the previous batch.
-        */
-       return intel_ring_invalidate_all_caches(req);
+       /* Unconditionally invalidate GPU caches and TLBs. */
+       return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
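
move_to_gpu() now expresses dependencies as awaits on the request: work on other engines is awaited per object, and a dma-buf reservation contributes its exclusive (writer) fence to readers and every fence to writers. A simplified decision sketch with hypothetical types:

    /* Sketch: decide which fences a new request must await. */
    struct resv_sketch {
            int excl_fence;         /* last writer, -1 if none */
            int shared_fences[4];   /* outstanding readers */
            int shared_count;
    };

    static int await_fence(int fence) { (void)fence; return 0; }

    static int await_reservation(const struct resv_sketch *resv, int is_write)
    {
            int i, ret;

            if (resv->excl_fence >= 0) {
                    ret = await_fence(resv->excl_fence); /* order after writer */
                    if (ret)
                            return ret;
            }

            if (is_write) {
                    /* a writer must also wait for every reader */
                    for (i = 0; i < resv->shared_count; i++) {
                            ret = await_fence(resv->shared_fences[i]);
                            if (ret)
                                    return ret;
                    }
            }
            return 0;
    }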
 
 static bool
@@ -1007,6 +1194,9 @@ validate_exec_list(struct drm_device *dev,
        unsigned invalid_flags;
        int i;
 
+       /* INTERNAL flags must not overlap with external ones */
+       BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
+
        invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
        if (USES_FULL_PPGTT(dev))
                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -1036,6 +1226,14 @@ validate_exec_list(struct drm_device *dev,
                if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
                        return -EINVAL;
 
+               /* pad_to_size was once a reserved field, so sanitize it */
+               if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
+                       if (offset_in_page(exec[i].pad_to_size))
+                               return -EINVAL;
+               } else {
+                       exec[i].pad_to_size = 0;
+               }
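
Since pad_to_size reuses a formerly reserved field, it is validated only when its flag is set (it must be page aligned) and forced to zero otherwise, so stale userspace garbage never reaches the allocator. The check in isolation (flag bit shown for illustration):

    #include <errno.h>
    #include <stdint.h>

    #define PAGE_SIZE_SKETCH        4096u
    #define PAD_TO_SIZE_FLAG        (1u << 5)       /* illustrative bit */

    static int sanitize_pad_to_size(uint64_t *pad_to_size, unsigned int flags)
    {
            if (flags & PAD_TO_SIZE_FLAG) {
                    if (*pad_to_size & (PAGE_SIZE_SKETCH - 1))
                            return -EINVAL;         /* must be page aligned */
            } else {
                    *pad_to_size = 0;               /* reserved field: ignore */
            }
            return 0;
    }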
+
                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
@@ -1067,12 +1265,9 @@ static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_engine_cs *engine, const u32 ctx_id)
 {
-       struct i915_gem_context *ctx = NULL;
+       struct i915_gem_context *ctx;
        struct i915_ctx_hang_stats *hs;
 
-       if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-               return ERR_PTR(-EINVAL);
-
        ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;
@@ -1086,66 +1281,99 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
        return ctx;
 }
 
-void
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct drm_i915_gem_request *req,
+                            unsigned int flags)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned int idx = req->engine->id;
+
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+       obj->dirty = 1; /* be paranoid  */
+
+       /* Add a reference if we're newly entering the active list.
+        * The order in which we add operations to the retirement queue is
+        * vital here: mark_active adds to the start of the callback list,
+        * such that subsequent callbacks are called first. Therefore we
+        * add the active reference first and queue for it to be dropped
+        * *last*.
+        */
+       if (!i915_gem_object_is_active(obj))
+               i915_gem_object_get(obj);
+       i915_gem_object_set_active(obj, idx);
+       i915_gem_active_set(&obj->last_read[idx], req);
+
+       if (flags & EXEC_OBJECT_WRITE) {
+               i915_gem_active_set(&obj->last_write, req);
+
+               intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+               /* update for the implicit flush after a batch */
+               obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+       }
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE)
+               i915_gem_active_set(&vma->last_fence, req);
+
+       i915_vma_set_active(vma, idx);
+       i915_gem_active_set(&vma->last_read[idx], req);
+       list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
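
The comment in i915_vma_move_to_active() hinges on callback ordering: retirement hooks run newest-first, so the object reference queued first is dropped last and outlives every other hook. A toy LIFO model of that ordering:

    #include <stdio.h>

    typedef void (*retire_fn)(void);

    static retire_fn queue[8];
    static int depth;

    static void queue_retire(retire_fn fn) { queue[depth++] = fn; }

    static void retire_all(void)
    {
            while (depth)
                    queue[--depth]();       /* newest first */
    }

    static void drop_ref(void)    { puts("drop object reference (last)"); }
    static void clear_write(void) { puts("clear last_write (first)"); }

    int main(void)
    {
            queue_retire(drop_ref);         /* queued first ... */
            queue_retire(clear_write);      /* ... but runs first */
            retire_all();
            return 0;
    }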
+
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+                           struct drm_i915_gem_request *req,
+                           unsigned int flags)
+{
+       struct reservation_object *resv;
+
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (!resv)
+               return;
+
+       /* Ignore errors from failing to allocate the new fence; we can't
+        * handle an error right now. Worst case should be missed
+        * synchronisation leading to rendering corruption.
+        */
+       ww_mutex_lock(&resv->lock, NULL);
+       if (flags & EXEC_OBJECT_WRITE)
+               reservation_object_add_excl_fence(resv, &req->fence);
+       else if (reservation_object_reserve_shared(resv) == 0)
+               reservation_object_add_shared_fence(resv, &req->fence);
+       ww_mutex_unlock(&resv->lock);
+}
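
eb_export_fence() publishes the request's fence into the object's reservation: the exclusive slot for writes, a shared slot for reads, and a failed shared-slot reservation is deliberately swallowed because nothing can be unwound here. A hedged outline of the split:

    #include <errno.h>

    struct resv_out_sketch { int excl; int shared[4]; int nshared; };

    static int reserve_shared(struct resv_out_sketch *r)
    {
            return r->nshared < 4 ? 0 : -ENOMEM;    /* allocation stand-in */
    }

    static void export_fence(struct resv_out_sketch *r, int fence, int is_write)
    {
            if (is_write)
                    r->excl = fence;                 /* supersedes all readers */
            else if (reserve_shared(r) == 0)
                    r->shared[r->nshared++] = fence; /* one more reader */
            /* on failure: accept missed synchronisation, as above */
    }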
+
+static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
-               struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
-               obj->dirty = 1; /* be paranoid  */
                obj->base.write_domain = obj->base.pending_write_domain;
-               if (obj->base.write_domain == 0)
+               if (obj->base.write_domain)
+                       vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+               else
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
 
-               i915_vma_move_to_active(vma, req);
-               if (obj->base.write_domain) {
-                       i915_gem_request_assign(&obj->last_write_req, req);
-
-                       intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-                       /* update for the implicit flush after a batch */
-                       obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-               }
-               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       i915_gem_request_assign(&obj->last_fenced_req, req);
-                       if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-                               struct drm_i915_private *dev_priv = engine->i915;
-                               list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-                                              &dev_priv->mm.fence_list);
-                       }
-               }
-
+               i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+               eb_export_fence(obj, req, vma->exec_entry->flags);
                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
-{
-       /* Unconditionally force add_request to emit a full flush. */
-       params->engine->gpu_caches_dirty = true;
-
-       /* Add a breadcrumb for the completion of the batch buffer */
-       __i915_add_request(params->request, params->batch_obj, true);
-}
-
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-                           struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_ring *ring = req->ring;
        int ret, i;
 
-       if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
+       if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
@@ -1155,21 +1383,21 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
                return ret;
 
        for (i = 0; i < 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
-               intel_ring_emit(engine, 0);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
+               intel_ring_emit(ring, 0);
        }
 
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
        return 0;
 }
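
The SOL reset helper now emits through the request's ring rather than the engine, but the command stream is unchanged: four MI_LOAD_REGISTER_IMM writes zeroing GEN7_SO_WRITE_OFFSET(0..3). As a rough model, each LRI is an (opcode, register, value) triple appended at the ring tail; the encoding below is a placeholder, not the real opcode:

    #include <stdint.h>

    #define MI_LRI_SKETCH 0x22000001u       /* placeholder, not HW encoding */

    struct ring_sketch { uint32_t buf[64]; unsigned int tail; };

    static void ring_emit(struct ring_sketch *r, uint32_t dw)
    {
            r->buf[r->tail++] = dw;
    }

    /* zero four stream-out write offsets, one LRI per register */
    static void reset_sol_offsets(struct ring_sketch *r, uint32_t reg_base)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    ring_emit(r, MI_LRI_SKETCH);
                    ring_emit(r, reg_base + 4 * i); /* register offset */
                    ring_emit(r, 0);                /* value to load */
            }
    }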
 
-static struct drm_i915_gem_object*
+static struct i915_vma *
 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
-                         struct eb_vmas *eb,
                          struct drm_i915_gem_object *batch_obj,
+                         struct eb_vmas *eb,
                          u32 batch_start_offset,
                          u32 batch_len,
                          bool is_master)
@@ -1181,51 +1409,44 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
        shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
                                                   PAGE_ALIGN(batch_len));
        if (IS_ERR(shadow_batch_obj))
-               return shadow_batch_obj;
-
-       ret = i915_parse_cmds(engine,
-                             batch_obj,
-                             shadow_batch_obj,
-                             batch_start_offset,
-                             batch_len,
-                             is_master);
-       if (ret)
-               goto err;
-
-       ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
-       if (ret)
-               goto err;
+               return ERR_CAST(shadow_batch_obj);
+
+       ret = intel_engine_cmd_parser(engine,
+                                     batch_obj,
+                                     shadow_batch_obj,
+                                     batch_start_offset,
+                                     batch_len,
+                                     is_master);
+       if (ret) {
+               if (ret == -EACCES) /* unhandled chained batch */
+                       vma = NULL;
+               else
+                       vma = ERR_PTR(ret);
+               goto out;
+       }
 
-       i915_gem_object_unpin_pages(shadow_batch_obj);
+       vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+       if (IS_ERR(vma))
+               goto out;
 
        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-       vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
        vma->exec_entry = shadow_exec_entry;
        vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
-       drm_gem_object_reference(&shadow_batch_obj->base);
+       i915_gem_object_get(shadow_batch_obj);
        list_add_tail(&vma->exec_list, &eb->vmas);
 
-       shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-
-       return shadow_batch_obj;
-
-err:
+out:
        i915_gem_object_unpin_pages(shadow_batch_obj);
-       if (ret == -EACCES) /* unhandled chained batch */
-               return batch_obj;
-       else
-               return ERR_PTR(ret);
+       return vma;
 }
 
-int
-i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-                              struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas)
+static int
+execbuf_submit(struct i915_execbuffer_params *params,
+              struct drm_i915_gem_execbuffer2 *args,
+              struct list_head *vmas)
 {
-       struct drm_device *dev = params->dev;
-       struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = params->request->i915;
        u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
@@ -1239,34 +1460,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        if (ret)
                return ret;
 
-       WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
-            "%s didn't clear reload\n", engine->name);
-
        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+               if (instp_mode != 0 && params->engine->id != RCS) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }
 
                if (instp_mode != dev_priv->relative_constants_mode) {
-                       if (INTEL_INFO(dev)->gen < 4) {
+                       if (INTEL_INFO(dev_priv)->gen < 4) {
                                DRM_DEBUG("no rel constants on pre-gen4\n");
                                return -EINVAL;
                        }
 
-                       if (INTEL_INFO(dev)->gen > 5 &&
+                       if (INTEL_INFO(dev_priv)->gen > 5 &&
                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
                                return -EINVAL;
                        }
 
                        /* The HW changed the meaning on this bit on gen6 */
-                       if (INTEL_INFO(dev)->gen >= 6)
+                       if (INTEL_INFO(dev_priv)->gen >= 6)
                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
@@ -1275,37 +1493,39 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                return -EINVAL;
        }
 
-       if (engine == &dev_priv->engine[RCS] &&
+       if (params->engine->id == RCS &&
            instp_mode != dev_priv->relative_constants_mode) {
+               struct intel_ring *ring = params->request->ring;
+
                ret = intel_ring_begin(params->request, 4);
                if (ret)
                        return ret;
 
-               intel_ring_emit(engine, MI_NOOP);
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, INSTPM);
-               intel_ring_emit(engine, instp_mask << 16 | instp_mode);
-               intel_ring_advance(engine);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(ring, INSTPM);
+               intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+               intel_ring_advance(ring);
 
                dev_priv->relative_constants_mode = instp_mode;
        }
 
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-               ret = i915_reset_gen7_sol_offsets(dev, params->request);
+               ret = i915_reset_gen7_sol_offsets(params->request);
                if (ret)
                        return ret;
        }
 
        exec_len   = args->batch_len;
-       exec_start = params->batch_obj_vm_offset +
+       exec_start = params->batch->node.start +
                     params->args_batch_start_offset;
 
        if (exec_len == 0)
-               exec_len = params->batch_obj->base.size;
+               exec_len = params->batch->size - params->args_batch_start_offset;
 
-       ret = engine->dispatch_execbuffer(params->request,
-                                       exec_start, exec_len,
-                                       params->dispatch_flags);
+       ret = params->engine->emit_bb_start(params->request,
+                                           exec_start, exec_len,
+                                           params->dispatch_flags);
        if (ret)
                return ret;
 
@@ -1318,43 +1538,20 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The ring index is returned.
+ * The engine index is returned.
  */
 static unsigned int
-gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
+gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+                        struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Check whether the file_priv has already selected one ring. */
-       if ((int)file_priv->bsd_ring < 0) {
-               /* If not, use the ping-pong mechanism to select one. */
-               mutex_lock(&dev_priv->drm.struct_mutex);
-               file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
-               dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-       }
+       if ((int)file_priv->bsd_engine < 0)
+               file_priv->bsd_engine = atomic_fetch_xor(1,
+                        &dev_priv->mm.bsd_engine_dispatch_index);
 
-       return file_priv->bsd_ring;
-}
-
-static struct drm_i915_gem_object *
-eb_get_batch(struct eb_vmas *eb)
-{
-       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
-
-       /*
-        * SNA is doing fancy tricks with compressing batch buffers, which leads
-        * to negative relocation deltas. Usually that works out ok since the
-        * relocate address is still positive, except when the batch is placed
-        * very low in the GTT. Ensure this doesn't happen.
-        *
-        * Note that actual hangs have only been observed on gen7, but for
-        * paranoia do it everywhere.
-        */
-       if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
-               vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
-       return vma->obj;
+       return file_priv->bsd_engine;
 }
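
Default BSD engine selection drops struct_mutex in favour of an atomic fetch-xor: the shared index alternates between 0 and 1, and each file caches whichever value it drew. The lock-free ping-pong in portable C11:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint dispatch_index;      /* shared round-robin state */

    /* atomically draw the current value and flip it for the next caller */
    static unsigned int pick_engine(void)
    {
            return atomic_fetch_xor(&dispatch_index, 1);
    }

    int main(void)
    {
            unsigned int a = pick_engine();
            unsigned int b = pick_engine();
            unsigned int c = pick_engine();

            printf("%u %u %u\n", a, b, c);  /* 0 1 0 */
            return 0;
    }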
 
 #define I915_USER_RINGS (4)
@@ -1367,31 +1564,31 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
        [I915_EXEC_VEBOX]       = VECS
 };
 
-static int
-eb_select_ring(struct drm_i915_private *dev_priv,
-              struct drm_file *file,
-              struct drm_i915_gem_execbuffer2 *args,
-              struct intel_engine_cs **ring)
+static struct intel_engine_cs *
+eb_select_engine(struct drm_i915_private *dev_priv,
+                struct drm_file *file,
+                struct drm_i915_gem_execbuffer2 *args)
 {
        unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+       struct intel_engine_cs *engine;
 
        if (user_ring_id > I915_USER_RINGS) {
                DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
-               return -EINVAL;
+               return NULL;
        }
 
        if ((user_ring_id != I915_EXEC_BSD) &&
            ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
                DRM_DEBUG("execbuf with non bsd ring but with invalid "
                          "bsd dispatch flags: %d\n", (int)(args->flags));
-               return -EINVAL;
+               return NULL;
        }
 
        if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
                unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
                if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
-                       bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+                       bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
                } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
                           bsd_idx <= I915_EXEC_BSD_RING2) {
                        bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -1399,20 +1596,20 @@ eb_select_ring(struct drm_i915_private *dev_priv,
                } else {
                        DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
                                  bsd_idx);
-                       return -EINVAL;
+                       return NULL;
                }
 
-               *ring = &dev_priv->engine[_VCS(bsd_idx)];
+               engine = &dev_priv->engine[_VCS(bsd_idx)];
        } else {
-               *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
+               engine = &dev_priv->engine[user_ring_map[user_ring_id]];
        }
 
-       if (!intel_engine_initialized(*ring)) {
+       if (!intel_engine_initialized(engine)) {
                DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
-               return -EINVAL;
+               return NULL;
        }
 
-       return 0;
+       return engine;
 }
 
 static int
@@ -1423,9 +1620,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_request *req = NULL;
        struct eb_vmas *eb;
-       struct drm_i915_gem_object *batch_obj;
        struct drm_i915_gem_exec_object2 shadow_exec_entry;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
@@ -1454,9 +1649,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->flags & I915_EXEC_IS_PINNED)
                dispatch_flags |= I915_DISPATCH_PINNED;
 
-       ret = eb_select_ring(dev_priv, file, args, &engine);
-       if (ret)
-               return ret;
+       engine = eb_select_engine(dev_priv, file, args);
+       if (!engine)
+               return -EINVAL;
 
        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1496,7 +1691,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       i915_gem_context_reference(ctx);
+       i915_gem_context_get(ctx);
 
        if (ctx->ppgtt)
                vm = &ctx->ppgtt->base;
@@ -1505,9 +1700,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        memset(&params_master, 0x00, sizeof(params_master));
 
-       eb = eb_create(args);
+       eb = eb_create(dev_priv, args);
        if (eb == NULL) {
-               i915_gem_context_unreference(ctx);
+               i915_gem_context_put(ctx);
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
@@ -1519,7 +1714,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       batch_obj = eb_get_batch(eb);
+       params->batch = eb_get_batch(eb);
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1543,34 +1738,34 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Set the pending read domains for the batch buffer to COMMAND */
-       if (batch_obj->base.pending_write_domain) {
+       if (params->batch->obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
+       if (args->batch_start_offset > params->batch->size ||
+           args->batch_len > params->batch->size - args->batch_start_offset) {
+               DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+               ret = -EINVAL;
+               goto err;
+       }
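
The new batch bounds check is written to be overflow-safe: after confirming start <= size, comparing len against size - start cannot wrap, unlike the naive start + len > size. The idiom on its own:

    #include <errno.h>
    #include <stdint.h>

    static int check_batch_range(uint64_t start, uint64_t len, uint64_t size)
    {
            /* naive "start + len > size" can wrap; this form cannot */
            if (start > size || len > size - start)
                    return -EINVAL;
            return 0;
    }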
 
        params->args_batch_start_offset = args->batch_start_offset;
-       if (i915_needs_cmd_parser(engine) && args->batch_len) {
-               struct drm_i915_gem_object *parsed_batch_obj;
-
-               parsed_batch_obj = i915_gem_execbuffer_parse(engine,
-                                                            &shadow_exec_entry,
-                                                            eb,
-                                                            batch_obj,
-                                                            args->batch_start_offset,
-                                                            args->batch_len,
-                                                            drm_is_current_master(file));
-               if (IS_ERR(parsed_batch_obj)) {
-                       ret = PTR_ERR(parsed_batch_obj);
+       if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+               struct i915_vma *vma;
+
+               vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
+                                               params->batch->obj,
+                                               eb,
+                                               args->batch_start_offset,
+                                               args->batch_len,
+                                               drm_is_current_master(file));
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
                        goto err;
                }
 
-               /*
-                * parsed_batch_obj == batch_obj means batch not fully parsed:
-                * Accept, but don't promote to secure.
-                */
-
-               if (parsed_batch_obj != batch_obj) {
+               if (vma) {
                        /*
                         * Batch parsed and accepted:
                         *
@@ -1582,16 +1777,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         */
                        dispatch_flags |= I915_DISPATCH_SECURE;
                        params->args_batch_start_offset = 0;
-                       batch_obj = parsed_batch_obj;
+                       params->batch = vma;
                }
        }
 
-       batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+       params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (dispatch_flags & I915_DISPATCH_SECURE) {
+               struct drm_i915_gem_object *obj = params->batch->obj;
+               struct i915_vma *vma;
+
                /*
                 * So on first glance it looks freaky that we pin the batch here
                 * outside of the reservation loop. But:
@@ -1602,22 +1800,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 *   fitting due to fragmentation.
                 * So this is actually safe.
                 */
-               ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
-               if (ret)
+               vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
                        goto err;
+               }
 
-               params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
-       } else
-               params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+               params->batch = vma;
+       }
 
        /* Allocate a request for this batch buffer nice and early. */
-       req = i915_gem_request_alloc(engine, ctx);
-       if (IS_ERR(req)) {
-               ret = PTR_ERR(req);
+       params->request = i915_gem_request_alloc(engine, ctx);
+       if (IS_ERR(params->request)) {
+               ret = PTR_ERR(params->request);
                goto err_batch_unpin;
        }
 
-       ret = i915_gem_request_add_to_client(req, file);
+       /* Whilst this request exists, batch_obj will be on the
+        * active_list, and so will hold the active reference. Only when this
+        * request is retired will the batch_obj be moved onto the
+        * inactive_list and lose its active reference. Hence we do not need
+        * to explicitly hold another reference here.
+        */
+       params->request->batch = params->batch;
+
+       ret = i915_gem_request_add_to_client(params->request, file);
        if (ret)
                goto err_request;
 
@@ -1631,13 +1838,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->file                    = file;
        params->engine                    = engine;
        params->dispatch_flags          = dispatch_flags;
-       params->batch_obj               = batch_obj;
        params->ctx                     = ctx;
-       params->request                 = req;
 
-       ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+       ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-       i915_gem_execbuffer_retire_commands(params);
+       __i915_add_request(params->request, ret == 0);
 
 err_batch_unpin:
        /*
@@ -1647,11 +1852,10 @@ err_batch_unpin:
         * active.
         */
        if (dispatch_flags & I915_DISPATCH_SECURE)
-               i915_gem_object_ggtt_unpin(batch_obj);
-
+               i915_vma_unpin(params->batch);
 err:
        /* the request owns the ref now */
-       i915_gem_context_unreference(ctx);
+       i915_gem_context_put(ctx);
        eb_destroy(eb);
 
        mutex_unlock(&dev->struct_mutex);
index 251d7a95af891dc5f4071196bc5168769cd74398..8df1fa7234e8e031e9c61da4608c5c4b7610476b 100644 (file)
  * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
  */
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+#define pipelined 0
+
+static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
+       u64 val;
 
-       if (INTEL_INFO(dev)->gen >= 6) {
-               fence_reg_lo = FENCE_REG_GEN6_LO(reg);
-               fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+       if (INTEL_INFO(fence->i915)->gen >= 6) {
+               fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
+               fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
                fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+
        } else {
-               fence_reg_lo = FENCE_REG_965_LO(reg);
-               fence_reg_hi = FENCE_REG_965_HI(reg);
+               fence_reg_lo = FENCE_REG_965_LO(fence->id);
+               fence_reg_hi = FENCE_REG_965_HI(fence->id);
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
 
-       /* To w/a incoherency with non-atomic 64-bit register updates,
-        * we split the 64-bit update into two 32-bit writes. In order
-        * for a partial fence not to be evaluated between writes, we
-        * precede the update with write to turn off the fence register,
-        * and only enable the fence as the last step.
-        *
-        * For extra levels of paranoia, we make sure each step lands
-        * before applying the next step.
-        */
-       I915_WRITE(fence_reg_lo, 0);
-       POSTING_READ(fence_reg_lo);
-
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
-               uint64_t val;
-
-               /* Adjust fence size to match tiled area */
-               if (obj->tiling_mode != I915_TILING_NONE) {
-                       uint32_t row_size = obj->stride *
-                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-                       size = (size / row_size) * row_size;
-               }
-
-               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-                                0xfffff000) << 32;
-               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-               val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
+               u32 row_size = stride * (is_y_tiled ? 32 : 8);
+               u32 size = rounddown((u32)vma->node.size, row_size);
+
+               val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
+               val |= vma->node.start & 0xfffff000;
+               val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
+               if (is_y_tiled)
+                       val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                val |= I965_FENCE_REG_VALID;
+       }
 
-               I915_WRITE(fence_reg_hi, val >> 32);
-               POSTING_READ(fence_reg_hi);
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
 
-               I915_WRITE(fence_reg_lo, val);
+               /* To w/a incoherency with non-atomic 64-bit register updates,
+                * we split the 64-bit update into two 32-bit writes. In order
+                * for a partial fence not to be evaluated between writes, we
+                * precede the update with a write to turn off the fence register,
+                * and only enable the fence as the last step.
+                *
+                * For extra levels of paranoia, we make sure each step lands
+                * before applying the next step.
+                */
+               I915_WRITE(fence_reg_lo, 0);
+               POSTING_READ(fence_reg_lo);
+
+               I915_WRITE(fence_reg_hi, upper_32_bits(val));
+               I915_WRITE(fence_reg_lo, lower_32_bits(val));
                POSTING_READ(fence_reg_lo);
-       } else {
-               I915_WRITE(fence_reg_hi, 0);
-               POSTING_READ(fence_reg_hi);
        }
 }
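
The i965 path keeps the workaround for non-atomic 64-bit fence updates: turn the fence off, write the high dword, then write the low dword (which carries the valid bit) last, so a half-programmed fence is never decoded; posting reads make each step land before the next. The shape of the sequence with placeholder MMIO helpers:

    #include <stdint.h>

    /* placeholder MMIO accessors; the driver uses I915_WRITE/POSTING_READ */
    static volatile uint32_t regs[2];
    static void mmio_write(int r, uint32_t v) { regs[r] = v; }
    static uint32_t mmio_read(int r) { return regs[r]; }

    enum { FENCE_LO, FENCE_HI };

    static void write_fence64(uint64_t val)
    {
            /* 1. disable the fence so no partial value is ever decoded */
            mmio_write(FENCE_LO, 0);
            (void)mmio_read(FENCE_LO);      /* posting read: step lands */

            /* 2. high dword next: harmless while the fence is off */
            mmio_write(FENCE_HI, (uint32_t)(val >> 32));

            /* 3. low dword last: carries the VALID bit, re-arms the fence */
            mmio_write(FENCE_LO, (uint32_t)val);
            (void)mmio_read(FENCE_LO);
    }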
 
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
                int pitch_val;
                int tile_width;
 
-               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-                    (size & -size) != size ||
-                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+               WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
+                    !is_power_of_2(vma->node.size) ||
+                    (vma->node.start & (vma->node.size - 1)),
+                    "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
+                    vma->node.start,
+                    i915_vma_is_map_and_fenceable(vma),
+                    vma->node.size);
 
-               if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+               if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
                        tile_width = 128;
                else
                        tile_width = 512;
 
                /* Note: pitch better be a power of two tile widths */
-               pitch_val = obj->stride / tile_width;
+               pitch_val = stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = i915_gem_obj_ggtt_offset(obj);
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-               val |= I915_FENCE_SIZE_BITS(size);
+               val = vma->node.start;
+               if (is_y_tiled)
+                       val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+               val |= I915_FENCE_SIZE_BITS(vma->node.size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
+       }
+
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
+               i915_reg_t reg = FENCE_REG(fence->id);
 
-       I915_WRITE(FENCE_REG(reg), val);
-       POSTING_READ(FENCE_REG(reg));
+               I915_WRITE(reg, val);
+               POSTING_READ(reg);
+       }
 }
 
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-                               struct drm_i915_gem_object *obj)
+static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t val;
+       u32 val;
 
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
-               uint32_t pitch_val;
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
+               u32 pitch_val;
 
-               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-                    (size & -size) != size ||
-                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
-                    i915_gem_obj_ggtt_offset(obj), size);
+               WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
+                    !is_power_of_2(vma->node.size) ||
+                    (vma->node.start & (vma->node.size - 1)),
+                    "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
+                    vma->node.start, vma->node.size);
 
-               pitch_val = obj->stride / 128;
+               pitch_val = stride / 128;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = i915_gem_obj_ggtt_offset(obj);
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-               val |= I830_FENCE_SIZE_BITS(size);
+               val = vma->node.start;
+               if (is_y_tiled)
+                       val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+               val |= I830_FENCE_SIZE_BITS(vma->node.size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
+       }
 
-       I915_WRITE(FENCE_REG(reg), val);
-       POSTING_READ(FENCE_REG(reg));
-}
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
+               i915_reg_t reg = FENCE_REG(fence->id);
 
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-       return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+               I915_WRITE(reg, val);
+               POSTING_READ(reg);
+       }
 }
 
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+static void fence_write(struct drm_i915_fence_reg *fence,
+                       struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       /* Ensure that all CPU reads are completed before installing a fence
-        * and all writes before removing the fence.
+       /* Previous access through the fence register is marshalled by
+        * the mb() inside the fault handlers (i915_gem_release_mmap)
+        * and explicitly managed for internal users.
         */
-       if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-               mb();
-
-       WARN(obj && (!obj->stride || !obj->tiling_mode),
-            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-            obj->stride, obj->tiling_mode);
-
-       if (IS_GEN2(dev))
-               i830_write_fence_reg(dev, reg, obj);
-       else if (IS_GEN3(dev))
-               i915_write_fence_reg(dev, reg, obj);
-       else if (INTEL_INFO(dev)->gen >= 4)
-               i965_write_fence_reg(dev, reg, obj);
-
-       /* And similarly be paranoid that no direct access to this region
-        * is reordered to before the fence is installed.
+
+       if (IS_GEN2(fence->i915))
+               i830_write_fence_reg(fence, vma);
+       else if (IS_GEN3(fence->i915))
+               i915_write_fence_reg(fence, vma);
+       else
+               i965_write_fence_reg(fence, vma);
+
+       /* Access through the fenced region afterwards is
+        * ordered by the posting reads whilst writing the registers.
         */
-       if (i915_gem_object_needs_mb(obj))
-               mb();
-}
 
-static inline int fence_number(struct drm_i915_private *dev_priv,
-                              struct drm_i915_fence_reg *fence)
-{
-       return fence - dev_priv->fence_regs;
+       fence->dirty = false;
 }
 
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-                                        struct drm_i915_fence_reg *fence,
-                                        bool enable)
+static int fence_update(struct drm_i915_fence_reg *fence,
+                       struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int reg = fence_number(dev_priv, fence);
-
-       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+       int ret;
 
-       if (enable) {
-               obj->fence_reg = reg;
-               fence->obj = obj;
-               list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-       } else {
-               obj->fence_reg = I915_FENCE_REG_NONE;
-               fence->obj = NULL;
-               list_del_init(&fence->lru_list);
-       }
-       obj->fence_dirty = false;
-}
+       if (vma) {
+               if (!i915_vma_is_map_and_fenceable(vma))
+                       return -EINVAL;
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-       if (obj->tiling_mode)
-               i915_gem_release_mmap(obj);
+               if (WARN(!i915_gem_object_get_stride(vma->obj) ||
+                        !i915_gem_object_get_tiling(vma->obj),
+                        "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+                        i915_gem_object_get_stride(vma->obj),
+                        i915_gem_object_get_tiling(vma->obj)))
+                       return -EINVAL;
 
-       /* As we do not have an associated fence register, we will force
-        * a tiling change if we ever need to acquire one.
-        */
-       obj->fence_dirty = false;
-       obj->fence_reg = I915_FENCE_REG_NONE;
-}
+               ret = i915_gem_active_retire(&vma->last_fence,
+                                            &vma->obj->base.dev->struct_mutex);
+               if (ret)
+                       return ret;
+       }
 
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->last_fenced_req) {
-               int ret = i915_wait_request(obj->last_fenced_req);
+       if (fence->vma) {
+               ret = i915_gem_active_retire(&fence->vma->last_fence,
+                                     &fence->vma->obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
+       }
+
+       if (fence->vma && fence->vma != vma) {
+               /* Ensure that all userspace CPU access is completed before
+                * stealing the fence.
+                */
+               i915_gem_release_mmap(fence->vma->obj);
+
+               fence->vma->fence = NULL;
+               fence->vma = NULL;
+
+               list_move(&fence->link, &fence->i915->mm.fence_list);
+       }
+
+       fence_write(fence, vma);
+
+       if (vma) {
+               if (fence->vma != vma) {
+                       vma->fence = fence;
+                       fence->vma = vma;
+               }
 
-               i915_gem_request_assign(&obj->last_fenced_req, NULL);
+               list_move_tail(&fence->link, &fence->i915->mm.fence_list);
        }
 
        return 0;
 }
 
 /**
- * i915_gem_object_put_fence - force-remove fence for an object
- * @obj: object to map through a fence reg
+ * i915_vma_put_fence - force-remove fence for a VMA
+ * @vma: vma to map linearly (not through a fence reg)
  *
  * This function force-removes any fence from the given object, which is useful
  * if the kernel wants to do untiled GTT access.
@@ -284,70 +286,40 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+i915_vma_put_fence(struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct drm_i915_fence_reg *fence;
-       int ret;
+       struct drm_i915_fence_reg *fence = vma->fence;
 
-       ret = i915_gem_object_wait_fence(obj);
-       if (ret)
-               return ret;
-
-       if (obj->fence_reg == I915_FENCE_REG_NONE)
+       if (!fence)
                return 0;
 
-       fence = &dev_priv->fence_regs[obj->fence_reg];
-
-       if (WARN_ON(fence->pin_count))
+       if (fence->pin_count)
                return -EBUSY;
 
-       i915_gem_object_fence_lost(obj);
-       i915_gem_object_update_fence(obj, fence, false);
-
-       return 0;
+       return fence_update(fence, NULL);
 }
 
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
+static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_fence_reg *reg, *avail;
-       int i;
-
-       /* First try to find a free reg */
-       avail = NULL;
-       for (i = 0; i < dev_priv->num_fence_regs; i++) {
-               reg = &dev_priv->fence_regs[i];
-               if (!reg->obj)
-                       return reg;
-
-               if (!reg->pin_count)
-                       avail = reg;
-       }
-
-       if (avail == NULL)
-               goto deadlock;
+       struct drm_i915_fence_reg *fence;
 
-       /* None available, try to steal one or wait for a user to finish */
-       list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-               if (reg->pin_count)
+       list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+               if (fence->pin_count)
                        continue;
 
-               return reg;
+               return fence;
        }
 
-deadlock:
        /* Wait for completion of pending flips which consume fences */
-       if (intel_has_pending_fb_unpin(dev))
+       if (intel_has_pending_fb_unpin(&dev_priv->drm))
                return ERR_PTR(-EAGAIN);
 
        return ERR_PTR(-EDEADLK);
 }
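
fence_find() scans the LRU list for the first register not pinned by a concurrent user, returning -EAGAIN when pending flips may soon release one and -EDEADLK when everything is pinned. The first-unpinned scan, reduced from an intrusive list to an array (error plumbing simplified):

    #include <errno.h>
    #include <stddef.h>

    struct fence_sketch { int id; int pin_count; };

    /* list[0] is least recently used; return the first stealable entry */
    static struct fence_sketch *
    find_fence(struct fence_sketch *list, size_t n, int *err)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (list[i].pin_count)
                            continue;       /* pinned by a user: skip */
                    return &list[i];
            }

            *err = -EDEADLK;        /* the kernel may return -EAGAIN first */
            return NULL;
    }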
 
 /**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
+ * i915_vma_get_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -364,103 +336,27 @@ deadlock:
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+i915_vma_get_fence(struct i915_vma *vma)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       bool enable = obj->tiling_mode != I915_TILING_NONE;
-       struct drm_i915_fence_reg *reg;
-       int ret;
-
-       /* Have we updated the tiling parameters upon the object and so
-        * will need to serialise the write to the associated fence register?
-        */
-       if (obj->fence_dirty) {
-               ret = i915_gem_object_wait_fence(obj);
-               if (ret)
-                       return ret;
-       }
+       struct drm_i915_fence_reg *fence;
+       struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 
        /* Just update our place in the LRU if our fence is getting reused. */
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               reg = &dev_priv->fence_regs[obj->fence_reg];
-               if (!obj->fence_dirty) {
-                       list_move_tail(&reg->lru_list,
-                                      &dev_priv->mm.fence_list);
+       if (vma->fence) {
+               fence = vma->fence;
+               if (!fence->dirty) {
+                       list_move_tail(&fence->link,
+                                      &fence->i915->mm.fence_list);
                        return 0;
                }
-       } else if (enable) {
-               if (WARN_ON(!obj->map_and_fenceable))
-                       return -EINVAL;
-
-               reg = i915_find_fence_reg(dev);
-               if (IS_ERR(reg))
-                       return PTR_ERR(reg);
-
-               if (reg->obj) {
-                       struct drm_i915_gem_object *old = reg->obj;
-
-                       ret = i915_gem_object_wait_fence(old);
-                       if (ret)
-                               return ret;
-
-                       i915_gem_object_fence_lost(old);
-               }
+       } else if (set) {
+               fence = fence_find(to_i915(vma->vm->dev));
+               if (IS_ERR(fence))
+                       return PTR_ERR(fence);
        } else
                return 0;
 
-       i915_gem_object_update_fence(obj, reg, enable);
-
-       return 0;
-}
-
-/**
- * i915_gem_object_pin_fence - pin fencing state
- * @obj: object to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * object is ready to be used as a scanout target. Fencing status must be
- * synchronize first by calling i915_gem_object_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_gem_object_unpin_fence().
- *
- * Returns:
- *
- * True if the object has a fence, false otherwise.
- */
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-               struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-               WARN_ON(!ggtt_vma ||
-                       dev_priv->fence_regs[obj->fence_reg].pin_count >
-                       ggtt_vma->pin_count);
-               dev_priv->fence_regs[obj->fence_reg].pin_count++;
-               return true;
-       } else
-               return false;
-}
-
-/**
- * i915_gem_object_unpin_fence - unpin fencing state
- * @obj: object to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_gem_object_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-               WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-               dev_priv->fence_regs[obj->fence_reg].pin_count--;
-       }
+       return fence_update(fence, set);
 }
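
Reviewer note: the new i915_vma_get_fence() collapses the old object-based logic into three cases — reuse an already-owned clean fence (LRU touch only), allocate a fence for a tiled VMA, or do nothing for an untiled, unfenced VMA. A minimal standalone sketch of that decision, using hypothetical stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fence { bool dirty; };
struct vma   { struct fence *fence; bool tiled; };

/* Stand-ins for fence_find()/fence_update()/the LRU list move. */
static struct fence *find_free_fence(void) { static struct fence f; return &f; }
static int fence_update(struct fence *f, struct vma *owner) { (void)f; (void)owner; return 0; }
static void lru_touch(struct fence *f) { (void)f; }

static int get_fence(struct vma *vma)
{
        struct fence *fence;
        struct vma *set = vma->tiled ? vma : NULL;

        if (vma->fence) {                 /* already fenced */
                fence = vma->fence;
                if (!fence->dirty) {      /* nothing to rewrite */
                        lru_touch(fence);
                        return 0;
                }
        } else if (set) {                 /* tiled, needs a register */
                fence = find_free_fence();
        } else {                          /* untiled and unfenced */
                return 0;
        }

        return fence_update(fence, set);
}

int main(void)
{
        struct vma v = { .fence = NULL, .tiled = true };
        printf("get_fence -> %d\n", get_fence(&v));
        return 0;
}
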
 
 /**
@@ -477,17 +373,16 @@ void i915_gem_restore_fences(struct drm_device *dev)
 
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+               struct i915_vma *vma = reg->vma;
 
                /*
                 * Commit delayed tiling changes if we have an object still
                 * attached to the fence, otherwise just clear the fence.
                 */
-               if (reg->obj) {
-                       i915_gem_object_update_fence(reg->obj, reg,
-                                                    reg->obj->tiling_mode);
-               } else {
-                       i915_gem_write_fence(dev, i, NULL);
-               }
+               if (vma && !i915_gem_object_is_tiled(vma->obj))
+                       vma = NULL;
+
+               fence_update(reg, vma);
        }
 }
 
index 10f1e32767e60c7f636161485658bc3f9f5ec99f..0bb4232f66bcac1ad9653c073c1cbe7cb7375c16 100644 (file)
@@ -32,6 +32,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+
 /**
  * DOC: Global GTT views
  *
@@ -122,8 +124,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
        has_full_48bit_ppgtt =
                IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-       if (intel_vgpu_active(dev_priv))
-               has_full_ppgtt = false; /* emulation is too hard */
+       if (intel_vgpu_active(dev_priv)) {
+               /* emulation is too hard */
+               has_full_ppgtt = false;
+               has_full_48bit_ppgtt = false;
+       }
 
        if (!has_aliasing_ppgtt)
                return 0;
@@ -158,7 +163,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                return 0;
        }
 
-       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
                return has_full_48bit_ppgtt ? 3 : 2;
        else
                return has_aliasing_ppgtt ? 1 : 0;
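
Reviewer note: the vGPU quirk is now folded into the feature flags before the final selection, so a vGPU never reports full PPGTT and gen8 execlists only pick a full mode when full PPGTT survived sanitization. A standalone model of the final mapping (0 = disabled, 1 = aliasing, 2 = full, 3 = full 48-bit), with hypothetical inputs standing in for the sanitized flags:

#include <stdbool.h>
#include <stdio.h>

static int ppgtt_mode(bool aliasing, bool full, bool full_48bit,
                      bool gen8_execlists)
{
        if (!aliasing)
                return 0;
        if (gen8_execlists && full)
                return full_48bit ? 3 : 2;
        return 1;       /* aliasing only */
}

int main(void)
{
        /* vGPU case: full flags forced off, aliasing survives */
        printf("vgpu -> %d\n", ppgtt_mode(true, false, false, true));
        printf("gen9 -> %d\n", ppgtt_mode(true, true, true, true));
        return 0;
}
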
@@ -170,11 +175,13 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 {
        u32 pte_flags = 0;
 
+       vma->pages = vma->obj->pages;
+
        /* Currently applicable only to VLV */
        if (vma->obj->gt_ro)
                pte_flags |= PTE_READ_ONLY;
 
-       vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+       vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
                                cache_level, pte_flags);
 
        return 0;
@@ -184,7 +191,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
        vma->vm->clear_range(vma->vm,
                             vma->node.start,
-                            vma->obj->base.size,
+                            vma->size,
                             true);
 }
 
@@ -324,16 +331,16 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static int __setup_page_dma(struct drm_device *dev,
                            struct i915_page_dma *p, gfp_t flags)
 {
-       struct device *device = &dev->pdev->dev;
+       struct device *kdev = &dev->pdev->dev;
 
        p->page = alloc_page(flags);
        if (!p->page)
                return -ENOMEM;
 
-       p->daddr = dma_map_page(device,
+       p->daddr = dma_map_page(kdev,
                                p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(device, p->daddr)) {
+       if (dma_mapping_error(kdev, p->daddr)) {
                __free_page(p->page);
                return -EINVAL;
        }
@@ -343,15 +350,17 @@ static int __setup_page_dma(struct drm_device *dev,
 
 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-       return __setup_page_dma(dev, p, GFP_KERNEL);
+       return __setup_page_dma(dev, p, I915_GFP_DMA);
 }
 
 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
+       struct pci_dev *pdev = dev->pdev;
+
        if (WARN_ON(!p->page))
                return;
 
-       dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
        __free_page(p->page);
        memset(p, 0, sizeof(*p));
 }
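
Reviewer note: the setup/cleanup pair follows the usual allocate-then-map discipline — if the DMA mapping fails after the page allocation succeeded, the page is freed before returning. A runnable model of that error path, with malloc standing in for alloc_page() and a fake mapping step (hypothetical helpers, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct page_dma { void *page; unsigned long daddr; };

/* Fake "DMA map" that could fail; stands in for dma_map_page(). */
static int fake_map(void *page, unsigned long *daddr)
{
        *daddr = (unsigned long)page;
        return 0;
}

static int setup_page_dma(struct page_dma *p)
{
        p->page = malloc(4096);
        if (!p->page)
                return -1;              /* -ENOMEM */
        if (fake_map(p->page, &p->daddr)) {
                free(p->page);          /* unwind the allocation */
                return -2;              /* -EINVAL */
        }
        return 0;
}

static void cleanup_page_dma(struct page_dma *p)
{
        free(p->page);                  /* the unmap would come first */
        memset(p, 0, sizeof(*p));
}

int main(void)
{
        struct page_dma p;
        if (setup_page_dma(&p) == 0)
                cleanup_page_dma(&p);
        return 0;
}
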
@@ -405,33 +414,18 @@ static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
        fill_page_dma(dev, p, v);
 }
 
-static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+static int
+setup_scratch_page(struct drm_device *dev,
+                  struct i915_page_dma *scratch,
+                  gfp_t gfp)
 {
-       struct i915_page_scratch *sp;
-       int ret;
-
-       sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-       if (sp == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
-       if (ret) {
-               kfree(sp);
-               return ERR_PTR(ret);
-       }
-
-       set_pages_uc(px_page(sp), 1);
-
-       return sp;
+       return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
 }
 
-static void free_scratch_page(struct drm_device *dev,
-                             struct i915_page_scratch *sp)
+static void cleanup_scratch_page(struct drm_device *dev,
+                                struct i915_page_dma *scratch)
 {
-       set_pages_wb(px_page(sp), 1);
-
-       cleanup_px(dev, sp);
-       kfree(sp);
+       cleanup_page_dma(dev, scratch);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -477,7 +471,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
 {
        gen8_pte_t scratch_pte;
 
-       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                      I915_CACHE_LLC, true);
 
        fill_px(vm->dev, pt, scratch_pte);
@@ -488,9 +482,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
 {
        gen6_pte_t scratch_pte;
 
-       WARN_ON(px_dma(vm->scratch_page) == 0);
+       WARN_ON(vm->scratch_page.daddr == 0);
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        fill32_px(vm->dev, pt, scratch_pte);
@@ -669,6 +663,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
                          unsigned entry,
                          dma_addr_t addr)
 {
+       struct intel_ring *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
        int ret;
 
@@ -678,13 +673,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
-       intel_ring_emit(engine, upper_32_bits(addr));
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
-       intel_ring_emit(engine, lower_32_bits(addr));
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
+       intel_ring_emit(ring, upper_32_bits(addr));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
+       intel_ring_emit(ring, lower_32_bits(addr));
+       intel_ring_advance(ring);
 
        return 0;
 }
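
Reviewer note: the PDP load is two MI_LOAD_REGISTER_IMM writes because each register of the UDW/LDW pair is 32 bits wide, so the 64-bit page-directory address is split into halves. A tiny standalone illustration of the split, reimplementing the upper_32_bits()/lower_32_bits() helpers as functions:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
        uint64_t addr = 0x0000001234567000ull;
        /* The hunk above emits one MI_LOAD_REGISTER_IMM per half. */
        printf("PDP_UDW <- 0x%08x\n", upper_32_bits(addr));
        printf("PDP_LDW <- 0x%08x\n", lower_32_bits(addr));
        return 0;
}
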
@@ -773,7 +768,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                                 I915_CACHE_LLC, use_scratch);
 
        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -879,9 +874,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
        struct drm_device *dev = vm->dev;
        int ret;
 
-       vm->scratch_page = alloc_scratch_page(dev);
-       if (IS_ERR(vm->scratch_page))
-               return PTR_ERR(vm->scratch_page);
+       ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+       if (ret)
+               return ret;
 
        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
@@ -915,7 +910,7 @@ free_pd:
 free_pt:
        free_pt(dev, vm->scratch_pt);
 free_scratch_page:
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 
        return ret;
 }
@@ -959,7 +954,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
                free_pdp(dev, vm->scratch_pdp);
        free_pd(dev, vm->scratch_pd);
        free_pt(dev, vm->scratch_pt);
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
@@ -1456,7 +1451,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        struct i915_address_space *vm = &ppgtt->base;
        uint64_t start = ppgtt->base.start;
        uint64_t length = ppgtt->base.total;
-       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                                 I915_CACHE_LLC, true);
 
        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -1573,7 +1568,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        uint32_t  pte, pde;
        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
@@ -1660,11 +1655,12 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
                         struct drm_i915_gem_request *req)
 {
+       struct intel_ring *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
        if (ret)
                return ret;
 
@@ -1672,13 +1668,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-       intel_ring_emit(engine, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-       intel_ring_emit(engine, get_pd_offset(ppgtt));
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -1686,11 +1682,12 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
+       struct intel_ring *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
        if (ret)
                return ret;
 
@@ -1698,17 +1695,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-       intel_ring_emit(engine, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-       intel_ring_emit(engine, get_pd_offset(ppgtt));
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        /* XXX: RCS is the only one to auto invalidate the TLBs? */
        if (engine->id != RCS) {
-               ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
                if (ret)
                        return ret;
        }
@@ -1796,7 +1793,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % GEN6_PTES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        while (num_entries) {
@@ -1942,14 +1939,15 @@ unwind_out:
 static int gen6_init_scratch(struct i915_address_space *vm)
 {
        struct drm_device *dev = vm->dev;
+       int ret;
 
-       vm->scratch_page = alloc_scratch_page(dev);
-       if (IS_ERR(vm->scratch_page))
-               return PTR_ERR(vm->scratch_page);
+       ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+       if (ret)
+               return ret;
 
        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
-               free_scratch_page(dev, vm->scratch_page);
+               cleanup_scratch_page(dev, &vm->scratch_page);
                return PTR_ERR(vm->scratch_pt);
        }
 
@@ -1963,7 +1961,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
        struct drm_device *dev = vm->dev;
 
        free_pt(dev, vm->scratch_pt);
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -2009,7 +2007,7 @@ alloc:
                                                  0, ggtt->base.total,
                                                  DRM_MM_TOPDOWN);
        if (ret == -ENOSPC && !retried) {
-               ret = i915_gem_evict_something(dev, &ggtt->base,
+               ret = i915_gem_evict_something(&ggtt->base,
                                               GEN6_PD_SIZE, GEN6_PD_ALIGN,
                                               I915_CACHE_NONE,
                                               0, ggtt->base.total,
@@ -2101,11 +2099,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        return 0;
 }
 
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+                          struct drm_i915_private *dev_priv)
 {
-       ppgtt->base.dev = dev;
+       ppgtt->base.dev = &dev_priv->drm;
 
-       if (INTEL_INFO(dev)->gen < 8)
+       if (INTEL_INFO(dev_priv)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
        else
                return gen8_ppgtt_init(ppgtt);
@@ -2115,9 +2114,9 @@ static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
 {
        drm_mm_init(&vm->mm, vm->start, vm->total);
-       vm->dev = &dev_priv->drm;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
+       INIT_LIST_HEAD(&vm->unbound_list);
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
@@ -2140,15 +2139,17 @@ static void gtt_write_workarounds(struct drm_device *dev)
                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }
 
-static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+                          struct drm_i915_private *dev_priv,
+                          struct drm_i915_file_private *file_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret = 0;
+       int ret;
 
-       ret = __hw_ppgtt_init(dev, ppgtt);
+       ret = __hw_ppgtt_init(ppgtt, dev_priv);
        if (ret == 0) {
                kref_init(&ppgtt->ref);
                i915_address_space_init(&ppgtt->base, dev_priv);
+               ppgtt->base.file = file_priv;
        }
 
        return ret;
@@ -2180,7 +2181,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 }
 
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *dev_priv,
+                 struct drm_i915_file_private *fpriv)
 {
        struct i915_hw_ppgtt *ppgtt;
        int ret;
@@ -2189,14 +2191,12 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
        if (!ppgtt)
                return ERR_PTR(-ENOMEM);
 
-       ret = i915_ppgtt_init(dev, ppgtt);
+       ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
        if (ret) {
                kfree(ppgtt);
                return ERR_PTR(ret);
        }
 
-       ppgtt->file_priv = fpriv;
-
        trace_i915_ppgtt_create(&ppgtt->base);
 
        return ppgtt;
@@ -2209,9 +2209,10 @@ void  i915_ppgtt_release(struct kref *kref)
 
        trace_i915_ppgtt_release(&ppgtt->base);
 
-       /* vmas should already be unbound */
+       /* vmas should already be unbound and destroyed */
        WARN_ON(!list_empty(&ppgtt->base.active_list));
        WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+       WARN_ON(!list_empty(&ppgtt->base.unbound_list));
 
        list_del(&ppgtt->base.global_link);
        drm_mm_takedown(&ppgtt->base.mm);
@@ -2220,47 +2221,21 @@ void  i915_ppgtt_release(struct kref *kref)
        kfree(ppgtt);
 }
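
Reviewer note: i915_ppgtt_create() hands back a reference-counted PPGTT (kref_init() in i915_ppgtt_init()), and i915_ppgtt_release() runs once the last reference drops, now also asserting the new unbound_list is empty. A toy model of that pairing, with a plain counter standing in for struct kref:

#include <stdio.h>
#include <stdlib.h>

struct ppgtt { int refcount; };

static struct ppgtt *ppgtt_create(void)
{
        struct ppgtt *p = calloc(1, sizeof(*p));
        if (p)
                p->refcount = 1;        /* kref_init() equivalent */
        return p;
}

static void ppgtt_put(struct ppgtt *p)
{
        if (--p->refcount == 0) {
                /* all VMA lists must already be empty here */
                free(p);
        }
}

int main(void)
{
        struct ppgtt *p = ppgtt_create();
        if (!p)
                return 1;
        ppgtt_put(p);   /* last reference: the release path runs */
        return 0;
}
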
 
-extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
-static bool needs_idle_maps(struct drm_device *dev)
+static bool needs_idle_maps(struct drm_i915_private *dev_priv)
 {
 #ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+       if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
                return true;
 #endif
        return false;
 }
 
-static bool do_idling(struct drm_i915_private *dev_priv)
-{
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool ret = dev_priv->mm.interruptible;
-
-       if (unlikely(ggtt->do_idle_maps)) {
-               dev_priv->mm.interruptible = false;
-               if (i915_gem_wait_for_idle(dev_priv)) {
-                       DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
-                       /* Wait a bit, in hopes it avoids the hang */
-                       udelay(10);
-               }
-       }
-
-       return ret;
-}
-
-static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       if (unlikely(ggtt->do_idle_maps))
-               dev_priv->mm.interruptible = interruptible;
-}
-
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
@@ -2329,12 +2304,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
 }
 
 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
@@ -2527,7 +2497,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                      I915_CACHE_LLC,
                                      use_scratch);
        for (i = 0; i < num_entries; i++)
@@ -2559,7 +2529,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, use_scratch, 0);
 
        for (i = 0; i < num_entries; i++)
@@ -2638,8 +2608,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        if (obj->gt_ro)
                pte_flags |= PTE_READ_ONLY;
 
-       vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
-                               vma->node.start,
+       vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
                                cache_level, pte_flags);
 
        /*
@@ -2647,7 +2616,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
         * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
         * upgrade to both bound if we bind either to avoid double-binding.
         */
-       vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+       vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
        return 0;
 }
@@ -2669,19 +2638,17 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                pte_flags |= PTE_READ_ONLY;
 
 
-       if (flags & GLOBAL_BIND) {
+       if (flags & I915_VMA_GLOBAL_BIND) {
                vma->vm->insert_entries(vma->vm,
-                                       vma->ggtt_view.pages,
-                                       vma->node.start,
+                                       vma->pages, vma->node.start,
                                        cache_level, pte_flags);
        }
 
-       if (flags & LOCAL_BIND) {
+       if (flags & I915_VMA_LOCAL_BIND) {
                struct i915_hw_ppgtt *appgtt =
                        to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
                appgtt->base.insert_entries(&appgtt->base,
-                                           vma->ggtt_view.pages,
-                                           vma->node.start,
+                                           vma->pages, vma->node.start,
                                            cache_level, pte_flags);
        }
 
@@ -2690,42 +2657,36 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-       struct drm_device *dev = vma->vm->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj = vma->obj;
-       const uint64_t size = min_t(uint64_t,
-                                   obj->base.size,
-                                   vma->node.size);
+       struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+       const u64 size = min(vma->size, vma->node.size);
 
-       if (vma->bound & GLOBAL_BIND) {
+       if (vma->flags & I915_VMA_GLOBAL_BIND)
                vma->vm->clear_range(vma->vm,
-                                    vma->node.start,
-                                    size,
+                                    vma->node.start, size,
                                     true);
-       }
-
-       if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
-               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 
+       if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
                appgtt->base.clear_range(&appgtt->base,
-                                        vma->node.start,
-                                        size,
+                                        vma->node.start, size,
                                         true);
-       }
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       bool interruptible;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct device *kdev = &dev_priv->drm.pdev->dev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-       interruptible = do_idling(dev_priv);
+       if (unlikely(ggtt->do_idle_maps)) {
+               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+                       DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+                       /* Wait a bit, in hopes it avoids the hang */
+                       udelay(10);
+               }
+       }
 
-       dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+       dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
                     PCI_DMA_BIDIRECTIONAL);
-
-       undo_idling(dev_priv, interruptible);
 }
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2736,19 +2697,14 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
        if (node->color != color)
                *start += 4096;
 
-       if (!list_empty(&node->node_list)) {
-               node = list_entry(node->node_list.next,
-                                 struct drm_mm_node,
-                                 node_list);
-               if (node->allocated && node->color != color)
-                       *end -= 4096;
-       }
+       node = list_first_entry_or_null(&node->node_list,
+                                       struct drm_mm_node,
+                                       node_list);
+       if (node && node->allocated && node->color != color)
+               *end -= 4096;
 }
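
Reviewer note: the rewritten color-adjust hook keeps the same rule in fewer lines — when a hole borders a node of a different cache color on either side, shrink the hole by one 4 KiB guard page at that end. A standalone model of the rule, with colors as plain ints and -1 meaning "no neighbour":

#include <stdio.h>

static void color_adjust(int prev_color, int next_color, int color,
                         unsigned long *start, unsigned long *end)
{
        if (prev_color != color)
                *start += 4096;
        if (next_color >= 0 && next_color != color)
                *end -= 4096;
}

int main(void)
{
        unsigned long start = 0x10000, end = 0x20000;
        color_adjust(0, 1, 1, &start, &end);  /* differs only on the left */
        printf("hole: [%#lx, %#lx)\n", start, end);
        return 0;
}
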
 
-static int i915_gem_setup_global_gtt(struct drm_device *dev,
-                                    u64 start,
-                                    u64 mappable_end,
-                                    u64 end)
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 {
        /* Let GEM manage all of the aperture.
         *
@@ -2759,48 +2715,15 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
         * aperture.  One page should be enough to keep any prefetching inside
         * of the aperture.
         */
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_mm_node *entry;
-       struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;
+       struct drm_mm_node *entry;
        int ret;
 
-       BUG_ON(mappable_end > end);
-
-       ggtt->base.start = start;
-
-       /* Subtract the guard page before address space initialization to
-        * shrink the range used by drm_mm */
-       ggtt->base.total = end - start - PAGE_SIZE;
-       i915_address_space_init(&ggtt->base, dev_priv);
-       ggtt->base.total += PAGE_SIZE;
-
        ret = intel_vgt_balloon(dev_priv);
        if (ret)
                return ret;
 
-       if (!HAS_LLC(dev))
-               ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
-
-       /* Mark any preallocated objects as occupied */
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
-
-               DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
-                             i915_gem_obj_ggtt_offset(obj), obj->base.size);
-
-               WARN_ON(i915_gem_obj_ggtt_bound(obj));
-               ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-               if (ret) {
-                       DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
-                       return ret;
-               }
-               vma->bound |= GLOBAL_BIND;
-               __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
-       }
-
        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2810,18 +2733,19 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
        }
 
        /* And finally clear the reserved guard page */
-       ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
+       ggtt->base.clear_range(&ggtt->base,
+                              ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
+                              true);
 
-       if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+       if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
                struct i915_hw_ppgtt *ppgtt;
 
                ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
                if (!ppgtt)
                        return -ENOMEM;
 
-               ret = __hw_ppgtt_init(dev, ppgtt);
+               ret = __hw_ppgtt_init(ppgtt, dev_priv);
                if (ret) {
-                       ppgtt->base.cleanup(&ppgtt->base);
                        kfree(ppgtt);
                        return ret;
                }
@@ -2848,34 +2772,21 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
        return 0;
 }
 
-/**
- * i915_gem_init_ggtt - Initialize GEM for Global GTT
- * @dev: DRM device
- */
-void i915_gem_init_ggtt(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
-}
-
 /**
  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev: DRM device
+ * @dev_priv: i915 device
  */
-void i915_ggtt_cleanup_hw(struct drm_device *dev)
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
                ppgtt->base.cleanup(&ppgtt->base);
+               kfree(ppgtt);
        }
 
-       i915_gem_cleanup_stolen(dev);
+       i915_gem_cleanup_stolen(&dev_priv->drm);
 
        if (drm_mm_initialized(&ggtt->base.mm)) {
                intel_vgt_deballoon(dev_priv);
@@ -2885,6 +2796,9 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
        }
 
        ggtt->base.cleanup(&ggtt->base);
+
+       arch_phys_wc_del(ggtt->mtrr);
+       io_mapping_fini(&ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2965,17 +2879,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
                return (gen9_gmch_ctl - 0xf0 + 1) << 22;
 }
 
-static int ggtt_probe_common(struct drm_device *dev,
-                            size_t gtt_size)
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct i915_page_scratch *scratch_page;
-       phys_addr_t ggtt_phys_addr;
+       struct pci_dev *pdev = ggtt->base.dev->pdev;
+       phys_addr_t phys_addr;
+       int ret;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
-       ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
-                        (pci_resource_len(dev->pdev, 0) / 2);
+       phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
 
        /*
         * On BXT, writes larger than 64 bits to the GTT pagetable range will be
@@ -2984,25 +2895,25 @@ static int ggtt_probe_common(struct drm_device *dev,
         * resort to an uncached mapping. The WC issue is easily caught by the
         * readback check when writing GTT PTE entries.
         */
-       if (IS_BROXTON(dev))
-               ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
+       if (IS_BROXTON(ggtt->base.dev))
+               ggtt->gsm = ioremap_nocache(phys_addr, size);
        else
-               ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+               ggtt->gsm = ioremap_wc(phys_addr, size);
        if (!ggtt->gsm) {
-               DRM_ERROR("Failed to map the gtt page table\n");
+               DRM_ERROR("Failed to map the ggtt page table\n");
                return -ENOMEM;
        }
 
-       scratch_page = alloc_scratch_page(dev);
-       if (IS_ERR(scratch_page)) {
+       ret = setup_scratch_page(ggtt->base.dev,
+                                &ggtt->base.scratch_page,
+                                GFP_DMA32);
+       if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(ggtt->gsm);
-               return PTR_ERR(scratch_page);
+               return ret;
        }
 
-       ggtt->base.scratch_page = scratch_page;
-
        return 0;
 }
 
@@ -3079,42 +2990,49 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
 }
 
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+       iounmap(ggtt->gsm);
+       cleanup_scratch_page(vm->dev, &vm->scratch_page);
+}
+
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_device *dev = ggtt->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       unsigned int size;
        u16 snb_gmch_ctl;
-       int ret;
 
        /* TODO: We're not aware of mappable constraints on gen8 yet */
-       ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
-       ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+       ggtt->mappable_base = pci_resource_start(pdev, 2);
+       ggtt->mappable_end = pci_resource_len(pdev, 2);
 
-       if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
-               pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
+               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
 
-       pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+       pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-       if (INTEL_INFO(dev)->gen >= 9) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
-               ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
-       } else if (IS_CHERRYVIEW(dev)) {
+               size = gen8_get_total_gtt_size(snb_gmch_ctl);
+       } else if (IS_CHERRYVIEW(dev_priv)) {
                ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
-               ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
+               size = chv_get_total_gtt_size(snb_gmch_ctl);
        } else {
                ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
-               ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
+               size = gen8_get_total_gtt_size(snb_gmch_ctl);
        }
 
-       ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+       ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
-       if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+       if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
                chv_setup_private_ppat(dev_priv);
        else
                bdw_setup_private_ppat(dev_priv);
 
-       ret = ggtt_probe_common(dev, ggtt->size);
-
+       ggtt->base.cleanup = gen6_gmch_remove;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
        ggtt->base.insert_page = gen8_ggtt_insert_page;
@@ -3126,57 +3044,65 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        if (IS_CHERRYVIEW(dev_priv))
                ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
-       return ret;
+       return ggtt_probe_common(ggtt, size);
 }
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_device *dev = ggtt->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       unsigned int size;
        u16 snb_gmch_ctl;
-       int ret;
 
-       ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
-       ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+       ggtt->mappable_base = pci_resource_start(pdev, 2);
+       ggtt->mappable_end = pci_resource_len(pdev, 2);
 
        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
         */
-       if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+       if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
                DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
                return -ENXIO;
        }
 
-       if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
-               pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
-       pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+       pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
        ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-       ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
-       ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-       ret = ggtt_probe_common(dev, ggtt->size);
+       size = gen6_get_total_gtt_size(snb_gmch_ctl);
+       ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
        ggtt->base.clear_range = gen6_ggtt_clear_range;
        ggtt->base.insert_page = gen6_ggtt_insert_page;
        ggtt->base.insert_entries = gen6_ggtt_insert_entries;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.cleanup = gen6_gmch_remove;
+
+       if (HAS_EDRAM(dev_priv))
+               ggtt->base.pte_encode = iris_pte_encode;
+       else if (IS_HASWELL(dev_priv))
+               ggtt->base.pte_encode = hsw_pte_encode;
+       else if (IS_VALLEYVIEW(dev_priv))
+               ggtt->base.pte_encode = byt_pte_encode;
+       else if (INTEL_GEN(dev_priv) >= 7)
+               ggtt->base.pte_encode = ivb_pte_encode;
+       else
+               ggtt->base.pte_encode = snb_pte_encode;
 
-       return ret;
+       return ggtt_probe_common(ggtt, size);
 }
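
Reviewer note: gen6_gmch_probe() now picks the PTE encoder itself instead of leaving that to i915_ggtt_init_hw(); the choice is a most-specific-first platform ladder. A sketch of the same dispatch shape with hypothetical encoder stubs:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*pte_encode_fn)(uint64_t addr);

/* Hypothetical stand-ins for the per-platform encoders. */
static uint64_t enc_iris(uint64_t a) { return a | 1; }
static uint64_t enc_hsw(uint64_t a)  { return a | 2; }
static uint64_t enc_byt(uint64_t a)  { return a | 3; }
static uint64_t enc_ivb(uint64_t a)  { return a | 4; }
static uint64_t enc_snb(uint64_t a)  { return a | 5; }

/* Mirrors the ladder in the hunk above: most specific first. */
static pte_encode_fn pick_encoder(int gen, int has_edram,
                                  int is_hsw, int is_vlv)
{
        if (has_edram)  return enc_iris;
        if (is_hsw)     return enc_hsw;
        if (is_vlv)     return enc_byt;
        if (gen >= 7)   return enc_ivb;
        return enc_snb;
}

int main(void)
{
        pte_encode_fn enc = pick_encoder(7, 0, 0, 0);
        printf("pte = %#llx\n", (unsigned long long)enc(0x1000));
        return 0;
}
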
 
-static void gen6_gmch_remove(struct i915_address_space *vm)
+static void i915_gmch_remove(struct i915_address_space *vm)
 {
-       struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
-
-       iounmap(ggtt->gsm);
-       free_scratch_page(vm->dev, vm->scratch_page);
+       intel_gmch_remove();
 }
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_device *dev = ggtt->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3188,12 +3114,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
                      &ggtt->mappable_base, &ggtt->mappable_end);
 
-       ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+       ggtt->do_idle_maps = needs_idle_maps(dev_priv);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
        ggtt->base.clear_range = i915_ggtt_clear_range;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.cleanup = i915_gmch_remove;
 
        if (unlikely(ggtt->do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3201,65 +3128,40 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        return 0;
 }
 
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
-       intel_gmch_remove();
-}
-
 /**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev: DRM device
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @dev_priv: i915 device
  */
-int i915_ggtt_init_hw(struct drm_device *dev)
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       if (INTEL_INFO(dev)->gen <= 5) {
-               ggtt->probe = i915_gmch_probe;
-               ggtt->base.cleanup = i915_gmch_remove;
-       } else if (INTEL_INFO(dev)->gen < 8) {
-               ggtt->probe = gen6_gmch_probe;
-               ggtt->base.cleanup = gen6_gmch_remove;
-
-               if (HAS_EDRAM(dev))
-                       ggtt->base.pte_encode = iris_pte_encode;
-               else if (IS_HASWELL(dev))
-                       ggtt->base.pte_encode = hsw_pte_encode;
-               else if (IS_VALLEYVIEW(dev))
-                       ggtt->base.pte_encode = byt_pte_encode;
-               else if (INTEL_INFO(dev)->gen >= 7)
-                       ggtt->base.pte_encode = ivb_pte_encode;
-               else
-                       ggtt->base.pte_encode = snb_pte_encode;
-       } else {
-               ggtt->probe = gen8_gmch_probe;
-               ggtt->base.cleanup = gen6_gmch_remove;
-       }
-
-       ggtt->base.dev = dev;
-       ggtt->base.is_ggtt = true;
+       ggtt->base.dev = &dev_priv->drm;
 
-       ret = ggtt->probe(ggtt);
+       if (INTEL_GEN(dev_priv) <= 5)
+               ret = i915_gmch_probe(ggtt);
+       else if (INTEL_GEN(dev_priv) < 8)
+               ret = gen6_gmch_probe(ggtt);
+       else
+               ret = gen8_gmch_probe(ggtt);
        if (ret)
                return ret;
 
        if ((ggtt->base.total - 1) >> 32) {
                DRM_ERROR("We never expected a Global GTT with more than 32bits"
-                         "of address space! Found %lldM!\n",
+                         " of address space! Found %lldM!\n",
                          ggtt->base.total >> 20);
                ggtt->base.total = 1ULL << 32;
                ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
        }
 
-       /*
-        * Initialise stolen early so that we may reserve preallocated
-        * objects for the BIOS to KMS transition.
-        */
-       ret = i915_gem_init_stolen(dev);
-       if (ret)
-               goto out_gtt_cleanup;
+       if (ggtt->mappable_end > ggtt->base.total) {
+               DRM_ERROR("mappable aperture extends past end of GGTT,"
+                         " aperture=%llx, total=%llx\n",
+                         ggtt->mappable_end, ggtt->base.total);
+               ggtt->mappable_end = ggtt->base.total;
+       }
 
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %lluM\n",
@@ -3272,16 +3174,55 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 #endif
 
        return 0;
+}
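
Reviewer note: i915_ggtt_probe_hw() now also clamps an over-large mappable aperture in addition to the existing >32-bit total clamp; both reduce the probed values to something the rest of the driver can address. A compact model of the two checks:

#include <stdint.h>
#include <stdio.h>

static void clamp_ggtt(uint64_t *total, uint64_t *mappable_end)
{
        if ((*total - 1) >> 32)          /* more than 32 bits of space */
                *total = 1ull << 32;
        if (*mappable_end > *total)      /* aperture past end of GGTT */
                *mappable_end = *total;
}

int main(void)
{
        uint64_t total = 1ull << 33, mappable = 1ull << 33;
        clamp_ggtt(&total, &mappable);
        printf("total=%lluM mappable=%lluM\n",
               (unsigned long long)(total >> 20),
               (unsigned long long)(mappable >> 20));
        return 0;
}
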
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       int ret;
+
+       INIT_LIST_HEAD(&dev_priv->vm_list);
+
+       /* Subtract the guard page before address space initialization to
+        * shrink the range used by drm_mm.
+        */
+       ggtt->base.total -= PAGE_SIZE;
+       i915_address_space_init(&ggtt->base, dev_priv);
+       ggtt->base.total += PAGE_SIZE;
+       if (!HAS_LLC(dev_priv))
+               ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+
+       if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+                               dev_priv->ggtt.mappable_base,
+                               dev_priv->ggtt.mappable_end)) {
+               ret = -EIO;
+               goto out_gtt_cleanup;
+       }
+
+       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+
+       /*
+        * Initialise stolen early so that we may reserve preallocated
+        * objects for the BIOS to KMS transition.
+        */
+       ret = i915_gem_init_stolen(&dev_priv->drm);
+       if (ret)
+               goto out_gtt_cleanup;
+
+       return 0;
 
 out_gtt_cleanup:
        ggtt->base.cleanup(&ggtt->base);
-
        return ret;
 }
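
Reviewer note: the guard-page arithmetic in i915_ggtt_init_hw() hides the final page from drm_mm (so nothing can be allocated there) while the reported total still includes it, which is why i915_gem_init_ggtt() clears the guard page at ggtt->base.total - PAGE_SIZE. A toy model of the trick, with a stub standing in for drm_mm_init():

#include <stdio.h>

#define PAGE_SIZE 4096ul

static unsigned long mm_managed;
static void mm_init(unsigned long size) { mm_managed = size; }

int main(void)
{
        unsigned long total = 256ul << 20;   /* probed GGTT size */

        total -= PAGE_SIZE;   /* hide the guard page from the allocator */
        mm_init(total);
        total += PAGE_SIZE;   /* but keep reporting the full size */

        printf("managed=%lu reported=%lu\n", mm_managed, total);
        return 0;
}
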
 
-int i915_ggtt_enable_hw(struct drm_device *dev)
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
 {
-       if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+       if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
                return -EIO;
 
        return 0;
@@ -3291,8 +3232,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
+       struct drm_i915_gem_object *obj, *on;
 
        i915_check_and_clear_faults(dev_priv);
 
@@ -3300,20 +3240,32 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
                               true);
 
-       /* Cache flush objects bound into GGTT and rebind them. */
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+       ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+
+       /* clflush objects bound into the GGTT and rebind them. */
+       list_for_each_entry_safe(obj, on,
+                                &dev_priv->mm.bound_list, global_list) {
+               bool ggtt_bound = false;
+               struct i915_vma *vma;
+
                list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (vma->vm != &ggtt->base)
                                continue;
 
+                       if (!i915_vma_unbind(vma))
+                               continue;
+
                        WARN_ON(i915_vma_bind(vma, obj->cache_level,
                                              PIN_UPDATE));
+                       ggtt_bound = true;
                }
 
-               if (obj->pin_display)
+               if (ggtt_bound)
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
        }
 
+       ggtt->base.closed = false;
+
        if (INTEL_INFO(dev)->gen >= 8) {
                if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
                        chv_setup_private_ppat(dev_priv);
@@ -3331,7 +3283,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
                        struct i915_hw_ppgtt *ppgtt;
 
-                       if (vm->is_ggtt)
+                       if (i915_is_ggtt(vm))
                                ppgtt = dev_priv->mm.aliasing_ppgtt;
                        else
                                ppgtt = i915_vm_to_ppgtt(vm);
@@ -3344,65 +3296,155 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        i915_ggtt_flush(dev_priv);
 }
 
+static void
+i915_vma_retire(struct i915_gem_active *active,
+               struct drm_i915_gem_request *rq)
+{
+       const unsigned int idx = rq->engine->id;
+       struct i915_vma *vma =
+               container_of(active, struct i915_vma, last_read[idx]);
+
+       GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+       i915_vma_clear_active(vma, idx);
+       if (i915_vma_is_active(vma))
+               return;
+
+       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+               WARN_ON(i915_vma_unbind(vma));
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->node.allocated);
+       GEM_BUG_ON(i915_vma_is_active(vma));
+       GEM_BUG_ON(!i915_vma_is_closed(vma));
+       GEM_BUG_ON(vma->fence);
+
+       list_del(&vma->vm_link);
+       if (!i915_vma_is_ggtt(vma))
+               i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+       GEM_BUG_ON(i915_vma_is_closed(vma));
+       vma->flags |= I915_VMA_CLOSED;
+
+       list_del_init(&vma->obj_link);
+       if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+               WARN_ON(i915_vma_unbind(vma));
+}
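
Reviewer note: the new VMA lifecycle helpers split teardown in two — i915_vma_close() marks the VMA closed and unbinds it immediately only if it is idle and unpinned; otherwise the unbind is deferred to i915_vma_retire() when the last request completes, and i915_vma_destroy() then frees the closed, unbound VMA. A small state sketch of that deferral, with hypothetical flags rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct vma { bool closed, active, pinned, bound; };

static void vma_unbind(struct vma *v) { v->bound = false; }

static void vma_close(struct vma *v)
{
        v->closed = true;
        if (!v->active && !v->pinned)
                vma_unbind(v);          /* idle: unbind right away */
}

/* Called when the last request using the VMA retires. */
static void vma_retire(struct vma *v)
{
        v->active = false;
        if (v->closed && !v->pinned)
                vma_unbind(v);          /* deferred unbind */
}

int main(void)
{
        struct vma v = { .active = true, .bound = true };
        vma_close(&v);                  /* still active: stays bound */
        printf("after close: bound=%d\n", v.bound);
        vma_retire(&v);
        printf("after retire: bound=%d\n", v.bound);
        return 0;
}
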
+
 static struct i915_vma *
-__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                     struct i915_address_space *vm,
-                     const struct i915_ggtt_view *ggtt_view)
+__i915_vma_create(struct drm_i915_gem_object *obj,
+                 struct i915_address_space *vm,
+                 const struct i915_ggtt_view *view)
 {
        struct i915_vma *vma;
+       int i;
 
-       if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-               return ERR_PTR(-EINVAL);
+       GEM_BUG_ON(vm->closed);
 
        vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&vma->vm_link);
-       INIT_LIST_HEAD(&vma->obj_link);
        INIT_LIST_HEAD(&vma->exec_list);
+       for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+               init_request_active(&vma->last_read[i], i915_vma_retire);
+       init_request_active(&vma->last_fence, NULL);
+       list_add(&vma->vm_link, &vm->unbound_list);
        vma->vm = vm;
        vma->obj = obj;
-       vma->is_ggtt = i915_is_ggtt(vm);
+       vma->size = obj->base.size;
+
+       if (view) {
+               vma->ggtt_view = *view;
+               if (view->type == I915_GGTT_VIEW_PARTIAL) {
+                       vma->size = view->params.partial.size;
+                       vma->size <<= PAGE_SHIFT;
+               } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+                       vma->size =
+                               intel_rotation_info_size(&view->params.rotated);
+                       vma->size <<= PAGE_SHIFT;
+               }
+       }
 
-       if (i915_is_ggtt(vm))
-               vma->ggtt_view = *ggtt_view;
-       else
+       if (i915_is_ggtt(vm)) {
+               vma->flags |= I915_VMA_GGTT;
+       } else {
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+       }
 
        list_add_tail(&vma->obj_link, &obj->vma_list);
-
        return vma;
 }
 
+static inline bool vma_matches(struct i915_vma *vma,
+                              struct i915_address_space *vm,
+                              const struct i915_ggtt_view *view)
+{
+       if (vma->vm != vm)
+               return false;
+
+       if (!i915_vma_is_ggtt(vma))
+               return true;
+
+       if (!view)
+               return vma->ggtt_view.type == 0;
+
+       if (vma->ggtt_view.type != view->type)
+               return false;
+
+       return memcmp(&vma->ggtt_view.params,
+                     &view->params,
+                     sizeof(view->params)) == 0;
+}
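
Reviewer note: vma_matches() encodes the lookup key — address space alone for PPGTT VMAs, plus view type and parameters for GGTT VMAs, with a NULL view treated as the normal view (type 0). A standalone model of the predicate with simplified types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct view { int type; int params[2]; };
struct vma  { const void *vm; bool is_ggtt; struct view view; };

static bool vma_matches(const struct vma *vma, const void *vm,
                        const struct view *view)
{
        if (vma->vm != vm)
                return false;
        if (!vma->is_ggtt)
                return true;            /* PPGTT: vm is the whole key */
        if (!view)
                return vma->view.type == 0;   /* normal view */
        if (vma->view.type != view->type)
                return false;
        return memcmp(vma->view.params, view->params,
                      sizeof(view->params)) == 0;
}

int main(void)
{
        int vm;
        struct vma v = { .vm = &vm, .is_ggtt = true };
        printf("normal match: %d\n", vma_matches(&v, &vm, NULL));
        return 0;
}
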
+
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
+i915_vma_create(struct drm_i915_gem_object *obj,
+               struct i915_address_space *vm,
+               const struct i915_ggtt_view *view)
+{
+       GEM_BUG_ON(view && !i915_is_ggtt(vm));
+       GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+       return __i915_vma_create(obj, vm, view);
+}
+
+struct i915_vma *
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
+                   const struct i915_ggtt_view *view)
 {
        struct i915_vma *vma;
 
-       vma = i915_gem_obj_to_vma(obj, vm);
-       if (!vma)
-               vma = __i915_gem_vma_create(obj, vm,
-                                           i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+       list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
+               if (vma_matches(vma, vm, view))
+                       return vma;
 
-       return vma;
+       return NULL;
 }
 
 struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-                                      const struct i915_ggtt_view *view)
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm,
+                                 const struct i915_ggtt_view *view)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+       struct i915_vma *vma;
 
+       GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+       vma = i915_gem_obj_to_vma(obj, vm, view);
        if (!vma)
-               vma = __i915_gem_vma_create(obj, &ggtt->base, view);
+               vma = __i915_vma_create(obj, vm, view);
 
+       GEM_BUG_ON(i915_vma_is_closed(vma));
        return vma;
-
 }
 
 static struct scatterlist *
@@ -3434,18 +3476,16 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 }
 
 static struct sg_table *
-intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
+intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
                          struct drm_i915_gem_object *obj)
 {
        const size_t n_pages = obj->base.size / PAGE_SIZE;
-       unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
-       unsigned int size_pages_uv;
+       unsigned int size = intel_rotation_info_size(rot_info);
        struct sgt_iter sgt_iter;
        dma_addr_t dma_addr;
        unsigned long i;
        dma_addr_t *page_addr_list;
        struct sg_table *st;
-       unsigned int uv_start_page;
        struct scatterlist *sg;
        int ret = -ENOMEM;
 
@@ -3456,18 +3496,12 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
        if (!page_addr_list)
                return ERR_PTR(ret);
 
-       /* Account for UV plane with NV12. */
-       if (rot_info->pixel_format == DRM_FORMAT_NV12)
-               size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
-       else
-               size_pages_uv = 0;
-
        /* Allocate target SG list. */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto err_st_alloc;
 
-       ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
+       ret = sg_alloc_table(st, size, GFP_KERNEL);
        if (ret)
                goto err_sg_alloc;
 
@@ -3480,32 +3514,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
        st->nents = 0;
        sg = st->sgl;
 
-       /* Rotate the pages. */
-       sg = rotate_pages(page_addr_list, 0,
-                         rot_info->plane[0].width, rot_info->plane[0].height,
-                         rot_info->plane[0].width,
-                         st, sg);
-
-       /* Append the UV plane if NV12. */
-       if (rot_info->pixel_format == DRM_FORMAT_NV12) {
-               uv_start_page = size_pages;
-
-               /* Check for tile-row un-alignment. */
-               if (offset_in_page(rot_info->uv_offset))
-                       uv_start_page--;
-
-               rot_info->uv_start_page = uv_start_page;
-
-               sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
-                                 rot_info->plane[1].width, rot_info->plane[1].height,
-                                 rot_info->plane[1].width,
-                                 st, sg);
+       for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+               sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+                                 rot_info->plane[i].width, rot_info->plane[i].height,
+                                 rot_info->plane[i].stride, st, sg);
        }
 
-       DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
-                     obj->base.size, rot_info->plane[0].width,
-                     rot_info->plane[0].height, size_pages + size_pages_uv,
-                     size_pages);
+       DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
+                     obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
        drm_free_large(page_addr_list);
 
@@ -3516,10 +3532,9 @@ err_sg_alloc:
 err_st_alloc:
        drm_free_large(page_addr_list);
 
-       DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
-                     obj->base.size, ret, rot_info->plane[0].width,
-                     rot_info->plane[0].height, size_pages + size_pages_uv,
-                     size_pages);
+       DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+                     obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
        return ERR_PTR(ret);
 }
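
With the per-plane loop above, intel_rotation_info_size() (not shown in
this hunk) only needs to sum the tile counts of each plane; NV12's UV
plane is simply plane[1] with its own offset and stride. A sketch of
that helper, consistent with how it is used for the sg_alloc_table()
sizing above:

	static unsigned int
	intel_rotation_info_size(const struct intel_rotation_info *rot_info)
	{
		unsigned int size = 0;
		int i;

		/* Total pages: width * height (in tiles) summed over planes. */
		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
			size += rot_info->plane[i].width * rot_info->plane[i].height;

		return size;
	}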
 
@@ -3569,28 +3584,27 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
        int ret = 0;
 
-       if (vma->ggtt_view.pages)
+       if (vma->pages)
                return 0;
 
        if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-               vma->ggtt_view.pages = vma->obj->pages;
+               vma->pages = vma->obj->pages;
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
-               vma->ggtt_view.pages =
+               vma->pages =
                        intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
-               vma->ggtt_view.pages =
-                       intel_partial_pages(&vma->ggtt_view, vma->obj);
+               vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
        else
                WARN_ONCE(1, "GGTT view %u not implemented!\n",
                          vma->ggtt_view.type);
 
-       if (!vma->ggtt_view.pages) {
+       if (!vma->pages) {
                DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
                          vma->ggtt_view.type);
                ret = -EINVAL;
-       } else if (IS_ERR(vma->ggtt_view.pages)) {
-               ret = PTR_ERR(vma->ggtt_view.pages);
-               vma->ggtt_view.pages = NULL;
+       } else if (IS_ERR(vma->pages)) {
+               ret = PTR_ERR(vma->pages);
+               vma->pages = NULL;
                DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
                          vma->ggtt_view.type, ret);
        }
@@ -3611,34 +3625,32 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
 {
-       int ret;
        u32 bind_flags;
+       u32 vma_flags;
+       int ret;
 
        if (WARN_ON(flags == 0))
                return -EINVAL;
 
        bind_flags = 0;
        if (flags & PIN_GLOBAL)
-               bind_flags |= GLOBAL_BIND;
+               bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
-               bind_flags |= LOCAL_BIND;
+               bind_flags |= I915_VMA_LOCAL_BIND;
 
+       vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
-               bind_flags |= vma->bound;
+               bind_flags |= vma_flags;
        else
-               bind_flags &= ~vma->bound;
-
+               bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;
 
-       if (vma->bound == 0 && vma->vm->allocate_va_range) {
-               /* XXX: i915_vma_pin() will fix this +- hack */
-               vma->pin_count++;
+       if (vma_flags == 0 && vma->vm->allocate_va_range) {
                trace_i915_va_alloc(vma);
                ret = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start,
                                                 vma->node.size);
-               vma->pin_count--;
                if (ret)
                        return ret;
        }
@@ -3647,56 +3659,47 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        if (ret)
                return ret;
 
-       vma->bound |= bind_flags;
-
+       vma->flags |= bind_flags;
        return 0;
 }
 
-/**
- * i915_ggtt_view_size - Get the size of a GGTT view.
- * @obj: Object the view is of.
- * @view: The view in question.
- *
- * @return The size of the GGTT view in bytes.
- */
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-                   const struct i915_ggtt_view *view)
-{
-       if (view->type == I915_GGTT_VIEW_NORMAL) {
-               return obj->base.size;
-       } else if (view->type == I915_GGTT_VIEW_ROTATED) {
-               return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
-       } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
-               return view->params.partial.size << PAGE_SHIFT;
-       } else {
-               WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
-               return obj->base.size;
-       }
-}
-
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
        void __iomem *ptr;
 
+       /* Access through the GTT requires the device to be awake. */
+       assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
-       if (WARN_ON(!vma->obj->map_and_fenceable))
-               return ERR_PTR(-ENODEV);
+       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+               return IO_ERR_PTR(-ENODEV);
 
-       GEM_BUG_ON(!vma->is_ggtt);
-       GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+       GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+       GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
 
        ptr = vma->iomap;
        if (ptr == NULL) {
-               ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL)
-                       return ERR_PTR(-ENOMEM);
+                       return IO_ERR_PTR(-ENOMEM);
 
                vma->iomap = ptr;
        }
 
-       vma->pin_count++;
+       __i915_vma_pin(vma);
        return ptr;
 }
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(p_vma);
+       if (!vma)
+               return;
+
+       i915_vma_unpin(vma);
+       i915_vma_put(vma);
+}
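
fetch_and_zero() reads the caller's pointer and clears it in a single
expression, so a stale pointer can never be unpinned or released twice.
A minimal sketch of the idiom, assuming the macro is defined along
these lines elsewhere in the driver:

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})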
index aa5f31d1c2edaaa4d41ee220c7b4b33005016ae8..ec78be2f8c7727c483c751f0dddb92c7aeb90531 100644 (file)
 
 #include <linux/io-mapping.h>
 
+#include "i915_gem_request.h"
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
 struct drm_i915_file_private;
+struct drm_i915_fence_reg;
 
 typedef uint32_t gen6_pte_t;
 typedef uint64_t gen8_pte_t;
@@ -137,12 +145,9 @@ enum i915_ggtt_view_type {
 };
 
 struct intel_rotation_info {
-       unsigned int uv_offset;
-       uint32_t pixel_format;
-       unsigned int uv_start_page;
        struct {
                /* tiles */
-               unsigned int width, height;
+               unsigned int width, height, stride, offset;
        } plane[2];
 };
 
@@ -156,8 +161,6 @@ struct i915_ggtt_view {
                } partial;
                struct intel_rotation_info rotated;
        } params;
-
-       struct sg_table *pages;
 };
 
 extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -177,13 +180,38 @@ struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
+       struct drm_i915_fence_reg *fence;
+       struct sg_table *pages;
        void __iomem *iomap;
+       u64 size;
+       u64 display_alignment;
+
+       unsigned int flags;
+       /**
+        * How many users have pinned this object in GTT space. The following
+        * users can each hold at most one reference: pwrite/pread, execbuffer
+        * (objects are not allowed multiple times for the same batchbuffer),
+        * and the framebuffer code. When switching/pageflipping, the
+        * framebuffer code has at most two buffers pinned per crtc.
+        *
+        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+        * bits with absolutely no headroom. So use 4 bits.
+        */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW  BIT(5)
 
        /** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND    (1<<0)
-#define LOCAL_BIND     (1<<1)
-       unsigned int bound : 4;
-       bool is_ggtt : 1;
+#define I915_VMA_GLOBAL_BIND   BIT(6)
+#define I915_VMA_LOCAL_BIND    BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT          BIT(8)
+#define I915_VMA_CAN_FENCE     BIT(9)
+#define I915_VMA_CLOSED                BIT(10)
+
+       unsigned int active;
+       struct i915_gem_active last_read[I915_NUM_ENGINES];
+       struct i915_gem_active last_fence;
 
        /**
         * Support different GGTT views into the same object.
@@ -208,20 +236,66 @@ struct i915_vma {
        struct hlist_node exec_node;
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;
-
-       /**
-        * How many users have pinned this object in GTT space. The following
-        * users can each hold at most one reference: pwrite/pread, execbuffer
-        * (objects are not allowed multiple times for the same batchbuffer),
-        * and the framebuffer code. When switching/pageflipping, the
-        * framebuffer code has at most two buffers pinned per crtc.
-        *
-        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-        * bits with absolutely no headroom. So use 4 bits. */
-       unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+               struct i915_address_space *vm,
+               const struct i915_ggtt_view *view);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+       return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+       return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+       return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+       return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+       return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+                                      unsigned int engine)
+{
+       vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+                                        unsigned int engine)
+{
+       vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+                                             unsigned int engine)
+{
+       return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+       GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+       GEM_BUG_ON(!vma->node.allocated);
+       GEM_BUG_ON(upper_32_bits(vma->node.start));
+       GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+       return lower_32_bits(vma->node.start);
+}
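
The assertions above encode the contract that GGTT offsets fit in 32
bits, since they end up in 32-bit hardware fields and relocation
entries. A usage sketch (hypothetical caller):

	/* The returned offset is guaranteed to fit in a u32, so it can be
	 * written straight into a 32-bit register field. */
	u32 offset = i915_ggtt_offset(vma);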
+
 struct i915_page_dma {
        struct page *page;
        union {
@@ -238,10 +312,6 @@ struct i915_page_dma {
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
-struct i915_page_scratch {
-       struct i915_page_dma base;
-};
-
 struct i915_page_table {
        struct i915_page_dma base;
 
@@ -272,13 +342,22 @@ struct i915_pml4 {
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
+       /* Every address space belongs to a struct file - except for the global
+        * GTT that is owned by the driver (and so @file is set to NULL). In
+        * principle, no information should leak from one context to another
+        * (or between files/processes etc) unless explicitly shared by the
+        * owner. Tracking the owner is important in order to free up per-file
+        * objects along with the file, to aid resource tracking, and to

+        * assign blame.
+        */
+       struct drm_i915_file_private *file;
        struct list_head global_link;
        u64 start;              /* Start offset always 0 for dri2 */
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
 
-       bool is_ggtt;
+       bool closed;
 
-       struct i915_page_scratch *scratch_page;
+       struct i915_page_dma scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
        struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
@@ -306,6 +385,13 @@ struct i915_address_space {
         */
        struct list_head inactive_list;
 
+       /**
+        * List of vma that have been unbound.
+        *
+        * A reference is not held on the buffer while on this list.
+        */
+       struct list_head unbound_list;
+
        /* FIXME: Need a more generic return type */
        gen6_pte_t (*pte_encode)(dma_addr_t addr,
                                 enum i915_cache_level level,
@@ -338,7 +424,7 @@ struct i915_address_space {
                        u32 flags);
 };
 
-#define i915_is_ggtt(V) ((V)->is_ggtt)
+#define i915_is_ggtt(V) (!(V)->file)
 
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
@@ -349,14 +435,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
        struct i915_address_space base;
+       struct io_mapping mappable;     /* Mapping to our CPU mappable region */
 
        size_t stolen_size;             /* Total size of stolen memory */
        size_t stolen_usable_size;      /* Total size minus BIOS reserved */
        size_t stolen_reserved_base;
        size_t stolen_reserved_size;
-       size_t size;                    /* Total size of Global GTT */
        u64 mappable_end;               /* End offset that we can CPU map */
-       struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
 
        /** "Graphics Stolen Memory" holds the global PTEs */
@@ -365,8 +450,6 @@ struct i915_ggtt {
        bool do_idle_maps;
 
        int mtrr;
-
-       int (*probe)(struct i915_ggtt *ggtt);
 };
 
 struct i915_hw_ppgtt {
@@ -380,8 +463,6 @@ struct i915_hw_ppgtt {
                struct i915_page_directory pd;          /* GEN6-7 */
        };
 
-       struct drm_i915_file_private *file_priv;
-
        gen6_pte_t __iomem *pd_addr;
 
        int (*enable)(struct i915_hw_ppgtt *ppgtt);
@@ -521,14 +602,15 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
                px_dma(ppgtt->base.scratch_pd);
 }
 
-int i915_ggtt_init_hw(struct drm_device *dev);
-int i915_ggtt_enable_hw(struct drm_device *dev);
-void i915_gem_init_ggtt(struct drm_device *dev);
-void i915_ggtt_cleanup_hw(struct drm_device *dev);
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 
 int i915_ppgtt_init_hw(struct drm_device *dev);
 void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
                                        struct drm_i915_file_private *fpriv);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
@@ -548,23 +630,67 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
-                     const struct i915_ggtt_view *b)
+/* Flags used by pin/bind&friends. */
+#define PIN_NONBLOCK           BIT(0)
+#define PIN_MAPPABLE           BIT(1)
+#define PIN_ZONE_4G            BIT(2)
+#define PIN_NONFAULT           BIT(3)
+
+#define PIN_MBZ                        BIT(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL             BIT(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER               BIT(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE             BIT(8)
+
+#define PIN_HIGH               BIT(9)
+#define PIN_OFFSET_BIAS                BIT(10)
+#define PIN_OFFSET_FIXED       BIT(11)
+#define PIN_OFFSET_MASK                (~4095)
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+                     u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-       if (WARN_ON(!a || !b))
-               return false;
-
-       if (a->type != b->type)
-               return false;
-       if (a->type != I915_GGTT_VIEW_NORMAL)
-               return !memcmp(&a->params, &b->params, sizeof(a->params));
-       return true;
+       BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+       BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+       BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+       /* Pin early to prevent the shrinker/eviction logic from destroying
+        * our vma as we insert and bind.
+        */
+       if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+               return 0;
+
+       return __i915_vma_do_pin(vma, size, alignment, flags);
 }
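
The fast path works because of the flag layout declared on struct
i915_vma: the pin count lives in the low nibble, so ++vma->flags is the
pin itself, while the XOR inspects only I915_VMA_BIND_MASK (the two
bind bits plus the overflow bit, which the BUILD_BUG_ONs tie to
PIN_GLOBAL, PIN_USER and PIN_MBZ). An equivalent spelling of the test,
for illustration only (hypothetical helper, assuming @flags never sets
PIN_MBZ):

	static inline bool vma_fast_pinned(unsigned int vma_flags, u64 flags)
	{
		/* Every requested bind bit must already be set, and the pin
		 * counter must not have overflowed into I915_VMA_PIN_OVERFLOW. */
		return (vma_flags & I915_VMA_BIND_MASK) ==
		       (flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	}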
 
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-                   const struct i915_ggtt_view *view);
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+       return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+       return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+       vma->flags++;
+       GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!i915_vma_is_pinned(vma));
+       vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       __i915_vma_unpin(vma);
+}
 
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
@@ -580,6 +706,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
  * Returns a valid iomapped pointer or ERR_PTR.
  */
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
 
 /**
  * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
@@ -593,9 +720,14 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
-       GEM_BUG_ON(vma->pin_count == 0);
        GEM_BUG_ON(vma->iomap == NULL);
-       vma->pin_count--;
+       i915_vma_unpin(vma);
+}
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!vma->pages);
+       return sg_page(vma->pages->sgl);
 }
 
 #endif
index f75bbd67a13a2805768cb5ffb9735a3df49b8957..95b7e9afd5f8847089e8b9dc079a7fe548ffebf5 100644 (file)
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
+struct render_state {
+       const struct intel_renderstate_rodata *rodata;
+       struct i915_vma *vma;
+       u32 aux_batch_size;
+       u32 aux_batch_offset;
+};
+
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(const int gen)
+render_state_get_rodata(const struct drm_i915_gem_request *req)
 {
-       switch (gen) {
+       switch (INTEL_GEN(req->i915)) {
        case 6:
                return &gen6_null_state;
        case 7:
@@ -45,35 +52,6 @@ render_state_get_rodata(const int gen)
        return NULL;
 }
 
-static int render_state_init(struct render_state *so,
-                            struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       so->gen = INTEL_GEN(dev_priv);
-       so->rodata = render_state_get_rodata(so->gen);
-       if (so->rodata == NULL)
-               return 0;
-
-       if (so->rodata->batch_items * 4 > 4096)
-               return -EINVAL;
-
-       so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
-       if (IS_ERR(so->obj))
-               return PTR_ERR(so->obj);
-
-       ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-       if (ret)
-               goto free_gem;
-
-       so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-       return 0;
-
-free_gem:
-       drm_gem_object_unreference(&so->obj->base);
-       return ret;
-}
-
 /*
  * Macro to add commands to auxiliary batch.
  * This macro only checks for page overflow before inserting the commands,
@@ -94,27 +72,28 @@ free_gem:
 
 static int render_state_setup(struct render_state *so)
 {
-       struct drm_device *dev = so->obj->base.dev;
+       struct drm_device *dev = so->vma->vm->dev;
        const struct intel_renderstate_rodata *rodata = so->rodata;
+       const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
        unsigned int i = 0, reloc_index = 0;
        struct page *page;
        u32 *d;
        int ret;
 
-       ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+       ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
        if (ret)
                return ret;
 
-       page = i915_gem_object_get_dirty_page(so->obj, 0);
+       page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
        d = kmap(page);
 
        while (i < rodata->batch_items) {
                u32 s = rodata->batch[i];
 
                if (i * 4  == rodata->reloc[reloc_index]) {
-                       u64 r = s + so->ggtt_offset;
+                       u64 r = s + so->vma->node.start;
                        s = lower_32_bits(r);
-                       if (so->gen >= 8) {
+                       if (has_64bit_reloc) {
                                if (i + 1 >= rodata->batch_items ||
                                    rodata->batch[i + 1] != 0) {
                                        ret = -EINVAL;
@@ -174,7 +153,7 @@ static int render_state_setup(struct render_state *so)
 
        kunmap(page);
 
-       ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+       ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
        if (ret)
                return ret;
 
@@ -192,67 +171,60 @@ err_out:
 
 #undef OUT_BATCH
 
-void i915_gem_render_state_fini(struct render_state *so)
-{
-       i915_gem_object_ggtt_unpin(so->obj);
-       drm_gem_object_unreference(&so->obj->base);
-}
-
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-                                 struct render_state *so)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
+       struct render_state so;
+       struct drm_i915_gem_object *obj;
        int ret;
 
-       if (WARN_ON(engine->id != RCS))
+       if (WARN_ON(req->engine->id != RCS))
                return -ENOENT;
 
-       ret = render_state_init(so, engine->i915);
-       if (ret)
-               return ret;
-
-       if (so->rodata == NULL)
+       so.rodata = render_state_get_rodata(req);
+       if (!so.rodata)
                return 0;
 
-       ret = render_state_setup(so);
-       if (ret) {
-               i915_gem_render_state_fini(so);
-               return ret;
-       }
+       if (so.rodata->batch_items * 4 > 4096)
+               return -EINVAL;
 
-       return 0;
-}
+       obj = i915_gem_object_create(&req->i915->drm, 4096);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
-{
-       struct render_state so;
-       int ret;
+       so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
+       if (IS_ERR(so.vma)) {
+               ret = PTR_ERR(so.vma);
+               goto err_obj;
+       }
 
-       ret = i915_gem_render_state_prepare(req->engine, &so);
+       ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
        if (ret)
-               return ret;
+               goto err_obj;
 
-       if (so.rodata == NULL)
-               return 0;
+       ret = render_state_setup(&so);
+       if (ret)
+               goto err_unpin;
 
-       ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
-                                            so.rodata->batch_items * 4,
-                                            I915_DISPATCH_SECURE);
+       ret = req->engine->emit_bb_start(req, so.vma->node.start,
+                                        so.rodata->batch_items * 4,
+                                        I915_DISPATCH_SECURE);
        if (ret)
-               goto out;
+               goto err_unpin;
 
        if (so.aux_batch_size > 8) {
-               ret = req->engine->dispatch_execbuffer(req,
-                                                    (so.ggtt_offset +
-                                                     so.aux_batch_offset),
-                                                    so.aux_batch_size,
-                                                    I915_DISPATCH_SECURE);
+               ret = req->engine->emit_bb_start(req,
+                                                (so.vma->node.start +
+                                                 so.aux_batch_offset),
+                                                so.aux_batch_size,
+                                                I915_DISPATCH_SECURE);
                if (ret)
-                       goto out;
+                       goto err_unpin;
        }
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
-       i915_gem_render_state_fini(&so);
+       i915_vma_move_to_active(so.vma, req, 0);
+err_unpin:
+       i915_vma_unpin(so.vma);
+err_obj:
+       i915_gem_object_put(obj);
        return ret;
 }
index 6aaa3a10a6309e2f79539fe4df2a79714ea97f26..18cce3f06e9ce603878ad2e4ecc12f662dd3dc91 100644 (file)
 #ifndef _I915_GEM_RENDER_STATE_H_
 #define _I915_GEM_RENDER_STATE_H_
 
-#include <linux/types.h>
-
-struct intel_renderstate_rodata {
-       const u32 *reloc;
-       const u32 *batch;
-       const u32 batch_items;
-};
-
-struct render_state {
-       const struct intel_renderstate_rodata *rodata;
-       struct drm_i915_gem_object *obj;
-       u64 ggtt_offset;
-       int gen;
-       u32 aux_batch_size;
-       u32 aux_batch_offset;
-};
+struct drm_i915_gem_request;
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-                                 struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
new file mode 100644 (file)
index 0000000..40978bc
--- /dev/null
@@ -0,0 +1,946 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prefetch.h>
+
+#include "i915_drv.h"
+
+static const char *i915_fence_get_driver_name(struct fence *fence)
+{
+       return "i915";
+}
+
+static const char *i915_fence_get_timeline_name(struct fence *fence)
+{
+       /* Timelines are bound by eviction to a VM. However, since
+        * we only have a global seqno at the moment, we only have
+        * a single timeline. Note that each timeline will have
+        * multiple execution contexts (fence contexts) as we allow
+        * engines within a single timeline to execute in parallel.
+        */
+       return "global";
+}
+
+static bool i915_fence_signaled(struct fence *fence)
+{
+       return i915_gem_request_completed(to_request(fence));
+}
+
+static bool i915_fence_enable_signaling(struct fence *fence)
+{
+       if (i915_fence_signaled(fence))
+               return false;
+
+       intel_engine_enable_signaling(to_request(fence));
+       return true;
+}
+
+static signed long i915_fence_wait(struct fence *fence,
+                                  bool interruptible,
+                                  signed long timeout_jiffies)
+{
+       s64 timeout_ns, *timeout;
+       int ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+               timeout_ns = jiffies_to_nsecs(timeout_jiffies);
+               timeout = &timeout_ns;
+       } else {
+               timeout = NULL;
+       }
+
+       ret = i915_wait_request(to_request(fence),
+                               interruptible, timeout,
+                               NO_WAITBOOST);
+       if (ret == -ETIME)
+               return 0;
+
+       if (ret < 0)
+               return ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
+               timeout_jiffies = nsecs_to_jiffies(timeout_ns);
+
+       return timeout_jiffies;
+}
+
+static void i915_fence_value_str(struct fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%u", fence->seqno);
+}
+
+static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+                                         int size)
+{
+       snprintf(str, size, "%u",
+                intel_engine_get_seqno(to_request(fence)->engine));
+}
+
+static void i915_fence_release(struct fence *fence)
+{
+       struct drm_i915_gem_request *req = to_request(fence);
+
+       kmem_cache_free(req->i915->requests, req);
+}
+
+const struct fence_ops i915_fence_ops = {
+       .get_driver_name = i915_fence_get_driver_name,
+       .get_timeline_name = i915_fence_get_timeline_name,
+       .enable_signaling = i915_fence_enable_signaling,
+       .signaled = i915_fence_signaled,
+       .wait = i915_fence_wait,
+       .release = i915_fence_release,
+       .fence_value_str = i915_fence_value_str,
+       .timeline_value_str = i915_fence_timeline_value_str,
+};
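
to_request() is presumably the usual container_of() accessor that
recovers the request from its embedded fence; a sketch consistent with
how the ops above use it:

	static inline struct drm_i915_gem_request *to_request(struct fence *fence)
	{
		/* The fence is embedded in the request, so walk back by offset. */
		return container_of(fence, struct drm_i915_gem_request, fence);
	}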
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file)
+{
+       struct drm_i915_private *dev_private;
+       struct drm_i915_file_private *file_priv;
+
+       WARN_ON(!req || !file || req->file_priv);
+
+       if (!req || !file)
+               return -EINVAL;
+
+       if (req->file_priv)
+               return -EINVAL;
+
+       dev_private = req->i915;
+       file_priv = file->driver_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       req->file_priv = file_priv;
+       list_add_tail(&req->client_list, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
+
+       return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_file_private *file_priv = request->file_priv;
+
+       if (!file_priv)
+               return;
+
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+}
+
+void i915_gem_retire_noop(struct i915_gem_active *active,
+                         struct drm_i915_gem_request *request)
+{
+       /* Space left intentionally blank */
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+       struct i915_gem_active *active, *next;
+
+       trace_i915_gem_request_retire(request);
+       list_del(&request->link);
+
+       /* We know the GPU must have read the request to have
+        * sent us the seqno + interrupt, so use the position
+        * of tail of the request to update the last known position
+        * of the GPU head.
+        *
+        * Note this requires that we are always called in request
+        * completion order.
+        */
+       list_del(&request->ring_link);
+       request->ring->last_retired_head = request->postfix;
+
+       /* Walk through the active list, calling retire on each. This allows
+        * objects to track their GPU activity and mark themselves as idle
+        * when their *last* active request is completed (updating state
+        * tracking lists for eviction, active references for GEM, etc).
+        *
+        * As the ->retire() may free the node, we decouple it first and
+        * pass along the auxiliary information (to avoid dereferencing
+        * the node after the callback).
+        */
+       list_for_each_entry_safe(active, next, &request->active_list, link) {
+               /* In microbenchmarks, or when focusing upon time spent inside
+                * the kernel, we may spend an inordinate amount of time simply
+                * handling the retirement of requests and processing their
+                * callbacks. This loop itself is particularly hot due to the
+                * cache misses incurred when jumping around the list of
+                * i915_gem_active.
+                * So we try to keep this loop as streamlined as possible and
+                * also prefetch the next i915_gem_active to try and hide
+                * the likely cache miss.
+                */
+               prefetchw(next);
+
+               INIT_LIST_HEAD(&active->link);
+               RCU_INIT_POINTER(active->request, NULL);
+
+               active->retire(active, request);
+       }
+
+       i915_gem_request_remove_from_client(request);
+
+       if (request->previous_context) {
+               if (i915.enable_execlists)
+                       intel_lr_context_unpin(request->previous_context,
+                                              request->engine);
+       }
+
+       i915_gem_context_put(request->ctx);
+       i915_gem_request_put(request);
+}
+
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->engine;
+       struct drm_i915_gem_request *tmp;
+
+       lockdep_assert_held(&req->i915->drm.struct_mutex);
+       GEM_BUG_ON(list_empty(&req->link));
+
+       do {
+               tmp = list_first_entry(&engine->request_list,
+                                      typeof(*tmp), link);
+
+               i915_gem_request_retire(tmp);
+       } while (tmp != req);
+}
+
+static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+{
+       struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+       if (i915_terminally_wedged(error))
+               return -EIO;
+
+       if (i915_reset_in_progress(error)) {
+               /* Non-interruptible callers can't handle -EAGAIN, hence return
+                * -EIO unconditionally for these.
+                */
+               if (!dev_priv->mm.interruptible)
+                       return -EIO;
+
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+{
+       struct intel_engine_cs *engine;
+       int ret;
+
+       /* Carefully retire all requests without writing to the rings */
+       for_each_engine(engine, dev_priv) {
+               ret = intel_engine_idle(engine,
+                                       I915_WAIT_INTERRUPTIBLE |
+                                       I915_WAIT_LOCKED);
+               if (ret)
+                       return ret;
+       }
+       i915_gem_retire_requests(dev_priv);
+
+       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+               while (intel_kick_waiters(dev_priv) ||
+                      intel_kick_signalers(dev_priv))
+                       yield();
+       }
+
+       /* Finally reset hw state */
+       for_each_engine(engine, dev_priv)
+               intel_engine_init_seqno(engine, seqno);
+
+       return 0;
+}
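
The wraparound test relies on i915_seqno_passed(), which compares two
u32 seqnos via a signed difference so that ordering survives the wrap.
A sketch of that helper (defined elsewhere in the driver):

	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	{
		/* True if seq1 is at or after seq2, modulo u32 wraparound:
		 * e.g. seq1 = 2, seq2 = 0xfffffffe yields (s32)4 >= 0. */
		return (s32)(seq1 - seq2) >= 0;
	}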
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int ret;
+
+       if (seqno == 0)
+               return -EINVAL;
+
+       /* The seqno in the HWS page must be set to one less than the
+        * value we will inject into the ring.
+        */
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+       if (ret)
+               return ret;
+
+       dev_priv->next_seqno = seqno;
+       return 0;
+}
+
+static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+{
+       /* reserve 0 for non-seqno */
+       if (unlikely(dev_priv->next_seqno == 0)) {
+               int ret;
+
+               ret = i915_gem_init_seqno(dev_priv, 0);
+               if (ret)
+                       return ret;
+
+               dev_priv->next_seqno = 1;
+       }
+
+       *seqno = dev_priv->next_seqno++;
+       return 0;
+}
+
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+       struct drm_i915_gem_request *request =
+               container_of(fence, typeof(*request), submit);
+
+       /* Will be called from irq-context when using foreign DMA fences */
+
+       switch (state) {
+       case FENCE_COMPLETE:
+               request->engine->submit_request(request);
+               break;
+
+       case FENCE_FREE:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This must point to a valid context; requests that are not
+ *       tied to a specific user context are issued on the kernel
+ *       context.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct i915_gem_context *ctx)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct drm_i915_gem_request *req;
+       u32 seqno;
+       int ret;
+
+       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+        * and restart.
+        */
+       ret = i915_gem_check_wedge(dev_priv);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /* Move the oldest request to the slab-cache (if not in use!) */
+       req = list_first_entry_or_null(&engine->request_list,
+                                      typeof(*req), link);
+       if (req && i915_gem_request_completed(req))
+               i915_gem_request_retire(req);
+
+       /* Beware: Dragons be flying overhead.
+        *
+        * We use RCU to look up requests in flight. The lookups may
+        * race with the request being allocated from the slab freelist.
+        * That is the request we are writing to here, may be in the process
+        * of being read by __i915_gem_active_get_rcu(). As such,
+        * we have to be very careful when overwriting the contents. During
+        * the RCU lookup, we chase the request->engine pointer,
+        * read the request->fence.seqno and increment the reference count.
+        *
+        * The reference count is incremented atomically. If it is zero,
+        * the lookup knows the request is unallocated and complete. Otherwise,
+        * it is either still in use, or has been reallocated and reset
+        * with fence_init(). This increment is safe for release as we check
+        * that the request we have a reference to and matches the active
+        * request.
+        *
+        * Before we increment the refcount, we chase the request->engine
+        * pointer. We must not call kmem_cache_zalloc() or else we set
+        * that pointer to NULL and cause a crash during the lookup. If
+        * we see the request is completed (based on the value of the
+        * old engine and seqno), the lookup is complete and reports NULL.
+        * If we decide the request is not completed (new engine or seqno),
+        * then we grab a reference and double check that it is still the
+        * active request - which it won't be and restart the lookup.
+        *
+        * Do not use kmem_cache_zalloc() here!
+        */
+       req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+       if (!req)
+               return ERR_PTR(-ENOMEM);
+
+       ret = i915_gem_get_seqno(dev_priv, &seqno);
+       if (ret)
+               goto err;
+
+       spin_lock_init(&req->lock);
+       fence_init(&req->fence,
+                  &i915_fence_ops,
+                  &req->lock,
+                  engine->fence_context,
+                  seqno);
+
+       i915_sw_fence_init(&req->submit, submit_notify);
+
+       INIT_LIST_HEAD(&req->active_list);
+       req->i915 = dev_priv;
+       req->engine = engine;
+       req->ctx = i915_gem_context_get(ctx);
+
+       /* No zalloc, must clear what we need by hand */
+       req->previous_context = NULL;
+       req->file_priv = NULL;
+       req->batch = NULL;
+
+       /*
+        * Reserve space in the ring buffer for all the commands required to
+        * eventually emit this request. This is to guarantee that the
+        * i915_add_request() call can't fail. Note that the reserve may need
+        * to be redone if the request is not actually submitted straight
+        * away, e.g. because a GPU scheduler has deferred it.
+        */
+       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_alloc_request_extras(req);
+       else
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret)
+               goto err_ctx;
+
+       /* Record the position of the start of the request so that
+        * should we detect the updated seqno part-way through the
+        * GPU processing the request, we never over-estimate the
+        * position of the head.
+        */
+       req->head = req->ring->tail;
+
+       return req;
+
+err_ctx:
+       i915_gem_context_put(ctx);
+err:
+       kmem_cache_free(dev_priv->requests, req);
+       return ERR_PTR(ret);
+}
+
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+                              struct drm_i915_gem_request *from)
+{
+       int idx, ret;
+
+       GEM_BUG_ON(to == from);
+
+       if (to->engine == from->engine)
+               return 0;
+
+       idx = intel_engine_sync_index(from->engine, to->engine);
+       if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+               return 0;
+
+       trace_i915_gem_ring_sync_to(to, from);
+       if (!i915.semaphores) {
+               if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+                       ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                           &from->fence, 0,
+                                                           GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
+       } else {
+               ret = to->engine->semaphore.sync_to(to, from);
+               if (ret)
+                       return ret;
+       }
+
+       from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+       return 0;
+}
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+                             struct drm_i915_gem_object *obj,
+                             bool write)
+{
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
+
+       if (write) {
+               active_mask = i915_gem_object_get_active(obj);
+               active = obj->last_read;
+       } else {
+               active_mask = 1;
+               active = &obj->last_write;
+       }
+
+       for_each_active(active_mask, idx) {
+               struct drm_i915_gem_request *request;
+               int ret;
+
+               request = i915_gem_active_peek(&active[idx],
+                                              &obj->base.dev->struct_mutex);
+               if (!request)
+                       continue;
+
+               ret = i915_gem_request_await_request(to, request);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       dev_priv->gt.active_engines |= intel_engine_flag(engine);
+       if (dev_priv->gt.awake)
+               return;
+
+       intel_runtime_pm_get_noresume(dev_priv);
+       dev_priv->gt.awake = true;
+
+       intel_enable_gt_powersave(dev_priv);
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_busy(dev_priv);
+
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->gt.retire_work,
+                          round_jiffies_up_relative(HZ));
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+{
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_ring *ring = request->ring;
+       struct drm_i915_gem_request *prev;
+       u32 request_start;
+       u32 reserved_tail;
+       int ret;
+
+       trace_i915_gem_request_add(request);
+
+       /*
+        * To ensure that this call will not fail, space for its emissions
+        * should already have been reserved in the ring buffer. Let the ring
+        * know that it is time to use that space up.
+        */
+       request_start = ring->tail;
+       reserved_tail = request->reserved_space;
+       request->reserved_space = 0;
+
+       /*
+        * Emit any outstanding flushes - execbuf can fail to emit the flush
+        * after having emitted the batchbuffer command. Hence we need to fix
+        * things up similarly to emitting the lazy request. The difference here
+        * is that the flush _must_ happen before the next request, no matter
+        * what.
+        */
+       if (flush_caches) {
+               ret = engine->emit_flush(request, EMIT_FLUSH);
+
+               /* Not allowed to fail! */
+               WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+       }
+
+       /* Record the position of the start of the breadcrumb so that
+        * should we detect the updated seqno part-way through the
+        * GPU processing the request, we never over-estimate the
+        * position of the ring's HEAD.
+        */
+       request->postfix = ring->tail;
+
+       /* Not allowed to fail! */
+       ret = engine->emit_request(request);
+       WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
+
+       /* Sanity check that the reserved size was large enough. */
+       ret = ring->tail - request_start;
+       if (ret < 0)
+               ret += ring->size;
+       WARN_ONCE(ret > reserved_tail,
+                 "Not enough space reserved (%d bytes) "
+                 "for adding the request (%d bytes)\n",
+                 reserved_tail, ret);
+
+       /* Seal the request and mark it as pending execution. Note that
+        * we may inspect this state, without holding any locks, during
+        * hangcheck. Hence we apply the barrier to ensure that we do not
+        * see a more recent value in the HWS than we are tracking.
+        */
+
+       prev = i915_gem_active_raw(&engine->last_request,
+                                  &request->i915->drm.struct_mutex);
+       if (prev)
+               i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+                                            &request->submitq);
+
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = engine->last_submitted_seqno;
+       engine->last_submitted_seqno = request->fence.seqno;
+       i915_gem_active_set(&engine->last_request, request);
+       list_add_tail(&request->link, &engine->request_list);
+       list_add_tail(&request->ring_link, &ring->request_list);
+
+       i915_gem_mark_busy(engine);
+
+       local_bh_disable();
+       i915_sw_fence_commit(&request->submit);
+       local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+}
+
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (list_empty(&wait->task_list))
+               __add_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static unsigned long local_clock_us(unsigned int *cpu)
+{
+       unsigned long t;
+
+       /* Cheaply and approximately convert from nanoseconds to microseconds.
+        * The result and subsequent calculations are also defined in the same
+        * approximate microseconds units. The principal source of timing
+        * error here is from the simple truncation.
+        *
+        * Note that local_clock() is only defined wrt to the current CPU;
+        * the comparisons are no longer valid if we switch CPUs. Instead of
+        * blocking preemption for the entire busywait, we can detect the CPU
+        * switch and use that as indicator of system load and a reason to
+        * stop busywaiting, see busywait_stop().
+        */
+       *cpu = get_cpu();
+       t = local_clock() >> 10;
+       put_cpu();
+
+       return t;
+}
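
Note the unit here: shifting by 10 divides nanoseconds by 1024 rather
than 1000, so these "microseconds" undercount by roughly 2.3%. For
example, 5000ns >> 10 yields 4 rather than 5, so a nominal 5us spin
budget allows slightly more than 5 real microseconds - an error far
smaller than the scheduling latency the busywait is avoiding.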
+
+static bool busywait_stop(unsigned long timeout, unsigned int cpu)
+{
+       unsigned int this_cpu;
+
+       if (time_after(local_clock_us(&this_cpu), timeout))
+               return true;
+
+       return this_cpu != cpu;
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                        int state, unsigned long timeout_us)
+{
+       unsigned int cpu;
+
+       /* When waiting for high frequency requests, e.g. during synchronous
+        * rendering split between the CPU and GPU, the finite amount of time
+        * required to set up the irq and wait upon it limits the response
+        * rate. By busywaiting on the request completion for a short while we
+        * can service the high frequency waits as quickly as possible. However,
+        * if it is a slow request, we want to sleep as quickly as possible.
+        * The tradeoff between waiting and sleeping is roughly the time it
+        * takes to sleep on a request, on the order of a microsecond.
+        */
+
+       timeout_us += local_clock_us(&cpu);
+       do {
+               if (i915_gem_request_completed(req))
+                       return true;
+
+               if (signal_pending_state(state, current))
+                       break;
+
+               if (busywait_stop(timeout_us, cpu))
+                       break;
+
+               cpu_relax_lowlatency();
+       } while (!need_resched());
+
+       return false;
+}
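
Callers pair a short optimistic spin with the interrupt path: spin
briefly first, then sleep on the breadcrumb IRQ. The pattern used by
i915_wait_request() below, for reference:

	/* Optimistic spin for ~5us before arming the interrupt, then a
	 * shorter ~2us spin once the GPU is known to be processing req. */
	if (i915_spin_request(req, state, 5))
		goto complete;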
+
+/**
+ * i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @flags: how to wait
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: client to charge for RPS boosting
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request completed within the allotted time. Otherwise
+ * returns the errno, with the remaining time filled into the timeout argument.
+ */
+int i915_wait_request(struct drm_i915_gem_request *req,
+                     unsigned int flags,
+                     s64 *timeout,
+                     struct intel_rps_client *rps)
+{
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       DEFINE_WAIT(reset);
+       struct intel_wait wait;
+       unsigned long timeout_remain;
+       int ret = 0;
+
+       might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+       GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+                  !!(flags & I915_WAIT_LOCKED));
+#endif
+
+       if (i915_gem_request_completed(req))
+               return 0;
+
+       timeout_remain = MAX_SCHEDULE_TIMEOUT;
+       if (timeout) {
+               if (WARN_ON(*timeout < 0))
+                       return -EINVAL;
+
+               if (*timeout == 0)
+                       return -ETIME;
+
+               /* Record current time in case interrupted, or wedged */
+               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
+               *timeout += ktime_get_raw_ns();
+       }
+
+       trace_i915_gem_request_wait_begin(req);
+
+       /* This client is about to stall waiting for the GPU. In many cases
+        * this is undesirable and limits the throughput of the system, as
+        * many clients cannot continue processing user input/output whilst
+        * blocked. RPS autotuning may take tens of milliseconds to respond
+        * to the GPU load and thus incurs additional latency for the client.
+        * We can circumvent that by promoting the GPU frequency to maximum
+        * before we wait. This makes the GPU throttle up much more quickly
+        * (good for benchmarks and user experience, e.g. window animations),
+        * but at a cost of spending more power processing the workload
+        * (bad for battery). Not all clients even want their results
+        * immediately and for them we should just let the GPU select its own
+        * frequency to maximise efficiency. To prevent a single client from
+        * forcing the clocks too high for the whole system, we only allow
+        * each client to waitboost once in a busy period.
+        */
+       if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
+               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+
+       /* Optimistic short spin before touching IRQs */
+       if (i915_spin_request(req, state, 5))
+               goto complete;
+
+       set_current_state(state);
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+
+       intel_wait_init(&wait, req->fence.seqno);
+       if (intel_engine_add_wait(req->engine, &wait))
+               /* In order to check that we haven't missed the interrupt
+                * as we enabled it, we need to kick ourselves to do a
+                * coherent check on the seqno before we sleep.
+                */
+               goto wakeup;
+
+       for (;;) {
+               if (signal_pending_state(state, current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               timeout_remain = io_schedule_timeout(timeout_remain);
+               if (timeout_remain == 0) {
+                       ret = -ETIME;
+                       break;
+               }
+
+               if (intel_wait_complete(&wait))
+                       break;
+
+               set_current_state(state);
+
+wakeup:
+               /* Carefully check if the request is complete, giving time
+                * for the seqno to be visible following the interrupt.
+                * We also have to check in case we are kicked by the GPU
+                * reset in order to drop the struct_mutex.
+                */
+               if (__i915_request_irq_complete(req))
+                       break;
+
+               /* If the GPU is hung, and we hold the lock, reset the GPU
+                * and then check for completion. On a full reset, the engine's
+                * HW seqno will be advanced past us and we are complete.
+                * If we do a partial reset, we have to wait for the GPU to
+                * resume and update the breadcrumb.
+                *
+                * If we don't hold the mutex, we can just wait for the worker
+                * to come along and update the breadcrumb (either directly
+                * itself, or indirectly by recovering the GPU).
+                */
+               if (flags & I915_WAIT_LOCKED &&
+                   i915_reset_in_progress(&req->i915->gpu_error)) {
+                       __set_current_state(TASK_RUNNING);
+                       i915_reset(req->i915);
+                       reset_wait_queue(&req->i915->gpu_error.wait_queue,
+                                        &reset);
+                       continue;
+               }
+
+               /* Only spin if we know the GPU is processing this request */
+               if (i915_spin_request(req, state, 2))
+                       break;
+       }
+
+       intel_engine_remove_wait(req->engine, &wait);
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+       __set_current_state(TASK_RUNNING);
+
+complete:
+       trace_i915_gem_request_wait_end(req);
+
+       if (timeout) {
+               *timeout -= ktime_get_raw_ns();
+               if (*timeout < 0)
+                       *timeout = 0;
+
+               /*
+                * Apparently ktime isn't accurate enough and occasionally has a
+                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+                * things up to make the test happy. We allow up to 1 jiffy.
+                *
+                * This is a regression from the timespec->ktime conversion.
+                */
+               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+                       *timeout = 0;
+       }
+
+       if (IS_RPS_USER(rps) &&
+           req->fence.seqno == req->engine->last_submitted_seqno) {
+               /* The GPU is now idle and this client has stalled.
+                * Since no other client has submitted a request in the
+                * meantime, assume that this client is the only one
+                * supplying work to the GPU but is unable to keep that
+                * work supplied because it is waiting. Since the GPU is
+                * then never kept fully busy, RPS autoclocking will
+                * keep the clocks relatively low, causing further delays.
+                * Compensate by giving the synchronous client credit for
+                * a waitboost next time.
+                */
+               spin_lock(&req->i915->rps.client_lock);
+               list_del_init(&rps->link);
+               spin_unlock(&req->i915->rps.client_lock);
+       }
+
+       return ret;
+}
+
+static bool engine_retire_requests(struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_request *request, *next;
+
+       list_for_each_entry_safe(request, next, &engine->request_list, link) {
+               if (!i915_gem_request_completed(request))
+                       return false;
+
+               i915_gem_request_retire(request);
+       }
+
+       return true;
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
+
+       for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
+               if (engine_retire_requests(engine))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
+                                  msecs_to_jiffies(100));
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
new file mode 100644 (file)
index 0000000..974bd7b
--- /dev/null
@@ -0,0 +1,689 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_REQUEST_H
+#define I915_GEM_REQUEST_H
+
+#include <linux/fence.h>
+
+#include "i915_gem.h"
+#include "i915_sw_fence.h"
+
+struct intel_wait {
+       struct rb_node node;
+       struct task_struct *tsk;
+       u32 seqno;
+};
+
+struct intel_signal_node {
+       struct rb_node node;
+       struct intel_wait wait;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * When modifying this structure be very aware that we perform a lockless
+ * RCU lookup of it that may race against reallocation of the struct
+ * from the slab freelist. We intentionally do not zero the structure on
+ * allocation so that the lookup can use the dangling pointers (and is
+ * cognisant that those pointers may be wrong). Instead, everything that
+ * needs to be initialised must be done so explicitly.
+ *
+ * The requests are reference counted.
+ */
+struct drm_i915_gem_request {
+       struct fence fence;
+       spinlock_t lock;
+
+       /** Device on which this request was generated */
+       struct drm_i915_private *i915;
+
+       /**
+        * Context and ring buffer related to this request
+        * Contexts are refcounted, so when this request is associated with a
+        * context, we must increment the context's refcount, to guarantee that
+        * it persists while any request is linked to it. Requests themselves
+        * are also refcounted, so the request will only be freed when the last
+        * reference to it is dismissed, and the code in
+        * i915_gem_request_free() will then decrement the refcount on the
+        * context.
+        */
+       struct i915_gem_context *ctx;
+       struct intel_engine_cs *engine;
+       struct intel_ring *ring;
+       struct intel_signal_node signaling;
+
+       struct i915_sw_fence submit;
+       wait_queue_t submitq;
+
+       /** GEM sequence number associated with the previous request,
+        * when the HWS breadcrumb is equal to this, the GPU is processing
+        * this request.
+        */
+       u32 previous_seqno;
+
+       /** Position in the ring of the start of the request */
+       u32 head;
+
+       /**
+        * Position in the ring of the start of the postfix.
+        * This is required to calculate the maximum available ring space
+        * without overwriting the postfix.
+        */
+       u32 postfix;
+
+       /** Position in the ring of the end of the whole request */
+       u32 tail;
+
+       /** Position in the ring of the end of any workarounds after the tail */
+       u32 wa_tail;
+
+       /** Preallocated space in the ring for emitting this request */
+       u32 reserved_space;
+
+       /**
+        * Context related to the previous request.
+        * As the contexts are accessed by the hardware until the switch is
+        * completed to a new context, the hardware may still be writing
+        * to the context object after the breadcrumb is visible. We must
+        * not unpin/unbind/prune that object whilst still active and so
+        * we keep the previous context pinned until the following (this)
+        * request is retired.
+        */
+       struct i915_gem_context *previous_context;
+
+       /** Batch buffer related to this request if any (used for
+        * error state dump only).
+        */
+       struct i915_vma *batch;
+       struct list_head active_list;
+
+       /** Time at which this request was emitted, in jiffies. */
+       unsigned long emitted_jiffies;
+
+       /** engine->request_list entry for this request */
+       struct list_head link;
+
+       /** ring->request_list entry for this request */
+       struct list_head ring_link;
+
+       struct drm_i915_file_private *file_priv;
+       /** file_priv list entry for this request */
+       struct list_head client_list;
+
+       /** Link in the execlist submission queue, guarded by execlist_lock. */
+       struct list_head execlist_link;
+};
+
+extern const struct fence_ops i915_fence_ops;
+
+static inline bool fence_is_i915(struct fence *fence)
+{
+       return fence->ops == &i915_fence_ops;
+}
+
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct i915_gem_context *ctx);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file);
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+
+static inline u32
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+       return req ? req->fence.seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
+{
+       return req ? req->engine : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+to_request(struct fence *fence)
+{
+       /* We assume that NULL fence/request are interoperable */
+       BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+       GEM_BUG_ON(fence && !fence_is_i915(fence));
+       return container_of(fence, struct drm_i915_gem_request, fence);
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get(struct drm_i915_gem_request *req)
+{
+       return to_request(fence_get(&req->fence));
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+       return to_request(fence_get_rcu(&req->fence));
+}
+
+static inline void
+i915_gem_request_put(struct drm_i915_gem_request *req)
+{
+       fence_put(&req->fence);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+                                          struct drm_i915_gem_request *src)
+{
+       if (src)
+               i915_gem_request_get(src);
+
+       if (*pdst)
+               i915_gem_request_put(*pdst);
+
+       *pdst = src;
+}
+
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+                             struct drm_i915_gem_object *obj,
+                             bool write);
+
+void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
+#define i915_add_request(req) \
+       __i915_add_request(req, true)
+#define i915_add_request_no_flush(req) \
+       __i915_add_request(req, false)
+
+struct intel_rps_client;
+#define NO_WAITBOOST ERR_PTR(-1)
+#define IS_RPS_CLIENT(p) (!IS_ERR(p))
+#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+
+int i915_wait_request(struct drm_i915_gem_request *req,
+                     unsigned int flags,
+                     s64 *timeout,
+                     struct intel_rps_client *rps)
+       __attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE        BIT(0)
+#define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
+
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
+{
+       return (s32)(seq1 - seq2) >= 0;
+}
+
+static inline bool
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+                                req->previous_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+                                req->fence.seqno);
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+                        int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+                                    int state, unsigned long timeout_us)
+{
+       return (i915_gem_request_started(request) &&
+               __i915_spin_request(request, state, timeout_us));
+}
+
+/* We treat requests as fences. This is not to be confused with our
+ * "fence registers"; these are pipeline synchronisation objects akin to
+ * GL_ARB_sync.
+ * We use the fences to synchronize access from the CPU with activity on the
+ * GPU, for example, we should not rewrite an object's PTE whilst the GPU
+ * is reading them. We also track fences at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_gem_active to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_gem_active is updated with i915_gem_active_set() to track the
+ * most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_gem_active completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_gem_active.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
+struct i915_gem_active;
+
+typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
+                                  struct drm_i915_gem_request *);
+
+struct i915_gem_active {
+       struct drm_i915_gem_request __rcu *request;
+       struct list_head link;
+       i915_gem_retire_fn retire;
+};
+
+void i915_gem_retire_noop(struct i915_gem_active *,
+                         struct drm_i915_gem_request *request);
+
+/**
+ * init_request_active - prepares the activity tracker for use
+ * @active - the active tracker
+ * @retire - a callback invoked when the tracker is retired (becomes idle);
+ *           may be NULL
+ *
+ * init_request_active() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the tracked request is retired after completion
+ * (i.e. becomes idle), the optional callback @retire is invoked.
+ */
+static inline void
+init_request_active(struct i915_gem_active *active,
+                   i915_gem_retire_fn retire)
+{
+       INIT_LIST_HEAD(&active->link);
+       active->retire = retire ?: i915_gem_retire_noop;
+}
+
+/**
+ * i915_gem_active_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * i915_gem_active_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
+static inline void
+i915_gem_active_set(struct i915_gem_active *active,
+                   struct drm_i915_gem_request *request)
+{
+       list_move(&active->link, &request->active_list);
+       rcu_assign_pointer(active->request, request);
+}
+
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+       /* Inside the error capture (running with the driver in an unknown
+        * state), we want to bend the rules slightly (a lot).
+        *
+        * Work is in progress to make it safer; in the meantime this keeps
+        * the known issue from spamming the logs.
+        */
+       return rcu_dereference_protected(active->request, 1);
+}
+
+/**
+ * i915_gem_active_raw - return the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_raw() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the caller
+ * must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
+{
+       return rcu_dereference_protected(active->request,
+                                        lockdep_is_held(mutex));
+}
+
+/**
+ * i915_gem_active_peek - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
+{
+       struct drm_i915_gem_request *request;
+
+       request = i915_gem_active_raw(active, mutex);
+       if (!request || i915_gem_request_completed(request))
+               return NULL;
+
+       return request;
+}
+
+/**
+ * i915_gem_active_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
+{
+       return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+}
+
+/**
+ * __i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_gem_active_get_rcu() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock, but
+ * the returned pointer is safe to use outside of RCU.
+ */
+static inline struct drm_i915_gem_request *
+__i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+       /* Performing a lockless retrieval of the active request is super
+        * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+        * slab of request objects will not be freed whilst we hold the
+        * RCU read lock. It does not guarantee that the request itself
+        * will not be freed and then *reused*. Viz,
+        *
+        * Thread A                     Thread B
+        *
+        * req = active.request
+        *                              retire(req) -> free(req);
+        *                              (req is now first on the slab freelist)
+        *                              active.request = NULL
+        *
+        *                              req = new submission on a new object
+        * ref(req)
+        *
+        * To prevent the request from being reused whilst the caller
+        * uses it, we take a reference like normal. Whilst acquiring
+        * the reference we check that it is not in a destroyed state
+        * (refcnt == 0). That prevents the request being reallocated
+        * whilst the caller holds on to it. To check that the request
+        * was not reallocated as we acquired the reference we have to
+        * check that our request remains the active request across
+        * the lookup, in the same manner as a seqlock. The visibility
+        * of the pointer versus the reference counting is controlled
+        * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+        *
+        * In the middle of all that, we inspect whether the request is
+        * complete. Retiring is lazy so the request may be completed long
+        * before the active tracker is updated. Querying whether the
+        * request is complete is far cheaper (as it involves no locked
+        * instructions setting cachelines to exclusive) than acquiring
+        * the reference, so we do it first. The RCU read lock ensures the
+        * pointer dereference is valid, but does not ensure that the
+        * seqno nor HWS is the right one! However, if the request was
+        * reallocated, that means the active tracker's request was complete.
+        * If the new request is also complete, then both are and we can
+        * just report the active tracker is idle. If the new request is
+        * incomplete, then we acquire a reference on it and check that
+        * it remained the active request.
+        *
+        * It is then imperative that we do not zero the request on
+        * reallocation, so that we can chase the dangling pointers!
+        * See i915_gem_request_alloc().
+        */
+       do {
+               struct drm_i915_gem_request *request;
+
+               request = rcu_dereference(active->request);
+               if (!request || i915_gem_request_completed(request))
+                       return NULL;
+
+               /* An especially silly compiler could decide to recompute the
+                * result of i915_gem_request_completed, more specifically
+                * re-emit the load for request->fence.seqno. A race would catch
+                * a later seqno value, which could flip the result from true to
+                * false, meaning some of the instructions below might be
+                * skipped while later ones still execute. Due to
+                * barriers within the refcounting the inconsistency can't reach
+                * past the call to i915_gem_request_get_rcu(), but skipping
+                * that call while still executing i915_gem_request_put()
+                * creates havoc enough. Prevent this with a compiler barrier.
+                */
+               barrier();
+
+               request = i915_gem_request_get_rcu(request);
+
+               /* What stops the following rcu_access_pointer() from occurring
+                * before the above i915_gem_request_get_rcu()? If we were
+                * to read the value before pausing to get the reference to
+                * the request, we may not notice a change in the active
+                * tracker.
+                *
+                * The rcu_access_pointer() is a mere compiler barrier, which
+                * means both the CPU and compiler are free to perform the
+                * memory read without constraint. The compiler only has to
+                * ensure that any operations after the rcu_access_pointer()
+                * occur afterwards in program order. This means the read may
+                * be performed earlier by an out-of-order CPU, or adventurous
+                * compiler.
+                *
+                * The atomic operation at the heart of
+                * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+                * atomic_inc_not_zero() which is only a full memory barrier
+                * when successful. That is, if i915_gem_request_get_rcu()
+                * returns the request (and so with the reference counted
+                * incremented) then the following read for rcu_access_pointer()
+                * must occur after the atomic operation and so confirm
+                * that this request is the one currently being tracked.
+                *
+                * The corresponding write barrier is part of
+                * rcu_assign_pointer().
+                */
+               if (!request || request == rcu_access_pointer(active->request))
+                       return rcu_pointer_handoff(request);
+
+               i915_gem_request_put(request);
+       } while (1);
+}
+
+/**
+ * i915_gem_active_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_gem_request_put().
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+{
+       struct drm_i915_gem_request *request;
+
+       rcu_read_lock();
+       request = __i915_gem_active_get_rcu(active);
+       rcu_read_unlock();
+
+       return request;
+}
+
+/**
+ * i915_gem_active_isset - report whether the active tracker is assigned
+ * @active - the active tracker
+ *
+ * i915_gem_active_isset() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+i915_gem_active_isset(const struct i915_gem_active *active)
+{
+       return rcu_access_pointer(active->request);
+}
+
+/**
+ * i915_gem_active_is_idle - report whether the active tracker is idle
+ * @active - the active tracker
+ *
+ * i915_gem_active_is_idle() returns true if the active tracker is currently
+ * unassigned or if the request is complete (but not yet retired). Requires
+ * the caller to hold struct_mutex (but that can be relaxed if desired).
+ */
+static inline bool
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+                       struct mutex *mutex)
+{
+       return !i915_gem_active_peek(active, mutex);
+}
+
+/**
+ * i915_gem_active_wait - waits until the request is completed
+ * @active - the active request on which to wait
+ *
+ * i915_gem_active_wait() waits until the request is completed before
+ * returning. Note that it does not guarantee that the request is
+ * retired first; see i915_gem_active_retire().
+ *
+ * i915_gem_active_wait() returns immediately if the active
+ * request is already complete.
+ */
+static inline int __must_check
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
+{
+       struct drm_i915_gem_request *request;
+
+       request = i915_gem_active_peek(active, mutex);
+       if (!request)
+               return 0;
+
+       return i915_wait_request(request,
+                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                                NULL, NULL);
+}
+
+/**
+ * i915_gem_active_wait_unlocked - waits until the request is completed
+ * @active - the active request on which to wait
+ * @flags - how to wait
+ * @timeout - how long to wait at most
+ * @rps - userspace client to charge for a waitboost
+ *
+ * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * returning, without requiring any locks to be held. Note that it does not
+ * retire any requests before returning.
+ *
+ * This function relies on RCU in order to acquire the reference to the active
+ * request without holding any locks. See __i915_gem_active_get_rcu() for the
+ * gory details on how that is managed. Once the reference is acquired, we
+ * can then wait upon the request, and afterwards release our reference,
+ * free of any locking.
+ *
+ * This function wraps i915_wait_request(), see it for the full details on
+ * the arguments.
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+static inline int
+i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
+                             unsigned int flags,
+                             s64 *timeout,
+                             struct intel_rps_client *rps)
+{
+       struct drm_i915_gem_request *request;
+       int ret = 0;
+
+       request = i915_gem_active_get_unlocked(active);
+       if (request) {
+               ret = i915_wait_request(request, flags, timeout, rps);
+               i915_gem_request_put(request);
+       }
+
+       return ret;
+}
+
+/**
+ * i915_gem_active_retire - waits until the request is retired
+ * @active - the active request on which to wait
+ *
+ * i915_gem_active_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_gem_active_retire(struct i915_gem_active *active,
+                      struct mutex *mutex)
+{
+       struct drm_i915_gem_request *request;
+       int ret;
+
+       request = i915_gem_active_raw(active, mutex);
+       if (!request)
+               return 0;
+
+       ret = i915_wait_request(request,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               NULL, NULL);
+       if (ret)
+               return ret;
+
+       list_del_init(&active->link);
+       RCU_INIT_POINTER(active->request, NULL);
+
+       active->retire(active, request);
+
+       return 0;
+}
+
+/* Convenience functions for peeking at state inside active's request whilst
+ * guarded by the struct_mutex.
+ */
+
+static inline uint32_t
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+                         struct mutex *mutex)
+{
+       return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
+}
+
+static inline struct intel_engine_cs *
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+                          struct mutex *mutex)
+{
+       return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
+}
+
+#define for_each_active(mask, idx) \
+       for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+
+#endif /* I915_GEM_REQUEST_H */
index 6f10b421487b843cc03f0defe666c06ef060fe81..1c237d02f30b1307deef2938f4a54e50629b68f6 100644 (file)
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
-       int count = 0;
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (drm_mm_node_allocated(&vma->node))
-                       count++;
-               if (vma->pin_count)
-                       count++;
-       }
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
+               if (i915_vma_is_pinned(vma))
+                       return true;
 
-       return count;
+       return false;
 }
 
 static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
-       if (obj->pages_pin_count != num_vma_bound(obj))
+       if (obj->pages_pin_count > obj->bind_count)
+               return false;
+
+       if (any_vma_pinned(obj))
                return false;
 
        /* We can only return physical pages to the system if we can either
@@ -163,17 +162,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
+               struct drm_i915_gem_object *obj;
 
                if ((flags & phase->bit) == 0)
                        continue;
 
                INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(phase->list)) {
-                       struct drm_i915_gem_object *obj;
-                       struct i915_vma *vma, *v;
-
-                       obj = list_first_entry(phase->list,
-                                              typeof(*obj), global_list);
+               while (count < target &&
+                      (obj = list_first_entry_or_null(phase->list,
+                                                      typeof(*obj),
+                                                      global_list))) {
                        list_move_tail(&obj->global_list, &still_in_list);
 
                        if (flags & I915_SHRINK_PURGEABLE &&
@@ -184,24 +182,21 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                            !is_vmalloc_addr(obj->mapping))
                                continue;
 
-                       if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+                       if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+                           i915_gem_object_is_active(obj))
                                continue;
 
                        if (!can_release_pages(obj))
                                continue;
 
-                       drm_gem_object_reference(&obj->base);
+                       i915_gem_object_get(obj);
 
                        /* For the unbound phase, this should be a no-op! */
-                       list_for_each_entry_safe(vma, v,
-                                                &obj->vma_list, obj_link)
-                               if (i915_vma_unbind(vma))
-                                       break;
-
+                       i915_gem_object_unbind(obj);
                        if (i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;
 
-                       drm_gem_object_unreference(&obj->base);
+                       i915_gem_object_put(obj);
                }
                list_splice(&still_in_list, phase->list);
        }
@@ -210,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                intel_runtime_pm_put(dev_priv);
 
        i915_gem_retire_requests(dev_priv);
+       /* expedite the RCU grace period to free some request slabs */
+       synchronize_rcu_expedited();
 
        return count;
 }
@@ -230,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-       return i915_gem_shrink(dev_priv, -1UL,
-                              I915_SHRINK_BOUND |
-                              I915_SHRINK_UNBOUND |
-                              I915_SHRINK_ACTIVE);
+       unsigned long freed;
+
+       freed = i915_gem_shrink(dev_priv, -1UL,
+                               I915_SHRINK_BOUND |
+                               I915_SHRINK_UNBOUND |
+                               I915_SHRINK_ACTIVE);
+       rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+       return freed;
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -242,9 +244,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;
 
-               if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-                       return false;
-
                *unlock = false;
        } else
                *unlock = true;
@@ -273,7 +272,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->active && can_release_pages(obj))
+               if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
@@ -321,17 +320,22 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
                                       struct shrinker_lock_uninterruptible *slu,
                                       int timeout_ms)
 {
-       unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+       unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
+
+       do {
+               if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
+                   i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+                       break;
 
-       while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;
-               if (--timeout == 0) {
+
+               if (time_after(jiffies, timeout)) {
                        pr_err("Unable to lock GPU to purge memory.\n");
                        return false;
                }
-       }
+       } while (1);
 
        slu->was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
@@ -410,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(dev_priv);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
        if (ret)
                goto out;
 
index 66be299a1486f4f656ba4e3970249a0e5e909aeb..59989e8ee5dc8b1834eee109e72eb02961e3934d 100644 (file)
@@ -92,6 +92,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        u32 base;
@@ -111,33 +112,44 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 3) {
                u32 bsm;
 
-               pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+               pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
                base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev)) {
+               u32 tseg_size = 0;
                u16 toud = 0;
+               u8 tmp;
 
-               /*
-                * FIXME is the graphics stolen memory region
-                * always at TOUD? Ie. is it always the last
-                * one to be allocated by the BIOS?
-                */
-               pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
+                                        I845_ESMRAMC, &tmp);
+
+               if (tmp & TSEG_ENABLE) {
+                       switch (tmp & I845_TSEG_SIZE_MASK) {
+                       case I845_TSEG_SIZE_512K:
+                               tseg_size = KB(512);
+                               break;
+                       case I845_TSEG_SIZE_1M:
+                               tseg_size = MB(1);
+                               break;
+                       }
+               }
+
+               pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
                                         I865_TOUD, &toud);
 
-               base = toud << 16;
+               base = (toud << 16) + tseg_size;
        } else if (IS_I85X(dev)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I85X_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE)
                        tseg_size = MB(1);
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);
 
@@ -147,7 +159,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE) {
@@ -161,7 +173,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                        }
                }
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
@@ -171,7 +183,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE) {
@@ -181,7 +193,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                                tseg_size = KB(512);
                }
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
@@ -685,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
-       vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
+       vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -698,24 +710,25 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
-       if (drm_mm_initialized(&ggtt->base.mm)) {
-               ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-                       goto err;
-               }
 
-               vma->bound |= GLOBAL_BIND;
-               __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+       ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+               goto err;
        }
 
+       vma->pages = obj->pages;
+       vma->flags |= I915_VMA_GLOBAL_BIND;
+       __i915_vma_set_map_and_fenceable(vma);
+       list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+       obj->bind_count++;
+
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        i915_gem_object_pin_pages(obj);
 
        return obj;
 
 err:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        return NULL;
 }
index 8030199731dbb5856bdce6f38b9cd386550e98cf..a14b1e3d4c78709d1dc459ecd728d2eccb3212a9 100644 (file)
@@ -68,6 +68,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
        if (tiling_mode == I915_TILING_NONE)
                return true;
 
+       if (tiling_mode > I915_TILING_LAST)
+               return false;
+
        if (IS_GEN2(dev) ||
            (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
                tile_width = 128;
@@ -113,36 +116,58 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
        return true;
 }
 
-/* Is the current GTT allocation valid for the change in tiling? */
-static bool
-i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 {
+       struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
        u32 size;
 
-       if (tiling_mode == I915_TILING_NONE)
-               return true;
-
-       if (INTEL_INFO(obj->base.dev)->gen >= 4)
+       if (!i915_vma_is_map_and_fenceable(vma))
                return true;
 
-       if (IS_GEN3(obj->base.dev)) {
-               if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+       if (INTEL_GEN(dev_priv) == 3) {
+               if (vma->node.start & ~I915_FENCE_START_MASK)
                        return false;
        } else {
-               if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
+               if (vma->node.start & ~I830_FENCE_START_MASK)
                        return false;
        }
 
-       size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-       if (i915_gem_obj_ggtt_size(obj) != size)
+       size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+       if (vma->node.size < size)
                return false;
 
-       if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
+       if (vma->node.start & (size - 1))
                return false;
 
        return true;
 }
 
+/* Make the current GTT allocation valid for the change in tiling. */
+static int
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_vma *vma;
+       int ret;
+
+       if (tiling_mode == I915_TILING_NONE)
+               return 0;
+
+       if (INTEL_GEN(dev_priv) >= 4)
+               return 0;
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (i915_vma_fence_prepare(vma, tiling_mode))
+                       continue;
+
+               ret = i915_vma_unbind(vma);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 /**
  * i915_gem_set_tiling - IOCTL handler to set tiling mode
  * @dev: DRM device
@@ -164,15 +189,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        struct drm_i915_gem_set_tiling *args = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
-       int ret = 0;
+       int err = 0;
+
+       /* Make sure we don't cross-contaminate obj->tiling_and_stride */
+       BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
        if (!i915_tiling_ok(dev,
                            args->stride, obj->base.size, args->tiling_mode)) {
-               drm_gem_object_unreference_unlocked(&obj->base);
+               i915_gem_object_put_unlocked(obj);
                return -EINVAL;
        }
 
@@ -180,7 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
        if (obj->pin_display || obj->framebuffer_references) {
-               ret = -EBUSY;
+               err = -EBUSY;
                goto err;
        }
 
@@ -213,8 +241,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                }
        }
 
-       if (args->tiling_mode != obj->tiling_mode ||
-           args->stride != obj->stride) {
+       if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
+           args->stride != i915_gem_object_get_stride(obj)) {
                /* We need to rebind the object if its current allocation
                 * no longer meets the alignment restrictions for its new
                 * tiling mode. Otherwise we can just leave it alone, but
@@ -227,34 +255,36 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 * has to also include the unfenced register the GPU uses
                 * whilst executing a fenced command for an untiled object.
                 */
-               if (obj->map_and_fenceable &&
-                   !i915_gem_object_fence_ok(obj, args->tiling_mode))
-                       ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 
-               if (ret == 0) {
+               err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
+               if (!err) {
+                       struct i915_vma *vma;
+
                        if (obj->pages &&
                            obj->madv == I915_MADV_WILLNEED &&
                            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                                if (args->tiling_mode == I915_TILING_NONE)
                                        i915_gem_object_unpin_pages(obj);
-                               if (obj->tiling_mode == I915_TILING_NONE)
+                               if (!i915_gem_object_is_tiled(obj))
                                        i915_gem_object_pin_pages(obj);
                        }
 
-                       obj->fence_dirty =
-                               obj->last_fenced_req ||
-                               obj->fence_reg != I915_FENCE_REG_NONE;
+                       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+                               if (!vma->fence)
+                                       continue;
 
-                       obj->tiling_mode = args->tiling_mode;
-                       obj->stride = args->stride;
+                               vma->fence->dirty = true;
+                       }
+                       obj->tiling_and_stride =
+                               args->stride | args->tiling_mode;
 
                        /* Force the fence to be reacquired for GTT access */
                        i915_gem_release_mmap(obj);
                }
        }
        /* we have to maintain this existing ABI... */
-       args->stride = obj->stride;
-       args->tiling_mode = obj->tiling_mode;
+       args->stride = i915_gem_object_get_stride(obj);
+       args->tiling_mode = i915_gem_object_get_tiling(obj);
 
        /* Try to preallocate memory required to save swizzling on put-pages */
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -268,12 +298,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        }
 
 err:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 
        intel_runtime_pm_put(dev_priv);
 
-       return ret;
+       return err;
 }
 
 /**
@@ -297,14 +327,12 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
-       mutex_lock(&dev->struct_mutex);
-
-       args->tiling_mode = obj->tiling_mode;
-       switch (obj->tiling_mode) {
+       args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+       switch (args->tiling_mode) {
        case I915_TILING_X:
                args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                break;
@@ -328,8 +356,6 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-       drm_gem_object_unreference(&obj->base);
-       mutex_unlock(&dev->struct_mutex);
-
+       i915_gem_object_put_unlocked(obj);
        return 0;
 }
index 2314c88323e39861152485ccfd1b8643ce8bc41f..e537930c64b53d5a18ebbf7fcb79be68f0114acc 100644 (file)
@@ -63,33 +63,12 @@ struct i915_mmu_object {
 
 static void wait_rendering(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-       int i, n;
-
-       if (!obj->active)
-               return;
-
-       n = 0;
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               struct drm_i915_gem_request *req;
-
-               req = obj->last_read_req[i];
-               if (req == NULL)
-                       continue;
-
-               requests[n++] = i915_gem_request_reference(req);
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-
-       for (i = 0; i < n; i++)
-               __i915_wait_request(requests[i], false, NULL, NULL);
-
-       mutex_lock(&dev->struct_mutex);
+       unsigned long active = __I915_BO_ACTIVE(obj);
+       int idx;
 
-       for (i = 0; i < n; i++)
-               i915_gem_request_unreference(requests[i]);
+       for_each_active(active, idx)
+               i915_gem_active_wait_unlocked(&obj->last_read[idx],
+                                             0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
@@ -98,28 +77,19 @@ static void cancel_userptr(struct work_struct *work)
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;
 
+       wait_rendering(obj);
+
        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;
 
        if (obj->pages != NULL) {
-               struct drm_i915_private *dev_priv = to_i915(dev);
-               struct i915_vma *vma, *tmp;
-               bool was_interruptible;
-
-               wait_rendering(obj);
-
-               was_interruptible = dev_priv->mm.interruptible;
-               dev_priv->mm.interruptible = false;
-
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-                       WARN_ON(i915_vma_unbind(vma));
+               /* We are inside a kthread context and can't be interrupted */
+               WARN_ON(i915_gem_object_unbind(obj));
                WARN_ON(i915_gem_object_put_pages(obj));
-
-               dev_priv->mm.interruptible = was_interruptible;
        }
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 }
 
@@ -572,12 +542,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
-               if (ret)
-                       __i915_gem_userptr_set_active(obj, false);
        }
 
        obj->userptr.workers--;
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 
        release_pages(pvec, pinned, 0);
@@ -622,8 +590,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
        obj->userptr.work = &work->work;
        obj->userptr.workers++;
 
-       work->obj = obj;
-       drm_gem_object_reference(&obj->base);
+       work->obj = i915_gem_object_get(obj);
 
        work->task = current;
        get_task_struct(work->task);
@@ -659,15 +626,14 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */
-       if (IS_ERR(obj->userptr.work)) {
-               /* active flag will have been dropped already by the worker */
-               ret = PTR_ERR(obj->userptr.work);
-               obj->userptr.work = NULL;
-               return ret;
-       }
-       if (obj->userptr.work)
+
+       if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
-               return -EAGAIN;
+               if (IS_ERR(obj->userptr.work))
+                       return PTR_ERR(obj->userptr.work);
+               else
+                       return -EAGAIN;
+       }
 
        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
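
The restructured check leans on the kernel's ERR_PTR() convention: one pointer field encodes idle (NULL), pending work (a real pointer), or failure (a small negative errno folded into the pointer). A self-contained model of the idiom:

    #include <stdio.h>

    /* Userspace model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers:
     * errno values in [-4095, -1] live inside the pointer itself. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr) ((long)(ptr))

    int main(void)
    {
            void *work = ERR_PTR(-14);      /* the worker failed, say */

            if (work) {
                    if (IS_ERR(work))
                            printf("worker failed: %ld\n", PTR_ERR(work));
                    else
                            printf("worker pending: would return -EAGAIN\n");
            }
            return 0;
    }
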
@@ -846,7 +812,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                ret = drm_gem_handle_create(file, &obj->base, &handle);
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;
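
The unconditional put above follows the usual GEM ownership rule: the allocation holds one reference, drm_gem_handle_create() takes its own on success, and the allocator drops its reference either way. A toy refcount model of that handoff:

    #include <stdio.h>

    struct obj { int refs; };

    static void get(struct obj *o) { o->refs++; }

    static void put(struct obj *o)
    {
            if (--o->refs == 0)
                    puts("object freed");
    }

    /* On success the "handle" takes its own reference. */
    static int publish(struct obj *o, int fail)
    {
            if (fail)
                    return -1;
            get(o);
            return 0;
    }

    int main(void)
    {
            struct obj o = { .refs = 1 };   /* allocation reference */
            int ret = publish(&o, 0);

            put(&o);        /* drop allocation ref; handle holds its own now */
            if (ret == 0)
                    put(&o);        /* later: handle closed */
            return ret ? 1 : 0;
    }
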
 
index 9d73d2216adc6925f09b97f76a71690daf213374..334f15df7c8d6788e51a466fc3452409c35cb64d 100644 (file)
@@ -30,9 +30,9 @@
 #include <generated/utsrelease.h>
 #include "i915_drv.h"
 
-static const char *ring_str(int ring)
+static const char *engine_str(int engine)
 {
-       switch (ring) {
+       switch (engine) {
        case RCS: return "render";
        case VCS: return "bsd";
        case BCS: return "blt";
@@ -42,16 +42,6 @@ static const char *ring_str(int ring)
        }
 }
 
-static const char *pin_flag(int pinned)
-{
-       if (pinned > 0)
-               return " P";
-       else if (pinned < 0)
-               return " p";
-       else
-               return "";
-}
-
 static const char *tiling_flag(int tiling)
 {
        switch (tiling) {
@@ -189,7 +179,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 {
        int i;
 
-       err_printf(m, "  %s [%d]:\n", name, count);
+       err_printf(m, "%s [%d]:\n", name, count);
 
        while (count--) {
                err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
@@ -202,13 +192,12 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
                        err_printf(m, "%02x ", err->rseqno[i]);
 
                err_printf(m, "] %02x", err->wseqno);
-               err_puts(m, pin_flag(err->pinned));
                err_puts(m, tiling_flag(err->tiling));
                err_puts(m, dirty_flag(err->dirty));
                err_puts(m, purgeable_flag(err->purgeable));
                err_puts(m, err->userptr ? " userptr" : "");
-               err_puts(m, err->ring != -1 ? " " : "");
-               err_puts(m, ring_str(err->ring));
+               err_puts(m, err->engine != -1 ? " " : "");
+               err_puts(m, engine_str(err->engine));
                err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
                if (err->name)
@@ -221,7 +210,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
        }
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
        switch (a) {
        case HANGCHECK_IDLE:
@@ -239,70 +228,74 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
        return "unknown";
 }
 
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-                                 struct drm_device *dev,
-                                 struct drm_i915_error_state *error,
-                                 int ring_idx)
+static void error_print_engine(struct drm_i915_error_state_buf *m,
+                              struct drm_i915_error_engine *ee)
 {
-       struct drm_i915_error_ring *ring = &error->ring[ring_idx];
-
-       if (!ring->valid)
-               return;
-
-       err_printf(m, "%s command stream:\n", ring_str(ring_idx));
-       err_printf(m, "  START: 0x%08x\n", ring->start);
-       err_printf(m, "  HEAD:  0x%08x\n", ring->head);
-       err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
-       err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
-       err_printf(m, "  HWS:   0x%08x\n", ring->hws);
-       err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
-       err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
-       err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
-       err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
-       if (INTEL_INFO(dev)->gen >= 4) {
-               err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
-               err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
-               err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
+       err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+       err_printf(m, "  START: 0x%08x\n", ee->start);
+       err_printf(m, "  HEAD:  0x%08x\n", ee->head);
+       err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
+       err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
+       err_printf(m, "  MODE:  0x%08x\n", ee->mode);
+       err_printf(m, "  HWS:   0x%08x\n", ee->hws);
+       err_printf(m, "  ACTHD: 0x%08x %08x\n",
+                  (u32)(ee->acthd>>32), (u32)ee->acthd);
+       err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
+       err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
+       err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
+       if (ee->batchbuffer) {
+               u64 start = ee->batchbuffer->gtt_offset;
+               u64 end = start + ee->batchbuffer->gtt_size;
+
+               err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
+                          upper_32_bits(start), lower_32_bits(start),
+                          upper_32_bits(end), lower_32_bits(end));
        }
-       err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
-       err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
-                  lower_32_bits(ring->faddr));
-       if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
-               err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
+       if (INTEL_GEN(m->i915) >= 4) {
+               err_printf(m, "  BBADDR: 0x%08x_%08x\n",
+                          (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
+               err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
+               err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
+       }
+       err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
+       err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
+                  lower_32_bits(ee->faddr));
+       if (INTEL_GEN(m->i915) >= 6) {
+               err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-                          ring->semaphore_mboxes[0],
-                          ring->semaphore_seqno[0]);
+                          ee->semaphore_mboxes[0],
+                          ee->semaphore_seqno[0]);
                err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-                          ring->semaphore_mboxes[1],
-                          ring->semaphore_seqno[1]);
-               if (HAS_VEBOX(dev)) {
+                          ee->semaphore_mboxes[1],
+                          ee->semaphore_seqno[1]);
+               if (HAS_VEBOX(m->i915)) {
                        err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-                                  ring->semaphore_mboxes[2],
-                                  ring->semaphore_seqno[2]);
+                                  ee->semaphore_mboxes[2],
+                                  ee->semaphore_seqno[2]);
                }
        }
-       if (USES_PPGTT(dev)) {
-               err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+       if (USES_PPGTT(m->i915)) {
+               err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
 
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (INTEL_GEN(m->i915) >= 8) {
                        int i;
                        for (i = 0; i < 4; i++)
                                err_printf(m, "  PDP%d: 0x%016llx\n",
-                                          i, ring->vm_info.pdp[i]);
+                                          i, ee->vm_info.pdp[i]);
                } else {
                        err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
-                                  ring->vm_info.pp_dir_base);
+                                  ee->vm_info.pp_dir_base);
                }
        }
-       err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
-       err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
-       err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
-       err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
-       err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+       err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
+       err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
+       err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
+       err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
+       err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
        err_printf(m, "  hangcheck: %s [%d]\n",
-                  hangcheck_action_to_str(ring->hangcheck_action),
-                  ring->hangcheck_score);
+                  hangcheck_action_to_str(ee->hangcheck_action),
+                  ee->hangcheck_score);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -328,11 +321,22 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
        }
 }
 
+static void err_print_capabilities(struct drm_i915_error_state_buf *m,
+                                  const struct intel_device_info *info)
+{
+#define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+       DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+}
+
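
err_print_capabilities() uses the X-macro trick: DEV_INFO_FOR_EACH_FLAG() expands a caller-supplied macro once per device flag, keeping the flag list in a single place. A stand-alone model of the pattern; the flag names below are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* One central list of flags; each user supplies its own per-flag macro. */
    #define FOR_EACH_FLAG(func, sep) \
            func(has_llc) sep \
            func(has_snoop) sep \
            func(has_csr)

    struct info { bool has_llc, has_snoop, has_csr; };

    static void print_caps(const struct info *info)
    {
    #define PRINT_FLAG(x) printf(#x ": %s\n", info->x ? "yes" : "no")
    #define SEP_SEMICOLON ;
            FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
    #undef PRINT_FLAG
    #undef SEP_SEMICOLON
    }

    int main(void)
    {
            struct info i = { .has_llc = true };

            print_caps(&i);
            return 0;
    }
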
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
 {
        struct drm_device *dev = error_priv->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
@@ -347,27 +351,28 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       err_print_capabilities(m, &error->device_info);
        max_hangcheck_score = 0;
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               if (error->ring[i].hangcheck_score > max_hangcheck_score)
-                       max_hangcheck_score = error->ring[i].hangcheck_score;
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               if (error->engine[i].hangcheck_score > max_hangcheck_score)
+                       max_hangcheck_score = error->engine[i].hangcheck_score;
        }
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               if (error->ring[i].hangcheck_score == max_hangcheck_score &&
-                   error->ring[i].pid != -1) {
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+                   error->engine[i].pid != -1) {
                        err_printf(m, "Active process (on ring %s): %s [%d]\n",
-                                  ring_str(i),
-                                  error->ring[i].comm,
-                                  error->ring[i].pid);
+                                  engine_str(i),
+                                  error->engine[i].comm,
+                                  error->engine[i].pid);
                }
        }
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
-       err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
-       err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+       err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+       err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
        err_printf(m, "PCI Subsystem: %04x:%04x\n",
-                  dev->pdev->subsystem_vendor,
-                  dev->pdev->subsystem_device);
+                  pdev->subsystem_vendor,
+                  pdev->subsystem_device);
        err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
        if (HAS_CSR(dev)) {
@@ -414,36 +419,55 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        if (IS_GEN7(dev))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
-               i915_ring_error_state(m, dev, error, i);
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               if (error->engine[i].engine_id != -1)
+                       error_print_engine(m, &error->engine[i]);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
+               char buf[128];
+               int len, first = 1;
+
+               if (!error->active_vm[i])
+                       break;
 
-       for (i = 0; i < error->vm_count; i++) {
-               err_printf(m, "vm[%d]\n", i);
+               len = scnprintf(buf, sizeof(buf), "Active (");
+               for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
+                       if (error->engine[j].vm != error->active_vm[i])
+                               continue;
 
-               print_error_buffers(m, "Active",
+                       len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
+                                        first ? "" : ", ",
+                                        dev_priv->engine[j].name);
+                       first = 0;
+               }
+               scnprintf(buf + len, sizeof(buf) - len, ")");
+               print_error_buffers(m, buf,
                                    error->active_bo[i],
                                    error->active_bo_count[i]);
-
-               print_error_buffers(m, "Pinned",
-                                   error->pinned_bo[i],
-                                   error->pinned_bo_count[i]);
        }
 
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               obj = error->ring[i].batchbuffer;
+       print_error_buffers(m, "Pinned (global)",
+                           error->pinned_bo,
+                           error->pinned_bo_count);
+
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               struct drm_i915_error_engine *ee = &error->engine[i];
+
+               obj = ee->batchbuffer;
                if (obj) {
                        err_puts(m, dev_priv->engine[i].name);
-                       if (error->ring[i].pid != -1)
+                       if (ee->pid != -1)
                                err_printf(m, " (submitted by %s [%d])",
-                                          error->ring[i].comm,
-                                          error->ring[i].pid);
+                                          ee->comm,
+                                          ee->pid);
                        err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
                                   upper_32_bits(obj->gtt_offset),
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
 
-               obj = error->ring[i].wa_batchbuffer;
+               obj = ee->wa_batchbuffer;
                if (obj) {
                        err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
                                   dev_priv->engine[i].name,
@@ -451,38 +475,43 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        print_error_obj(m, obj);
                }
 
-               if (error->ring[i].num_requests) {
+               if (ee->num_requests) {
                        err_printf(m, "%s --- %d requests\n",
                                   dev_priv->engine[i].name,
-                                  error->ring[i].num_requests);
-                       for (j = 0; j < error->ring[i].num_requests; j++) {
-                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-                                          error->ring[i].requests[j].seqno,
-                                          error->ring[i].requests[j].jiffies,
-                                          error->ring[i].requests[j].tail);
+                                  ee->num_requests);
+                       for (j = 0; j < ee->num_requests; j++) {
+                               err_printf(m, "  pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+                                          ee->requests[j].pid,
+                                          ee->requests[j].seqno,
+                                          ee->requests[j].jiffies,
+                                          ee->requests[j].head,
+                                          ee->requests[j].tail);
                        }
                }
 
-               if (error->ring[i].num_waiters) {
+               if (IS_ERR(ee->waiters)) {
+                       err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
+                                  dev_priv->engine[i].name);
+               } else if (ee->num_waiters) {
                        err_printf(m, "%s --- %d waiters\n",
                                   dev_priv->engine[i].name,
-                                  error->ring[i].num_waiters);
-                       for (j = 0; j < error->ring[i].num_waiters; j++) {
+                                  ee->num_waiters);
+                       for (j = 0; j < ee->num_waiters; j++) {
                                err_printf(m, " seqno 0x%08x for %s [%d]\n",
-                                          error->ring[i].waiters[j].seqno,
-                                          error->ring[i].waiters[j].comm,
-                                          error->ring[i].waiters[j].pid);
+                                          ee->waiters[j].seqno,
+                                          ee->waiters[j].comm,
+                                          ee->waiters[j].pid);
                        }
                }
 
-               if ((obj = error->ring[i].ringbuffer)) {
+               if ((obj = ee->ringbuffer)) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->engine[i].name,
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
 
-               if ((obj = error->ring[i].hws_page)) {
+               if ((obj = ee->hws_page)) {
                        u64 hws_offset = obj->gtt_offset;
                        u32 *hws_page = &obj->pages[0][0];
 
@@ -504,7 +533,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
-               obj = error->ring[i].wa_ctx;
+               obj = ee->wa_ctx;
                if (obj) {
                        u64 wa_ctx_offset = obj->gtt_offset;
                        u32 *wa_ctx_page = &obj->pages[0][0];
@@ -526,7 +555,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
-               if ((obj = error->ring[i].ctx)) {
+               if ((obj = ee->ctx)) {
                        err_printf(m, "%s --- HW Context = 0x%08x\n",
                                   dev_priv->engine[i].name,
                                   lower_32_bits(obj->gtt_offset));
@@ -534,7 +563,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                }
        }
 
-       if ((obj = error->semaphore_obj)) {
+       if ((obj = error->semaphore)) {
                err_printf(m, "Semaphore page = 0x%08x\n",
                           lower_32_bits(obj->gtt_offset));
                for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
@@ -611,26 +640,27 @@ static void i915_error_state_free(struct kref *error_ref)
                                                          typeof(*error), ref);
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               i915_error_object_free(error->ring[i].batchbuffer);
-               i915_error_object_free(error->ring[i].wa_batchbuffer);
-               i915_error_object_free(error->ring[i].ringbuffer);
-               i915_error_object_free(error->ring[i].hws_page);
-               i915_error_object_free(error->ring[i].ctx);
-               i915_error_object_free(error->ring[i].wa_ctx);
-               kfree(error->ring[i].requests);
-               kfree(error->ring[i].waiters);
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               struct drm_i915_error_engine *ee = &error->engine[i];
+
+               i915_error_object_free(ee->batchbuffer);
+               i915_error_object_free(ee->wa_batchbuffer);
+               i915_error_object_free(ee->ringbuffer);
+               i915_error_object_free(ee->hws_page);
+               i915_error_object_free(ee->ctx);
+               i915_error_object_free(ee->wa_ctx);
+
+               kfree(ee->requests);
+               if (!IS_ERR_OR_NULL(ee->waiters))
+                       kfree(ee->waiters);
        }
 
-       i915_error_object_free(error->semaphore_obj);
+       i915_error_object_free(error->semaphore);
 
-       for (i = 0; i < error->vm_count; i++)
+       for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
                kfree(error->active_bo[i]);
-
-       kfree(error->active_bo);
-       kfree(error->active_bo_count);
        kfree(error->pinned_bo);
-       kfree(error->pinned_bo_count);
+
        kfree(error->overlay);
        kfree(error->display);
        kfree(error);
@@ -638,46 +668,45 @@ static void i915_error_state_free(struct kref *error_ref)
 
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
-                        struct drm_i915_gem_object *src,
-                        struct i915_address_space *vm)
+                        struct i915_vma *vma)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_gem_object *src;
        struct drm_i915_error_object *dst;
-       struct i915_vma *vma = NULL;
        int num_pages;
        bool use_ggtt;
        int i = 0;
        u64 reloc_offset;
 
-       if (src == NULL || src->pages == NULL)
+       if (!vma)
+               return NULL;
+
+       src = vma->obj;
+       if (!src->pages)
                return NULL;
 
        num_pages = src->base.size >> PAGE_SHIFT;
 
        dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-       if (dst == NULL)
+       if (!dst)
                return NULL;
 
-       if (i915_gem_obj_bound(src, vm))
-               dst->gtt_offset = i915_gem_obj_offset(src, vm);
-       else
-               dst->gtt_offset = -1;
+       dst->gtt_offset = vma->node.start;
+       dst->gtt_size = vma->node.size;
 
        reloc_offset = dst->gtt_offset;
-       if (i915_is_ggtt(vm))
-               vma = i915_gem_obj_to_ggtt(src);
        use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-                  vma && (vma->bound & GLOBAL_BIND) &&
+                  (vma->flags & I915_VMA_GLOBAL_BIND) &&
                   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
        /* Cannot access stolen address directly; try to use the aperture */
        if (src->stolen) {
                use_ggtt = true;
 
-               if (!(vma && vma->bound & GLOBAL_BIND))
+               if (!(vma->flags & I915_VMA_GLOBAL_BIND))
                        goto unwind;
 
-               reloc_offset = i915_gem_obj_ggtt_offset(src);
+               reloc_offset = vma->node.start;
                if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
                        goto unwind;
        }
@@ -705,7 +734,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                         * captures what the GPU read.
                         */
 
-                       s = io_mapping_map_atomic_wc(ggtt->mappable,
+                       s = io_mapping_map_atomic_wc(&ggtt->mappable,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
@@ -737,8 +766,24 @@ unwind:
        kfree(dst);
        return NULL;
 }
-#define i915_error_ggtt_object_create(dev_priv, src) \
-       i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
+
+/* The error capture is special as it tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+       return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+       struct intel_engine_cs *engine;
+
+       engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+       return engine ? engine->id : -1;
+}
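
As the new comment says, capture runs underneath the normal locking rules, so these helpers peek at the active request without taking a lock and tolerate a stale answer. A minimal model of such a racy-but-atomic snapshot:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(unsigned int) last_seqno;

    /* One relaxed atomic load, no lock taken; the result may already be
     * stale by the time the caller looks at it, which is acceptable for
     * a best-effort crash dump. */
    static unsigned int peek_seqno(void)
    {
            return atomic_load_explicit(&last_seqno, memory_order_relaxed);
    }

    int main(void)
    {
            atomic_store(&last_seqno, 42);
            printf("seqno snapshot: %u\n", peek_seqno());
            return 0;
    }
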
 
 static void capture_bo(struct drm_i915_error_buffer *err,
                       struct i915_vma *vma)
@@ -748,32 +793,34 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
        err->size = obj->base.size;
        err->name = obj->base.name;
+
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
-       err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+               err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+       err->wseqno = __active_get_seqno(&obj->last_write);
+       err->engine = __active_get_engine_id(&obj->last_write);
+
        err->gtt_offset = vma->node.start;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
-       err->fence_reg = obj->fence_reg;
-       err->pinned = 0;
-       if (i915_gem_obj_is_pinned(obj))
-               err->pinned = 1;
-       err->tiling = obj->tiling_mode;
+       err->fence_reg = vma->fence ? vma->fence->id : -1;
+       err->tiling = i915_gem_object_get_tiling(obj);
        err->dirty = obj->dirty;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
-       err->ring = obj->last_write_req ?
-                       i915_gem_request_get_engine(obj->last_write_req)->id : -1;
        err->cache_level = obj->cache_level;
 }
 
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head)
+static u32 capture_error_bo(struct drm_i915_error_buffer *err,
+                           int count, struct list_head *head,
+                           bool pinned_only)
 {
        struct i915_vma *vma;
        int i = 0;
 
        list_for_each_entry(vma, head, vm_link) {
+               if (pinned_only && !i915_vma_is_pinned(vma))
+                       continue;
+
                capture_bo(err++, vma);
                if (++i == count)
                        break;
@@ -782,28 +829,6 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
        return i;
 }
 
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head,
-                            struct i915_address_space *vm)
-{
-       struct drm_i915_gem_object *obj;
-       struct drm_i915_error_buffer * const first = err;
-       struct drm_i915_error_buffer * const last = err + count;
-
-       list_for_each_entry(obj, head, global_list) {
-               struct i915_vma *vma;
-
-               if (err == last)
-                       break;
-
-               list_for_each_entry(vma, &obj->vma_list, obj_link)
-                       if (vma->vm == vm && vma->pin_count > 0)
-                               capture_bo(err++, vma);
-       }
-
-       return err - first;
-}
-
 /* Generate a semi-unique error code. The code is not meant to have meaning; the
  * code's only purpose is to try to prevent false duplicate bug reports by
  * grossly estimating a GPU error state.
@@ -815,7 +840,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
  */
 static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
                                         struct drm_i915_error_state *error,
-                                        int *ring_id)
+                                        int *engine_id)
 {
        uint32_t error_code = 0;
        int i;
@@ -826,11 +851,11 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
         * strictly a client bug. Use instdone to differentiate those somewhat.
         */
        for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
-                       if (ring_id)
-                               *ring_id = i;
+               if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+                       if (engine_id)
+                               *engine_id = i;
 
-                       return error->ring[i].ipehr ^ error->ring[i].instdone;
+                       return error->engine[i].ipehr ^ error->engine[i].instdone;
                }
        }
 
@@ -855,22 +880,17 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
 }
 
 
-static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
-                                       struct drm_i915_error_state *error,
+static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
                                        struct intel_engine_cs *engine,
-                                       struct drm_i915_error_ring *ering)
+                                       struct drm_i915_error_engine *ee)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *to;
        enum intel_engine_id id;
 
-       if (!i915_semaphore_is_enabled(dev_priv))
+       if (!error->semaphore)
                return;
 
-       if (!error->semaphore_obj)
-               error->semaphore_obj =
-                       i915_error_ggtt_object_create(dev_priv,
-                                                     dev_priv->semaphore_obj);
-
        for_each_engine_id(to, dev_priv, id) {
                int idx;
                u16 signal_offset;
@@ -879,44 +899,52 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                if (engine == to)
                        continue;
 
-               signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
-                               / 4;
-               tmp = error->semaphore_obj->pages[0];
-               idx = intel_ring_sync_index(engine, to);
+               signal_offset =
+                       (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
+               tmp = error->semaphore->pages[0];
+               idx = intel_engine_sync_index(engine, to);
 
-               ering->semaphore_mboxes[idx] = tmp[signal_offset];
-               ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
+               ee->semaphore_mboxes[idx] = tmp[signal_offset];
+               ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
        }
 }
 
-static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *engine,
-                                       struct drm_i915_error_ring *ering)
+static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
+                                       struct drm_i915_error_engine *ee)
 {
-       ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
-       ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
-       ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
-       ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+       ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+       ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+       ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
        if (HAS_VEBOX(dev_priv)) {
-               ering->semaphore_mboxes[2] =
+               ee->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(engine->mmio_base));
-               ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
+               ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
        }
 }
 
-static void engine_record_waiters(struct intel_engine_cs *engine,
-                                 struct drm_i915_error_ring *ering)
+static void error_record_engine_waiters(struct intel_engine_cs *engine,
+                                       struct drm_i915_error_engine *ee)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_error_waiter *waiter;
        struct rb_node *rb;
        int count;
 
-       ering->num_waiters = 0;
-       ering->waiters = NULL;
+       ee->num_waiters = 0;
+       ee->waiters = NULL;
+
+       if (RB_EMPTY_ROOT(&b->waiters))
+               return;
+
+       if (!spin_trylock(&b->lock)) {
+               ee->waiters = ERR_PTR(-EDEADLK);
+               return;
+       }
 
-       spin_lock(&b->lock);
        count = 0;
        for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
                count++;
@@ -930,9 +958,13 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
        if (!waiter)
                return;
 
-       ering->waiters = waiter;
+       if (!spin_trylock(&b->lock)) {
+               kfree(waiter);
+               ee->waiters = ERR_PTR(-EDEADLK);
+               return;
+       }
 
-       spin_lock(&b->lock);
+       ee->waiters = waiter;
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = container_of(rb, typeof(*w), node);
 
@@ -941,57 +973,59 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
                waiter->seqno = w->seqno;
                waiter++;
 
-               if (++ering->num_waiters == count)
+               if (++ee->num_waiters == count)
                        break;
        }
        spin_unlock(&b->lock);
 }
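
spin_trylock() appears twice above for the same reason: the error capture may run while the hung context owns b->lock, so blocking here could deadlock the dump itself. A userspace sketch of the try-or-record-failure pattern, with pthreads standing in for the spinlock:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t waiter_lock = PTHREAD_MUTEX_INITIALIZER;

    /* The error path must never block on this lock. Try once; on
     * contention, record "couldn't look" instead of the waiter list. */
    static int snapshot_waiters(void)
    {
            if (pthread_mutex_trylock(&waiter_lock) != 0)
                    return -EDEADLK;  /* cf. ee->waiters = ERR_PTR(-EDEADLK) */

            /* ...walk and copy the waiter tree here... */
            pthread_mutex_unlock(&waiter_lock);
            return 0;
    }

    int main(void)
    {
            printf("snapshot_waiters() = %d\n", snapshot_waiters());
            return 0;
    }
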
 
-static void i915_record_ring_state(struct drm_i915_private *dev_priv,
-                                  struct drm_i915_error_state *error,
-                                  struct intel_engine_cs *engine,
-                                  struct drm_i915_error_ring *ering)
+static void error_record_engine_registers(struct drm_i915_error_state *error,
+                                         struct intel_engine_cs *engine,
+                                         struct drm_i915_error_engine *ee)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
+
        if (INTEL_GEN(dev_priv) >= 6) {
-               ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
-               ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
+               ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+               ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
                if (INTEL_GEN(dev_priv) >= 8)
-                       gen8_record_semaphore_state(dev_priv, error, engine,
-                                                   ering);
+                       gen8_record_semaphore_state(error, engine, ee);
                else
-                       gen6_record_semaphore_state(dev_priv, engine, ering);
+                       gen6_record_semaphore_state(engine, ee);
        }
 
        if (INTEL_GEN(dev_priv) >= 4) {
-               ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
-               ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
-               ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-               ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
-               ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
-               ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+               ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+               ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+               ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+               ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+               ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+               ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
                if (INTEL_GEN(dev_priv) >= 8) {
-                       ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
-                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+                       ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+                       ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
                }
-               ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+               ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
        } else {
-               ering->faddr = I915_READ(DMA_FADD_I8XX);
-               ering->ipeir = I915_READ(IPEIR);
-               ering->ipehr = I915_READ(IPEHR);
-               ering->instdone = I915_READ(GEN2_INSTDONE);
+               ee->faddr = I915_READ(DMA_FADD_I8XX);
+               ee->ipeir = I915_READ(IPEIR);
+               ee->ipehr = I915_READ(IPEHR);
+               ee->instdone = I915_READ(GEN2_INSTDONE);
        }
 
-       ering->waiting = intel_engine_has_waiter(engine);
-       ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-       ering->acthd = intel_ring_get_active_head(engine);
-       ering->seqno = intel_engine_get_seqno(engine);
-       ering->last_seqno = engine->last_submitted_seqno;
-       ering->start = I915_READ_START(engine);
-       ering->head = I915_READ_HEAD(engine);
-       ering->tail = I915_READ_TAIL(engine);
-       ering->ctl = I915_READ_CTL(engine);
-
-       if (I915_NEED_GFX_HWS(dev_priv)) {
+       ee->waiting = intel_engine_has_waiter(engine);
+       ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+       ee->acthd = intel_engine_get_active_head(engine);
+       ee->seqno = intel_engine_get_seqno(engine);
+       ee->last_seqno = engine->last_submitted_seqno;
+       ee->start = I915_READ_START(engine);
+       ee->head = I915_READ_HEAD(engine);
+       ee->tail = I915_READ_TAIL(engine);
+       ee->ctl = I915_READ_CTL(engine);
+       if (INTEL_GEN(dev_priv) > 2)
+               ee->mode = I915_READ_MODE(engine);
+
+       if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
                i915_reg_t mmio;
 
                if (IS_GEN7(dev_priv)) {
@@ -1017,107 +1051,150 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                        mmio = RING_HWS_PGA(engine->mmio_base);
                }
 
-               ering->hws = I915_READ(mmio);
+               ee->hws = I915_READ(mmio);
        }
 
-       ering->hangcheck_score = engine->hangcheck.score;
-       ering->hangcheck_action = engine->hangcheck.action;
+       ee->hangcheck_score = engine->hangcheck.score;
+       ee->hangcheck_action = engine->hangcheck.action;
 
        if (USES_PPGTT(dev_priv)) {
                int i;
 
-               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
+               ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
                if (IS_GEN6(dev_priv))
-                       ering->vm_info.pp_dir_base =
+                       ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
                else if (IS_GEN7(dev_priv))
-                       ering->vm_info.pp_dir_base =
+                       ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
                else if (INTEL_GEN(dev_priv) >= 8)
                        for (i = 0; i < 4; i++) {
-                               ering->vm_info.pdp[i] =
+                               ee->vm_info.pdp[i] =
                                        I915_READ(GEN8_RING_PDP_UDW(engine, i));
-                               ering->vm_info.pdp[i] <<= 32;
-                               ering->vm_info.pdp[i] |=
+                               ee->vm_info.pdp[i] <<= 32;
+                               ee->vm_info.pdp[i] |=
                                        I915_READ(GEN8_RING_PDP_LDW(engine, i));
                        }
        }
 }
 
-
-static void i915_gem_record_active_context(struct intel_engine_cs *engine,
-                                          struct drm_i915_error_state *error,
-                                          struct drm_i915_error_ring *ering)
+static void engine_record_requests(struct intel_engine_cs *engine,
+                                  struct drm_i915_gem_request *first,
+                                  struct drm_i915_error_engine *ee)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_request *request;
+       int count;
 
-       /* Currently render ring is the only HW context user */
-       if (engine->id != RCS || !error->ccid)
+       count = 0;
+       request = first;
+       list_for_each_entry_from(request, &engine->request_list, link)
+               count++;
+       if (!count)
                return;
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!i915_gem_obj_ggtt_bound(obj))
-                       continue;
+       ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+       if (!ee->requests)
+               return;
 
-               if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
-                       ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+       ee->num_requests = count;
+
+       count = 0;
+       request = first;
+       list_for_each_entry_from(request, &engine->request_list, link) {
+               struct drm_i915_error_request *erq;
+
+               if (count >= ee->num_requests) {
+                       /*
+                        * If the ring request list was changed between
+                        * the point where the error request list was
+                        * created and dimensioned and this point, just
+                        * exit early to avoid crashes.
+                        *
+                        * We don't need to communicate that the request
+                        * list changed state during error state capture,
+                        * or that the error state is slightly incorrect
+                        * as a consequence, since we are typically only
+                        * interested in the request list state at the
+                        * point of error state capture, not in changes
+                        * happening during the capture.
+                        */
                        break;
                }
+
+               erq = &ee->requests[count++];
+               erq->seqno = request->fence.seqno;
+               erq->jiffies = request->emitted_jiffies;
+               erq->head = request->head;
+               erq->tail = request->tail;
+
+               rcu_read_lock();
+               erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+               rcu_read_unlock();
        }
+       ee->num_requests = count;
 }
 
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                                  struct drm_i915_error_state *error)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_request *request;
-       int i, count;
+       int i;
+
+       error->semaphore =
+               i915_error_object_create(dev_priv, dev_priv->semaphore);
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct intel_engine_cs *engine = &dev_priv->engine[i];
+               struct drm_i915_error_engine *ee = &error->engine[i];
+               struct drm_i915_gem_request *request;
 
-               error->ring[i].pid = -1;
+               ee->pid = -1;
+               ee->engine_id = -1;
 
                if (!intel_engine_initialized(engine))
                        continue;
 
-               error->ring[i].valid = true;
+               ee->engine_id = i;
 
-               i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
-               engine_record_waiters(engine, &error->ring[i]);
+               error_record_engine_registers(error, engine, ee);
+               error_record_engine_waiters(engine, ee);
 
                request = i915_gem_find_active_request(engine);
                if (request) {
-                       struct i915_address_space *vm;
-                       struct intel_ringbuffer *rb;
+                       struct intel_ring *ring;
+                       struct pid *pid;
 
-                       vm = request->ctx->ppgtt ?
+                       ee->vm = request->ctx->ppgtt ?
                                &request->ctx->ppgtt->base : &ggtt->base;
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
                         * by userspace.
                         */
-                       error->ring[i].batchbuffer =
+                       ee->batchbuffer =
                                i915_error_object_create(dev_priv,
-                                                        request->batch_obj,
-                                                        vm);
+                                                        request->batch);
 
                        if (HAS_BROKEN_CS_TLB(dev_priv))
-                               error->ring[i].wa_batchbuffer =
-                                       i915_error_ggtt_object_create(dev_priv,
-                                                            engine->scratch.obj);
+                               ee->wa_batchbuffer =
+                                       i915_error_object_create(dev_priv,
+                                                                engine->scratch);
+
+                       ee->ctx =
+                               i915_error_object_create(dev_priv,
+                                                        request->ctx->engine[i].state);
 
-                       if (request->pid) {
+                       pid = request->ctx->pid;
+                       if (pid) {
                                struct task_struct *task;
 
                                rcu_read_lock();
-                               task = pid_task(request->pid, PIDTYPE_PID);
+                               task = pid_task(pid, PIDTYPE_PID);
                                if (task) {
-                                       strcpy(error->ring[i].comm, task->comm);
-                                       error->ring[i].pid = task->pid;
+                                       strcpy(ee->comm, task->comm);
+                                       ee->pid = task->pid;
                                }
                                rcu_read_unlock();
                        }
@@ -1125,153 +1202,106 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                        error->simulated |=
                                request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-                       rb = request->ringbuf;
-                       error->ring[i].cpu_ring_head = rb->head;
-                       error->ring[i].cpu_ring_tail = rb->tail;
-                       error->ring[i].ringbuffer =
-                               i915_error_ggtt_object_create(dev_priv,
-                                                             rb->obj);
-               }
-
-               error->ring[i].hws_page =
-                       i915_error_ggtt_object_create(dev_priv,
-                                                     engine->status_page.obj);
+                       ring = request->ring;
+                       ee->cpu_ring_head = ring->head;
+                       ee->cpu_ring_tail = ring->tail;
+                       ee->ringbuffer =
+                               i915_error_object_create(dev_priv, ring->vma);
 
-               if (engine->wa_ctx.obj) {
-                       error->ring[i].wa_ctx =
-                               i915_error_ggtt_object_create(dev_priv,
-                                                             engine->wa_ctx.obj);
+                       engine_record_requests(engine, request, ee);
                }
 
-               i915_gem_record_active_context(engine, error, &error->ring[i]);
-
-               count = 0;
-               list_for_each_entry(request, &engine->request_list, list)
-                       count++;
-
-               error->ring[i].num_requests = count;
-               error->ring[i].requests =
-                       kcalloc(count, sizeof(*error->ring[i].requests),
-                               GFP_ATOMIC);
-               if (error->ring[i].requests == NULL) {
-                       error->ring[i].num_requests = 0;
-                       continue;
-               }
-
-               count = 0;
-               list_for_each_entry(request, &engine->request_list, list) {
-                       struct drm_i915_error_request *erq;
-
-                       if (count >= error->ring[i].num_requests) {
-                               /*
-                                * If the ring request list was changed in
-                                * between the point where the error request
-                                * list was created and dimensioned and this
-                                * point then just exit early to avoid crashes.
-                                *
-                                * We don't need to communicate that the
-                                * request list changed state during error
-                                * state capture and that the error state is
-                                * slightly incorrect as a consequence since we
-                                * are typically only interested in the request
-                                * list state at the point of error state
-                                * capture, not in any changes happening during
-                                * the capture.
-                                */
-                               break;
-                       }
+               ee->hws_page =
+                       i915_error_object_create(dev_priv,
+                                                engine->status_page.vma);
 
-                       erq = &error->ring[i].requests[count++];
-                       erq->seqno = request->seqno;
-                       erq->jiffies = request->emitted_jiffies;
-                       erq->tail = request->postfix;
-               }
+               ee->wa_ctx =
+                       i915_error_object_create(dev_priv, engine->wa_ctx.vma);
        }
 }
 
-/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
- * VM.
- */
 static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
                                struct drm_i915_error_state *error,
                                struct i915_address_space *vm,
-                               const int ndx)
+                               int idx)
 {
-       struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_error_buffer *active_bo;
        struct i915_vma *vma;
-       int i;
+       int count;
 
-       i = 0;
+       count = 0;
        list_for_each_entry(vma, &vm->active_list, vm_link)
-               i++;
-       error->active_bo_count[ndx] = i;
-
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               list_for_each_entry(vma, &obj->vma_list, obj_link)
-                       if (vma->vm == vm && vma->pin_count > 0)
-                               i++;
-       }
-       error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-
-       if (i) {
-               active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
-               if (active_bo)
-                       pinned_bo = active_bo + error->active_bo_count[ndx];
-       }
+               count++;
 
+       active_bo = NULL;
+       if (count)
+               active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
        if (active_bo)
-               error->active_bo_count[ndx] =
-                       capture_active_bo(active_bo,
-                                         error->active_bo_count[ndx],
-                                         &vm->active_list);
-
-       if (pinned_bo)
-               error->pinned_bo_count[ndx] =
-                       capture_pinned_bo(pinned_bo,
-                                         error->pinned_bo_count[ndx],
-                                         &dev_priv->mm.bound_list, vm);
-       error->active_bo[ndx] = active_bo;
-       error->pinned_bo[ndx] = pinned_bo;
+               count = capture_error_bo(active_bo, count, &vm->active_list, false);
+       else
+               count = 0;
+
+       error->active_vm[idx] = vm;
+       error->active_bo[idx] = active_bo;
+       error->active_bo_count[idx] = count;
 }
 
-static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
-                                    struct drm_i915_error_state *error)
+static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
+                                       struct drm_i915_error_state *error)
 {
-       struct i915_address_space *vm;
-       int cnt = 0, i = 0;
-
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-               cnt++;
-
-       error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
-       error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
-       error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
-                                        GFP_ATOMIC);
-       error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
-                                        GFP_ATOMIC);
-
-       if (error->active_bo == NULL ||
-           error->pinned_bo == NULL ||
-           error->active_bo_count == NULL ||
-           error->pinned_bo_count == NULL) {
-               kfree(error->active_bo);
-               kfree(error->active_bo_count);
-               kfree(error->pinned_bo);
-               kfree(error->pinned_bo_count);
-
-               error->active_bo = NULL;
-               error->active_bo_count = NULL;
-               error->pinned_bo = NULL;
-               error->pinned_bo_count = NULL;
-       } else {
-               list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-                       i915_gem_capture_vm(dev_priv, error, vm, i++);
+       int cnt = 0, i, j;
 
-               error->vm_count = cnt;
+       BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
+       BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
+       BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+
+       /* Scan each engine looking for unique active contexts/vm */
+       for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+               struct drm_i915_error_engine *ee = &error->engine[i];
+               bool found;
+
+               if (!ee->vm)
+                       continue;
+
+               found = false;
+               for (j = 0; j < i && !found; j++)
+                       found = error->engine[j].vm == ee->vm;
+               if (!found)
+                       i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
        }
 }
 
+static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
+                                       struct drm_i915_error_state *error)
+{
+       struct i915_address_space *vm = &dev_priv->ggtt.base;
+       struct drm_i915_error_buffer *bo;
+       struct i915_vma *vma;
+       int count_active, count_inactive;
+
+       count_active = 0;
+       list_for_each_entry(vma, &vm->active_list, vm_link)
+               count_active++;
+
+       count_inactive = 0;
+       list_for_each_entry(vma, &vm->inactive_list, vm_link)
+               count_inactive++;
+
+       bo = NULL;
+       if (count_active + count_inactive)
+               bo = kcalloc(count_active + count_inactive,
+                            sizeof(*bo), GFP_ATOMIC);
+       if (!bo)
+               return;
+
+       count_active = capture_error_bo(bo, count_active,
+                                       &vm->active_list, true);
+       count_inactive = capture_error_bo(bo + count_active, count_inactive,
+                                         &vm->inactive_list, true);
+       error->pinned_bo_count = count_active + count_inactive;
+       error->pinned_bo = bo;
+}
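
Both capture paths above share the same shape: walk the list(s) once to size
the allocation, allocate with GFP_ATOMIC (error capture can run in contexts
where sleeping is not allowed), and quietly capture nothing if the allocation
fails. A condensed sketch of that pattern (illustrative only; capture_list()
is a hypothetical helper, not driver code):

	static struct drm_i915_error_buffer *
	capture_list(struct list_head *head, int *count)
	{
		struct drm_i915_error_buffer *bo;
		struct i915_vma *vma;
		int n = 0;

		list_for_each_entry(vma, head, vm_link)
			n++;

		/* GFP_ATOMIC: we may be called where sleeping is forbidden */
		bo = n ? kcalloc(n, sizeof(*bo), GFP_ATOMIC) : NULL;
		*count = bo ? n : 0;
		return bo;
	}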
+
 /* Capture all registers which don't fit into another category. */
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
@@ -1352,20 +1382,20 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
                                   const char *error_msg)
 {
        u32 ecode;
-       int ring_id = -1, len;
+       int engine_id = -1, len;
 
-       ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+       ecode = i915_error_generate_code(dev_priv, error, &engine_id);
 
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
                        "GPU HANG: ecode %d:%d:0x%08x",
-                       INTEL_GEN(dev_priv), ring_id, ecode);
+                       INTEL_GEN(dev_priv), engine_id, ecode);
 
-       if (ring_id != -1 && error->ring[ring_id].pid != -1)
+       if (engine_id != -1 && error->engine[engine_id].pid != -1)
                len += scnprintf(error->error_msg + len,
                                 sizeof(error->error_msg) - len,
                                 ", in %s [%d]",
-                                error->ring[ring_id].comm,
-                                error->ring[ring_id].pid);
+                                error->engine[engine_id].comm,
+                                error->engine[engine_id].pid);
 
        scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
                  ", reason: %s, action: %s",
@@ -1382,6 +1412,10 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 #endif
        error->reset_count = i915_reset_count(&dev_priv->gpu_error);
        error->suspend_count = dev_priv->suspend_count;
+
+       memcpy(&error->device_info,
+              INTEL_INFO(dev_priv),
+              sizeof(error->device_info));
 }
 
 /**
@@ -1415,9 +1449,10 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 
        i915_capture_gen_state(dev_priv, error);
        i915_capture_reg_state(dev_priv, error);
-       i915_gem_capture_buffers(dev_priv, error);
        i915_gem_record_fences(dev_priv, error);
        i915_gem_record_rings(dev_priv, error);
+       i915_capture_active_buffers(dev_priv, error);
+       i915_capture_pinned_buffers(dev_priv, error);
 
        do_gettimeofday(&error->time);
 
index cf5a65be4fe0609247fdfc3147349230e5aa6c48..a47e1e4aec0390240b8c1494764f5330ff4443ac 100644 (file)
 #define HOST2GUC_INTERRUPT             _MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER               (1<<0)
 
-#define DRBMISC1                       0x1984
-#define   DOORBELL_ENABLE                (1<<0)
-
 #define GEN8_DRBREGL(x)                        _MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID                 (1<<0)
 #define GEN8_DRBREGU(x)                        _MMIO(0x1000 + (x) * 8 + 4)
index 2112e029db6a929ea7f55c4266ffbe24af860d83..43358e18d34cb3f5f770bec71a95644dd225ff32 100644 (file)
@@ -59,7 +59,7 @@
  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
  * represents an in-order queue. The kernel driver packs the ring tail pointer
  * and an ELSP context descriptor dword into each Work Item.
- * See guc_add_workqueue_item()
+ * See guc_wq_item_append()
  *
  */
 
@@ -114,10 +114,8 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
                if (ret != -ETIMEDOUT)
                        ret = -EIO;
 
-               DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
-                               "status=0x%08X response=0x%08X\n",
-                               data[0], ret, status,
-                               I915_READ(SOFT_SCRATCH(15)));
+               DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
+                        data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
 
                dev_priv->guc.action_fail += 1;
                dev_priv->guc.action_err = ret;
@@ -183,7 +181,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
                                  struct i915_guc_client *client,
                                  u16 new_id)
 {
-       struct sg_table *sg = guc->ctx_pool_obj->pages;
+       struct sg_table *sg = guc->ctx_pool_vma->pages;
        void *doorbell_bitmap = guc->doorbell_bitmap;
        struct guc_doorbell_info *doorbell;
        struct guc_context_desc desc;
@@ -290,7 +288,7 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_init_proc_desc(struct intel_guc *guc,
+static void guc_proc_desc_init(struct intel_guc *guc,
                               struct i915_guc_client *client)
 {
        struct guc_process_desc *desc;
@@ -322,15 +320,15 @@ static void guc_init_proc_desc(struct intel_guc *guc,
  * write queue, etc).
  */
 
-static void guc_init_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_init(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
-       struct drm_i915_gem_object *client_obj = client->client_obj;
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx = client->owner;
        struct guc_context_desc desc;
        struct sg_table *sg;
+       unsigned int tmp;
        u32 gfx_addr;
 
        memset(&desc, 0, sizeof(desc));
@@ -340,10 +338,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.priority = client->priority;
        desc.db_id = client->doorbell_id;
 
-       for_each_engine(engine, dev_priv) {
+       for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
                struct intel_context *ce = &ctx->engine[engine->id];
-               struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
-               struct drm_i915_gem_object *obj;
+               uint32_t guc_engine_id = engine->guc_id;
+               struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
 
                /* TODO: We have a design issue to be solved here. Only when we
                 * receive the first batch, we know which engine is used by the
@@ -358,30 +356,29 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
                /* The state page is after PPHWSP */
-               gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
-               lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
+               lrc->ring_lcra =
+                       i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-                               (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
-
-               obj = ce->ringbuf->obj;
-               gfx_addr = i915_gem_obj_ggtt_offset(obj);
+                               (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
-               lrc->ring_begin = gfx_addr;
-               lrc->ring_end = gfx_addr + obj->base.size - 1;
-               lrc->ring_next_free_location = gfx_addr;
+               lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+               lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
+               lrc->ring_next_free_location = lrc->ring_begin;
                lrc->ring_current_tail_pointer_value = 0;
 
-               desc.engines_used |= (1 << engine->guc_id);
+               desc.engines_used |= (1 << guc_engine_id);
        }
 
+       DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
+                       client->engines, desc.engines_used);
        WARN_ON(desc.engines_used == 0);
 
        /*
         * The doorbell, process descriptor, and workqueue are all parts
         * of the client object, which the GuC will reference via the GGTT
         */
-       gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
-       desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+       gfx_addr = i915_ggtt_offset(client->vma);
+       desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
                                client->doorbell_offset;
        desc.db_trigger_cpu = (uintptr_t)client->client_base +
                                client->doorbell_offset;
@@ -397,12 +394,12 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.desc_private = (uintptr_t)client;
 
        /* Pool context is pinned already */
-       sg = guc->ctx_pool_obj->pages;
+       sg = guc->ctx_pool_vma->pages;
        sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
                             sizeof(desc) * client->ctx_index);
 }
 
-static void guc_fini_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_fini(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
        struct guc_context_desc desc;
@@ -410,13 +407,13 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 
        memset(&desc, 0, sizeof(desc));
 
-       sg = guc->ctx_pool_obj->pages;
+       sg = guc->ctx_pool_vma->pages;
        sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
                             sizeof(desc) * client->ctx_index);
 }
 
 /**
- * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
  * @request:   request associated with the commands
  *
  * Return:     0 if space is available
@@ -424,39 +421,44 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
  *
  * This function must be called (and must return 0) before a request
  * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it remains valid until (but only until)
- * the next call to submit().
+ * of 0 has been returned, it must be balanced by a corresponding
+ * call to submit().
  *
- * This precheck allows the caller to determine in advance that space
+ * Reservation allows the caller to determine in advance that space
  * will be available for the next submission before committing resources
  * to it, and helps avoid late failures with complicated recovery paths.
  */
-int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
+int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 {
        const size_t wqi_size = sizeof(struct guc_wq_item);
        struct i915_guc_client *gc = request->i915->guc.execbuf_client;
-       struct guc_process_desc *desc;
+       struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
        u32 freespace;
+       int ret;
 
-       GEM_BUG_ON(gc == NULL);
-
-       desc = gc->client_base + gc->proc_desc_offset;
-
+       spin_lock(&gc->wq_lock);
        freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
-       if (likely(freespace >= wqi_size))
-               return 0;
-
-       gc->no_wq_space += 1;
+       freespace -= gc->wq_rsvd;
+       if (likely(freespace >= wqi_size)) {
+               gc->wq_rsvd += wqi_size;
+               ret = 0;
+       } else {
+               gc->no_wq_space++;
+               ret = -EAGAIN;
+       }
+       spin_unlock(&gc->wq_lock);
 
-       return -EAGAIN;
+       return ret;
 }
 
-static void guc_add_workqueue_item(struct i915_guc_client *gc,
-                                  struct drm_i915_gem_request *rq)
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct i915_guc_client *gc,
+                              struct drm_i915_gem_request *rq)
 {
        /* wqi_len is in DWords, and does not include the one-word header */
        const size_t wqi_size = sizeof(struct guc_wq_item);
        const u32 wqi_len = wqi_size/sizeof(u32) - 1;
+       struct intel_engine_cs *engine = rq->engine;
        struct guc_process_desc *desc;
        struct guc_wq_item *wqi;
        void *base;
@@ -464,7 +466,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 
        desc = gc->client_base + gc->proc_desc_offset;
 
-       /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+       /* Free space is guaranteed, see i915_guc_wq_reserve() above */
        freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
        GEM_BUG_ON(freespace < wqi_size);
 
@@ -482,31 +484,32 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
         * workqueue buffer dw by dw.
         */
        BUILD_BUG_ON(wqi_size != 16);
+       GEM_BUG_ON(gc->wq_rsvd < wqi_size);
 
        /* postincrement WQ tail for next time */
        wq_off = gc->wq_tail;
+       GEM_BUG_ON(wq_off & (wqi_size - 1));
        gc->wq_tail += wqi_size;
        gc->wq_tail &= gc->wq_size - 1;
-       GEM_BUG_ON(wq_off & (wqi_size - 1));
+       gc->wq_rsvd -= wqi_size;
 
        /* WQ starts from the page after doorbell / process_desc */
        wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
        wq_off &= PAGE_SIZE - 1;
-       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
+       base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
        wqi = (struct guc_wq_item *)((char *)base + wq_off);
 
        /* Now fill in the 4-word work queue item */
        wqi->header = WQ_TYPE_INORDER |
                        (wqi_len << WQ_LEN_SHIFT) |
-                       (rq->engine->guc_id << WQ_TARGET_SHIFT) |
+                       (engine->guc_id << WQ_TARGET_SHIFT) |
                        WQ_NO_WCFLUSH_WAIT;
 
        /* The GuC wants only the low-order word of the context descriptor */
-       wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
-                                                            rq->engine);
+       wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
 
        wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-       wqi->fence_id = rq->seqno;
+       wqi->fence_id = rq->fence.seqno;
 
        kunmap_atomic(base);
 }
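
The tail update in guc_wq_item_append() relies on the work queue size being a
power of two, so that masking with (wq_size - 1) implements the wrap-around,
and on CIRC_SPACE() keeping one slot free so that head == tail unambiguously
means "empty". A minimal sketch of the idiom (SIZE is an assumed power-of-two
constant standing in for GUC_WQ_SIZE; not driver code):

	#include <linux/circ_buf.h>

	#define SIZE 8192	/* must be a power of two */

	/* free space, with one slot always left unused */
	static u32 wq_space(u32 prod, u32 cons)
	{
		return CIRC_SPACE(prod, cons, SIZE);
	}

	/* advance the producer index with cheap modulo-SIZE arithmetic */
	static u32 wq_advance(u32 tail, u32 len)
	{
		return (tail + len) & (SIZE - 1);
	}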
@@ -553,8 +556,8 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                if (db_ret.db_status == GUC_DOORBELL_DISABLED)
                        break;
 
-               DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
-                         db_cmp.cookie, db_ret.cookie);
+               DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
+                        db_cmp.cookie, db_ret.cookie);
 
                /* update the cookie to newly read cookie from GuC */
                db_cmp.cookie = db_ret.cookie;
@@ -573,26 +576,26 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * Return:     0 on success, otherwise an errno.
  *             (Note: nonzero really shouldn't happen!)
  *
- * The caller must have already called i915_guc_wq_check_space() above
- * with a result of 0 (success) since the last request submission. This
- * guarantees that there is space in the work queue for the new request,
- * so enqueuing the item cannot fail.
+ * The caller must have already called i915_guc_wq_reserve() above with
+ * a result of 0 (success), guaranteeing that there is space in the work
+ * queue for the new request, so enqueuing the item cannot fail.
  *
  * Bad Things Will Happen if the caller violates this protocol, e.g. calls
- * submit() when check() says there's no space, or calls submit() multiple
- * times with no intervening check().
+ * submit() when _reserve() says there's no space, or calls _submit()
+ * a different number of times from (successful) calls to _reserve().
  *
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouldn't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
        unsigned int engine_id = rq->engine->id;
        struct intel_guc *guc = &rq->i915->guc;
        struct i915_guc_client *client = guc->execbuf_client;
        int b_ret;
 
-       guc_add_workqueue_item(client, rq);
+       spin_lock(&client->wq_lock);
+       guc_wq_item_append(client, rq);
        b_ret = guc_ring_doorbell(client);
 
        client->submissions[engine_id] += 1;
@@ -601,9 +604,8 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
                client->b_fail += 1;
 
        guc->submissions[engine_id] += 1;
-       guc->last_seqno[engine_id] = rq->seqno;
-
-       return b_ret;
+       guc->last_seqno[engine_id] = rq->fence.seqno;
+       spin_unlock(&client->wq_lock);
 }
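
Taken together, the protocol above means each successful i915_guc_wq_reserve()
must be consumed by exactly one submission, which happens through the engine's
submit_request hook (wired to i915_guc_submit() later in this patch). A
hypothetical caller, sketched under those assumptions (queue_request() is
illustrative, not a driver function):

	static int queue_request(struct drm_i915_gem_request *rq)
	{
		int err;

		/* may fail with -EAGAIN while the shared workqueue is full */
		err = i915_guc_wq_reserve(rq);
		if (err)
			return err;

		/* ... emit the request's commands into its ring ... */

		/* consumes the reservation taken above */
		rq->engine->submit_request(rq);
		return 0;
	}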
 
 /*
@@ -613,55 +615,48 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
  */
 
 /**
- * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev_priv:  driver private data structure
- * @size:      size of object
+ * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc:       the guc
+ * @size:      size of area to allocate (both virtual space and memory)
  *
- * This is a wrapper to create a gem obj. In order to use it inside GuC, the
- * object needs to be pinned lifetime. Also we must pin it to gtt space other
- * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned for its lifetime, so we
+ * allocate both some backing storage and a range inside the Global GTT. We
+ * must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because
+ * that range is reserved inside the GuC.
  *
- * Return:     A drm_i915_gem_object if successful, otherwise NULL.
+ * Return:     An i915_vma if successful, otherwise an ERR_PTR.
  */
-static struct drm_i915_gem_object *
-gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
+static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
 {
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
 
        obj = i915_gem_object_create(&dev_priv->drm, size);
        if (IS_ERR(obj))
-               return NULL;
+               return ERR_CAST(obj);
 
-       if (i915_gem_object_get_pages(obj)) {
-               drm_gem_object_unreference(&obj->base);
-               return NULL;
-       }
+       vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+       if (IS_ERR(vma))
+               goto err;
 
-       if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-                       PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
-               drm_gem_object_unreference(&obj->base);
-               return NULL;
+       ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+                          PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       if (ret) {
+               vma = ERR_PTR(ret);
+               goto err;
        }
 
        /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
        I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-       return obj;
-}
-
-/**
- * gem_release_guc_obj() - Release gem object allocated for GuC usage
- * @obj:       gem obj to be released
- */
-static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
-{
-       if (!obj)
-               return;
+       return vma;
 
-       if (i915_gem_obj_is_pinned(obj))
-               i915_gem_object_ggtt_unpin(obj);
-
-       drm_gem_object_unreference(&obj->base);
+err:
+       i915_gem_object_put(obj);
+       return vma;
 }
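
Because guc_allocate_vma() reports failure with an ERR_PTR-encoded pointer
rather than NULL, callers must test it with IS_ERR(); a NULL check would let
an error pointer escape and later be dereferenced. Minimal usage sketch:

	struct i915_vma *vma;

	vma = guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* propagate e.g. -ENOMEM */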
 
 static void
@@ -688,61 +683,74 @@ guc_client_free(struct drm_i915_private *dev_priv,
                kunmap(kmap_to_page(client->client_base));
        }
 
-       gem_release_guc_obj(client->client_obj);
+       i915_vma_unpin_and_release(&client->vma);
 
        if (client->ctx_index != GUC_INVALID_CTX_ID) {
-               guc_fini_ctx_desc(guc, client);
+               guc_ctx_desc_fini(guc, client);
                ida_simple_remove(&guc->ctx_ids, client->ctx_index);
        }
 
        kfree(client);
 }
 
+/* Check that a doorbell register is in the expected state */
+static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       i915_reg_t drbreg = GEN8_DRBREGL(db_id);
+       uint32_t value = I915_READ(drbreg);
+       bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
+       bool expected = test_bit(db_id, guc->doorbell_bitmap);
+
+       if (enabled == expected)
+               return true;
+
+       DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
+                        db_id, drbreg.reg, value,
+                        expected ? "active" : "inactive");
+
+       return false;
+}
+
 /*
- * Borrow the first client to set up & tear down every doorbell
+ * Borrow the first client to set up & tear down each unused doorbell
  * in turn, to ensure that all doorbell h/w is (re)initialised.
  */
 static void guc_init_doorbell_hw(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct i915_guc_client *client = guc->execbuf_client;
-       uint16_t db_id, i;
-       int err;
+       uint16_t db_id;
+       int i, err;
 
+       /* Save client's original doorbell selection */
        db_id = client->doorbell_id;
 
        for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
-               i915_reg_t drbreg = GEN8_DRBREGL(i);
-               u32 value = I915_READ(drbreg);
+               /* Skip if doorbell is OK */
+               if (guc_doorbell_check(guc, i))
+                       continue;
 
                err = guc_update_doorbell_id(guc, client, i);
-
-               /* Report update failure or unexpectedly active doorbell */
-               if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
-                       DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
-                                         i, drbreg.reg, value, err);
+               if (err)
+                       DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
+                                       i, err);
        }
 
        /* Restore to original value */
        err = guc_update_doorbell_id(guc, client, db_id);
        if (err)
-               DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
-                       db_id, err);
+               DRM_WARN("Failed to restore doorbell to %d, err %d\n",
+                        db_id, err);
 
-       for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
-               i915_reg_t drbreg = GEN8_DRBREGL(i);
-               u32 value = I915_READ(drbreg);
-
-               if (i != db_id && (value & GUC_DOORBELL_ENABLED))
-                       DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
-                                         i, drbreg.reg, value);
-
-       }
+       /* Read back & verify all doorbell registers */
+       for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
+               (void)guc_doorbell_check(guc, i);
 }
 
 /**
  * guc_client_alloc() - Allocate an i915_guc_client
  * @dev_priv:  driver private data structure
+ * @engines:   The set of engines to enable for this client
  * @priority:  one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW
  *             The kernel client to replace ExecList submission is created with
  *             NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -754,22 +762,24 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
  */
 static struct i915_guc_client *
 guc_client_alloc(struct drm_i915_private *dev_priv,
+                uint32_t engines,
                 uint32_t priority,
                 struct i915_gem_context *ctx)
 {
        struct i915_guc_client *client;
        struct intel_guc *guc = &dev_priv->guc;
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        uint16_t db_id;
 
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return NULL;
 
-       client->doorbell_id = GUC_INVALID_DOORBELL_ID;
-       client->priority = priority;
        client->owner = ctx;
        client->guc = guc;
+       client->engines = engines;
+       client->priority = priority;
+       client->doorbell_id = GUC_INVALID_DOORBELL_ID;
 
        client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
                        GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
@@ -779,13 +789,15 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        }
 
        /* The first page is doorbell/proc_desc. The next two pages are wq. */
-       obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
-       if (!obj)
+       vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+       if (IS_ERR(vma))
                goto err;
 
        /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
-       client->client_obj = obj;
-       client->client_base = kmap(i915_gem_object_get_page(obj, 0));
+       client->vma = vma;
+       client->client_base = kmap(i915_vma_first_page(vma));
+
+       spin_lock_init(&client->wq_lock);
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
 
@@ -806,29 +818,26 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        else
                client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-       guc_init_proc_desc(guc, client);
-       guc_init_ctx_desc(guc, client);
+       guc_proc_desc_init(guc, client);
+       guc_ctx_desc_init(guc, client);
        if (guc_init_doorbell(guc, client, db_id))
                goto err;
 
-       DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
-               priority, client, client->ctx_index);
+       DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
+               priority, client, client->engines, client->ctx_index);
        DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
                client->doorbell_id, client->doorbell_offset);
 
        return client;
 
 err:
-       DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
-
        guc_client_free(dev_priv, client);
        return NULL;
 }
 
-static void guc_create_log(struct intel_guc *guc)
+static void guc_log_create(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        unsigned long offset;
        uint32_t size, flags;
 
@@ -844,16 +853,16 @@ static void guc_create_log(struct intel_guc *guc)
                GUC_LOG_ISR_PAGES + 1 +
                GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
 
-       obj = guc->log_obj;
-       if (!obj) {
-               obj = gem_allocate_guc_obj(dev_priv, size);
-               if (!obj) {
+       vma = guc->log_vma;
+       if (!vma) {
+               vma = guc_allocate_vma(guc, size);
+               if (IS_ERR(vma)) {
                        /* logging will be off */
                        i915.guc_log_level = -1;
                        return;
                }
 
-               guc->log_obj = obj;
+               guc->log_vma = vma;
        }
 
        /* each allocated unit is a page */
@@ -862,11 +871,11 @@ static void guc_create_log(struct intel_guc *guc)
                (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
                (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-       offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+       offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
        guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
-static void init_guc_policies(struct guc_policies *policies)
+static void guc_policies_init(struct guc_policies *policies)
 {
        struct guc_policy *policy;
        u32 p, i;
@@ -888,10 +897,10 @@ static void init_guc_policies(struct guc_policies *policies)
        policies->is_valid = 1;
 }
 
-static void guc_create_ads(struct intel_guc *guc)
+static void guc_addon_create(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        struct guc_ads *ads;
        struct guc_policies *policies;
        struct guc_mmio_reg_state *reg_state;
@@ -904,16 +913,16 @@ static void guc_create_ads(struct intel_guc *guc)
                        sizeof(struct guc_mmio_reg_state) +
                        GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
 
-       obj = guc->ads_obj;
-       if (!obj) {
-               obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
-               if (!obj)
+       vma = guc->ads_vma;
+       if (!vma) {
+               vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+               if (IS_ERR(vma))
                        return;
 
-               guc->ads_obj = obj;
+               guc->ads_vma = vma;
        }
 
-       page = i915_gem_object_get_page(obj, 0);
+       page = i915_vma_first_page(vma);
        ads = kmap(page);
 
        /*
@@ -924,17 +933,17 @@ static void guc_create_ads(struct intel_guc *guc)
         * to find it.
         */
        engine = &dev_priv->engine[RCS];
-       ads->golden_context_lrca = engine->status_page.gfx_addr;
+       ads->golden_context_lrca = engine->status_page.ggtt_offset;
 
        for_each_engine(engine, dev_priv)
                ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
        /* GuC scheduling policies */
        policies = (void *)ads + sizeof(struct guc_ads);
-       init_guc_policies(policies);
+       guc_policies_init(policies);
 
-       ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
-                       sizeof(struct guc_ads);
+       ads->scheduler_policies =
+               i915_ggtt_offset(vma) + sizeof(struct guc_ads);
 
        /* MMIO reg state */
        reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -966,6 +975,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
        const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
        const size_t gemsize = round_up(poolsize, PAGE_SIZE);
        struct intel_guc *guc = &dev_priv->guc;
+       struct i915_vma *vma;
 
        /* Wipe bitmap & delete client in case of reinitialisation */
        bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
@@ -974,16 +984,17 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
        if (!i915.enable_guc_submission)
                return 0; /* not enabled  */
 
-       if (guc->ctx_pool_obj)
+       if (guc->ctx_pool_vma)
                return 0; /* already allocated */
 
-       guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
-       if (!guc->ctx_pool_obj)
-               return -ENOMEM;
+       vma = guc_allocate_vma(guc, gemsize);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
 
+       guc->ctx_pool_vma = vma;
        ida_init(&guc->ctx_ids);
-       guc_create_log(guc);
-       guc_create_ads(guc);
+       guc_log_create(guc);
+       guc_addon_create(guc);
 
        return 0;
 }
@@ -992,13 +1003,16 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_guc_client *client;
+       struct intel_engine_cs *engine;
+       struct drm_i915_gem_request *request;
 
        /* client for execbuf submission */
        client = guc_client_alloc(dev_priv,
+                                 INTEL_INFO(dev_priv)->ring_mask,
                                  GUC_CTX_PRIORITY_KMD_NORMAL,
                                  dev_priv->kernel_context);
        if (!client) {
-               DRM_ERROR("Failed to create execbuf guc_client\n");
+               DRM_ERROR("Failed to create normal GuC client!\n");
                return -ENOMEM;
        }
 
@@ -1006,6 +1020,18 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
        host2guc_sample_forcewake(guc, client);
        guc_init_doorbell_hw(guc);
 
+       /* Take over from manual control of ELSP (execlists) */
+       for_each_engine(engine, dev_priv) {
+               engine->submit_request = i915_guc_submit;
+
+               /* Replay the current set of previously submitted requests */
+               list_for_each_entry(request, &engine->request_list, link) {
+                       client->wq_rsvd += sizeof(struct guc_wq_item);
+                       if (i915_sw_fence_done(&request->submit))
+                               i915_guc_submit(request);
+               }
+       }
+
        return 0;
 }
 
@@ -1013,6 +1039,12 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
 
+       if (!guc->execbuf_client)
+               return;
+
+       /* Revert back to manual ELSP submission */
+       intel_execlists_enable_submission(dev_priv);
+
        guc_client_free(dev_priv, guc->execbuf_client);
        guc->execbuf_client = NULL;
 }
@@ -1021,16 +1053,12 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
 
-       gem_release_guc_obj(dev_priv->guc.ads_obj);
-       guc->ads_obj = NULL;
-
-       gem_release_guc_obj(dev_priv->guc.log_obj);
-       guc->log_obj = NULL;
+       i915_vma_unpin_and_release(&guc->ads_vma);
+       i915_vma_unpin_and_release(&guc->log_vma);
 
-       if (guc->ctx_pool_obj)
+       if (guc->ctx_pool_vma)
                ida_destroy(&guc->ctx_ids);
-       gem_release_guc_obj(guc->ctx_pool_obj);
-       guc->ctx_pool_obj = NULL;
+       i915_vma_unpin_and_release(&guc->ctx_pool_vma);
 }
 
 /**
@@ -1053,7 +1081,7 @@ int intel_guc_suspend(struct drm_device *dev)
        /* any value greater than GUC_POWER_D0 */
        data[1] = GUC_POWER_D1;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1078,7 +1106,7 @@ int intel_guc_resume(struct drm_device *dev)
        data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
        data[1] = GUC_POWER_D0;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
index 1c2aec39241257fe63183a770053a135b68e2f61..c128fdbd24e4f045d6d11c6bfe7e7799f1dc9136 100644 (file)
@@ -371,7 +371,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
 
-       I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+       I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -656,12 +656,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
  *   of horizontal active on the first line of vertical active
  */
 
-static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
-       /* Gen2 doesn't have a hardware frame counter */
-       return 0;
-}
-
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -978,10 +972,8 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 static void notify_ring(struct intel_engine_cs *engine)
 {
        smp_store_mb(engine->breadcrumbs.irq_posted, true);
-       if (intel_engine_wakeup(engine)) {
+       if (intel_engine_wakeup(engine))
                trace_i915_gem_request_notify(engine);
-               engine->breadcrumbs.irq_wakeups++;
-       }
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1105,9 +1097,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
        new_delay = dev_priv->rps.cur_freq;
        min = dev_priv->rps.min_freq_softlimit;
        max = dev_priv->rps.max_freq_softlimit;
-
-       if (client_boost) {
-               new_delay = dev_priv->rps.max_freq_softlimit;
+       if (client_boost || any_waiters(dev_priv))
+               max = dev_priv->rps.max_freq;
+       if (client_boost && new_delay < dev_priv->rps.boost_freq) {
+               new_delay = dev_priv->rps.boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
@@ -1122,7 +1115,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
                        new_delay = dev_priv->rps.efficient_freq;
                        adj = 0;
                }
-       } else if (any_waiters(dev_priv)) {
+       } else if (client_boost || any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@@ -2504,57 +2497,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-       int ret;
 
        kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
+       DRM_DEBUG_DRIVER("resetting chip\n");
+       kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
        /*
-        * Note that there's only one work item which does gpu resets, so we
-        * need not worry about concurrent gpu resets potentially incrementing
-        * error->reset_counter twice. We only need to take care of another
-        * racing irq/hangcheck declaring the gpu dead for a second time. A
-        * quick check for that is good enough: schedule_work ensures the
-        * correct ordering between hang detection and this work item, and since
-        * the reset in-progress bit is only ever set by code outside of this
-        * work we don't need to worry about any other races.
+        * In most cases it's guaranteed that we get here with an RPM
+        * reference held, for example because there is a pending GPU
+        * request that won't finish until the reset is done. This
+        * isn't the case at least when we get here by doing a
+        * simulated reset via debugfs, so get an RPM reference.
         */
-       if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-               DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
-               /*
-                * In most cases it's guaranteed that we get here with an RPM
-                * reference held, for example because there is a pending GPU
-                * request that won't finish until the reset is done. This
-                * isn't the case at least when we get here by doing a
-                * simulated reset via debugs, so get an RPM reference.
-                */
-               intel_runtime_pm_get(dev_priv);
-
-               intel_prepare_reset(dev_priv);
+       intel_runtime_pm_get(dev_priv);
+       intel_prepare_reset(dev_priv);
 
+       do {
                /*
                 * All state reset _must_ be completed before we update the
                 * reset counter, for otherwise waiters might miss the reset
                 * pending state and not properly drop locks, resulting in
                 * deadlocks with the reset work.
                 */
-               ret = i915_reset(dev_priv);
+               if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+                       i915_reset(dev_priv);
+                       mutex_unlock(&dev_priv->drm.struct_mutex);
+               }
 
-               intel_finish_reset(dev_priv);
+               /* We need to wait for anyone holding the lock to wakeup */
+       } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+                                    I915_RESET_IN_PROGRESS,
+                                    TASK_UNINTERRUPTIBLE,
+                                    HZ));
 
-               intel_runtime_pm_put(dev_priv);
+       intel_finish_reset(dev_priv);
+       intel_runtime_pm_put(dev_priv);
 
-               if (ret == 0)
-                       kobject_uevent_env(kobj,
-                                          KOBJ_CHANGE, reset_done_event);
+       if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+               kobject_uevent_env(kobj,
+                                  KOBJ_CHANGE, reset_done_event);
 
-               /*
-                * Note: The wake_up also serves as a memory barrier so that
-                * waiters see the update value of the reset counter atomic_t.
-                */
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
-       }
+       /*
+        * Note: The wake_up also serves as a memory barrier so that
+        * waiters see the updated value of dev_priv->gpu_error.flags.
+        */
+       wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
@@ -2673,25 +2661,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
        i915_capture_error_state(dev_priv, engine_mask, error_msg);
        i915_report_and_clear_eir(dev_priv);
 
-       if (engine_mask) {
-               atomic_or(I915_RESET_IN_PROGRESS_FLAG,
-                               &dev_priv->gpu_error.reset_counter);
+       if (!engine_mask)
+               return;
 
-               /*
-                * Wakeup waiting processes so that the reset function
-                * i915_reset_and_wakeup doesn't deadlock trying to grab
-                * various locks. By bumping the reset counter first, the woken
-                * processes will see a reset in progress and back off,
-                * releasing their locks and then wait for the reset completion.
-                * We must do this for _all_ gpu waiters that might hold locks
-                * that the reset work needs to acquire.
-                *
-                * Note: The wake_up serves as the required memory barrier to
-                * ensure that the waiters see the updated value of the reset
-                * counter atomic_t.
-                */
-               i915_error_wake_up(dev_priv);
-       }
+       if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+                            &dev_priv->gpu_error.flags))
+               return;
+
+       /*
+        * Wakeup waiting processes so that the reset function
+        * i915_reset_and_wakeup doesn't deadlock trying to grab
+        * various locks. By bumping the reset counter first, the woken
+        * processes will see a reset in progress and back off,
+        * releasing their locks and then wait for the reset completion.
+        * We must do this for _all_ gpu waiters that might hold locks
+        * that the reset work needs to acquire.
+        *
+        * Note: The wake_up also provides a memory barrier to ensure that the
+        * waiters see the updated value of the reset flags.
+        */
+       i915_error_wake_up(dev_priv);
 
        i915_reset_and_wakeup(dev_priv);
 }
@@ -2803,13 +2792,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static bool
-ring_idle(struct intel_engine_cs *engine, u32 seqno)
-{
-       return i915_seqno_passed(seqno,
-                                READ_ONCE(engine->last_submitted_seqno));
-}
-
 static bool
 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
@@ -2849,16 +2831,17 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                }
        }
 
-       DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-                 engine->id, ipehr, offset);
+       DRM_DEBUG_DRIVER("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+                        engine->id, ipehr, offset);
 
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       void __iomem *vaddr;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;
@@ -2897,6 +2880,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
         */
        head = I915_READ_HEAD(engine) & HEAD_ADDR;
        backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+       vaddr = (void __iomem *)engine->buffer->vaddr;
 
        for (i = backwards; i; --i) {
                /*
@@ -2907,7 +2891,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
                head &= engine->buffer->size - 1;
 
                /* This here seems to blow up */
-               cmd = ioread32(engine->buffer->virtual_start + head);
+               cmd = ioread32(vaddr + head);
                if (cmd == ipehr)
                        break;
 
@@ -2917,11 +2901,11 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
        if (!i)
                return NULL;
 
-       *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+       *seqno = ioread32(vaddr + head + 4) + 1;
        if (INTEL_GEN(dev_priv) >= 8) {
-               offset = ioread32(engine->buffer->virtual_start + head + 12);
+               offset = ioread32(vaddr + head + 12);
                offset <<= 32;
-               offset = ioread32(engine->buffer->virtual_start + head + 8);
+               offset |= ioread32(vaddr + head + 8);
        }
        return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
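
The Gen8+ branch reads the semaphore target address as two dwords and stitches
them into a single 64-bit offset; note the |= for the low half, which fixes the
old code's plain assignment that discarded the high dword it had just shifted
into place. The idiom in isolation:

	u32 lo = ioread32(vaddr + head + 8);
	u32 hi = ioread32(vaddr + head + 12);
	u64 offset = (u64)hi << 32 | lo;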
@@ -2938,6 +2922,9 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller == NULL)
                return -1;
 
+       if (IS_ERR(signaller))
+               return 0;
+
        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
@@ -2990,7 +2977,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
        return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
        if (acthd != engine->hangcheck.acthd) {
@@ -3008,11 +2995,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       enum intel_ring_hangcheck_action ha;
+       enum intel_engine_hangcheck_action ha;
        u32 tmp;
 
        ha = head_stuck(engine, acthd);
@@ -3054,22 +3041,6 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
-static unsigned long kick_waiters(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *i915 = engine->i915;
-       unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
-
-       if (engine->hangcheck.user_interrupts == irq_count &&
-           !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
-               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
-                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                 engine->name);
-
-               intel_engine_enable_fake_irq(engine);
-       }
-
-       return irq_count;
-}
 /*
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. We keep track of per-ring seqno progress and
@@ -3107,7 +3078,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                bool busy = intel_engine_has_waiter(engine);
                u64 acthd;
                u32 seqno;
-               unsigned user_interrupts;
+               u32 submit;
 
                semaphore_clear_deadlocks(dev_priv);
 
@@ -3121,29 +3092,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                if (engine->irq_seqno_barrier)
                        engine->irq_seqno_barrier(engine);
 
-               acthd = intel_ring_get_active_head(engine);
+               acthd = intel_engine_get_active_head(engine);
                seqno = intel_engine_get_seqno(engine);
-
-               /* Reset stuck interrupts between batch advances */
-               user_interrupts = 0;
+               submit = READ_ONCE(engine->last_submitted_seqno);
 
                if (engine->hangcheck.seqno == seqno) {
-                       if (ring_idle(engine, seqno)) {
+                       if (i915_seqno_passed(seqno, submit)) {
                                engine->hangcheck.action = HANGCHECK_IDLE;
-                               if (busy) {
-                                       /* Safeguard against driver failure */
-                                       user_interrupts = kick_waiters(engine);
-                                       engine->hangcheck.score += BUSY;
-                               }
                        } else {
                                /* We always increment the hangcheck score
-                                * if the ring is busy and still processing
+                                * if the engine is busy and still processing
                                 * the same request, so that no single request
                                 * can run indefinitely (such as a chain of
                                 * batches). The only time we do not increment
                                 * the hangcheck score on this ring, if this
-                                * ring is in a legitimate wait for another
-                                * ring. In that case the waiting ring is a
+                                * engine is in a legitimate wait for another
+                                * engine. In that case the waiting engine is a
                                 * victim and we want to be sure we catch the
                                 * right culprit. Then every time we do kick
                                 * the ring, add a small increment to the
@@ -3151,8 +3115,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                 * being repeatedly kicked and so responsible
                                 * for stalling the machine.
                                 */
-                               engine->hangcheck.action = ring_stuck(engine,
-                                                                     acthd);
+                               engine->hangcheck.action =
+                                       engine_stuck(engine, acthd);
 
                                switch (engine->hangcheck.action) {
                                case HANGCHECK_IDLE:
@@ -3195,12 +3159,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
                engine->hangcheck.seqno = seqno;
                engine->hangcheck.acthd = acthd;
-               engine->hangcheck.user_interrupts = user_interrupts;
                busy_count += busy;
        }
 
        if (hung) {
                char msg[80];
+               unsigned int tmp;
                int len;
 
                /* If some rings hung but others were still busy, only
@@ -3210,7 +3174,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        hung &= ~stuck;
                len = scnprintf(msg, sizeof(msg),
                                "%s on ", stuck == hung ? "No progress" : "Hang");
-               for_each_engine_masked(engine, dev_priv, hung)
+               for_each_engine_masked(engine, dev_priv, hung, tmp)
                        len += scnprintf(msg + len, sizeof(msg) - len,
                                         "%s, ", engine->name);
                msg[len-2] = '\0';
@@ -4536,14 +4500,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
 
        if (INTEL_INFO(dev_priv)->gen >= 8)
-               dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+               dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
 
        INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
 
        if (IS_GEN2(dev_priv)) {
+               /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
-               dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+               dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
new file mode 100644 (file)
index 0000000..49a0794
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <asm/fpu/api.h>
+
+#include "i915_drv.h"
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+#ifdef CONFIG_AS_MOVNTDQA
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+       kernel_fpu_begin();
+
+       len >>= 4;
+       while (len >= 4) {
+               asm("movntdqa   (%0), %%xmm0\n"
+                   "movntdqa 16(%0), %%xmm1\n"
+                   "movntdqa 32(%0), %%xmm2\n"
+                   "movntdqa 48(%0), %%xmm3\n"
+                   "movaps %%xmm0,   (%1)\n"
+                   "movaps %%xmm1, 16(%1)\n"
+                   "movaps %%xmm2, 32(%1)\n"
+                   "movaps %%xmm3, 48(%1)\n"
+                   :: "r" (src), "r" (dst) : "memory");
+               src += 64;
+               dst += 64;
+               len -= 4;
+       }
+       while (len--) {
+               asm("movntdqa (%0), %%xmm0\n"
+                   "movaps %%xmm0, (%1)\n"
+                   :: "r" (src), "r" (dst) : "memory");
+               src += 16;
+               dst += 16;
+       }
+
+       kernel_fpu_end();
+}
+#endif
+
+/**
+ * i915_memcpy_from_wc - perform an accelerated *aligned* read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * i915_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ *
+ * To test whether accelerated reads from WC are supported, use
+ * i915_memcpy_from_wc(NULL, NULL, 0);
+ *
+ * Returns true if the copy was successful, false if the preconditions
+ * are not met.
+ */
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+       if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+               return false;
+
+#ifdef CONFIG_AS_MOVNTDQA
+       if (static_branch_likely(&has_movntdqa)) {
+               if (likely(len))
+                       __memcpy_ntdqa(dst, src, len);
+               return true;
+       }
+#endif
+
+       return false;
+}
+
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
+{
+       if (static_cpu_has(X86_FEATURE_XMM4_1))
+               static_branch_enable(&has_movntdqa);
+}
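
A minimal caller sketch for the API above (illustrative, not part of the patch): the zero-length probe documented in the kernel-doc gates the fast path once, and misaligned or unsupported copies fall back to a plain memcpy(). The names wc_fast_path, example_init and example_read are hypothetical.

static bool wc_fast_path; /* hypothetical cached probe result */

static void example_init(void)
{
	/* zero-length call only reports whether the fast path exists */
	wc_fast_path = i915_memcpy_from_wc(NULL, NULL, 0);
}

static void example_read(void *dst, const void *src, unsigned long len)
{
	/* returns false when the 16-byte alignment rules are not met */
	if (!wc_fast_path || !i915_memcpy_from_wc(dst, src, len))
		memcpy(dst, src, len); /* slow uncached fallback */
}
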
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
new file mode 100644 (file)
index 0000000..e4935dd
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/io-mapping.h>
+
+#include <asm/pgtable.h>
+
+#include "i915_drv.h"
+
+struct remap_pfn {
+       struct mm_struct *mm;
+       unsigned long pfn;
+       pgprot_t prot;
+};
+
+static int remap_pfn(pte_t *pte, pgtable_t token,
+                    unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTEs are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
+       GEM_BUG_ON((vma->vm_flags &
+                   (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
+                  (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}
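
A hedged sketch of how a fault handler might drive remap_io_mapping() (only remap_io_mapping() itself comes from the patch; example_fault and its parameters are illustrative). Per the note above, the caller must hold the mm semaphore, which page-fault context already does.

static int example_fault(struct vm_area_struct *vma,
			 unsigned long aperture_pfn,
			 struct io_mapping *iomap)
{
	/* called with mmap_sem held, satisfying the locking note above */
	return remap_io_mapping(vma, vma->vm_start, aperture_pfn,
				vma->vm_end - vma->vm_start, iomap);
}
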
index b6e404c91eed5827f6c8692a57a32f374ee350b9..768ad89d9cd40cf93093713f7db08f7bc10e7ebd 100644 (file)
@@ -45,6 +45,7 @@ struct i915_params i915 __read_mostly = {
        .fastboot = 0,
        .prefault_disable = 0,
        .load_detect_test = 0,
+       .force_reset_modeset_test = 0,
        .reset = true,
        .invert_brightness = 0,
        .disable_display = 0,
@@ -161,6 +162,11 @@ MODULE_PARM_DESC(load_detect_test,
        "Force-enable the VGA load detect code for testing (default:false). "
        "For developers only.");
 
+module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
+MODULE_PARM_DESC(force_reset_modeset_test,
+       "Force a modeset during gpu reset for testing (default:false). "
+       "For developers only.");
+
 module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
 MODULE_PARM_DESC(invert_brightness,
        "Invert backlight brightness "
index 0ad020b4a925e51a9cea6325b1541c58d2709ea3..3a0dd78ddb38f88fa7b447a51702f3539c0590d1 100644 (file)
@@ -57,6 +57,7 @@ struct i915_params {
        bool fastboot;
        bool prefault_disable;
        bool load_detect_test;
+       bool force_reset_modeset_test;
        bool reset;
        bool disable_display;
        bool verbose_state_checks;
index 949c01686a6689fffc032db6d8439889a6686b32..687c768833b3e4e3d0ef87f4604a5838121d9fdd 100644 (file)
 #define CHV_COLORS \
        .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
 
+#define GEN2_FEATURES \
+       .gen = 2, .num_pipes = 1, \
+       .has_overlay = 1, .overlay_needs_physical = 1, \
+       .has_gmch_display = 1, \
+       .hws_needs_physical = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
+       .is_mobile = 1, .cursor_needs_physical = 1,
+       .num_pipes = 2, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+       GEN2_FEATURES,
+       .is_i85x = 1, .is_mobile = 1,
+       .num_pipes = 2, /* legal, last one wins */
        .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
 };
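
The consolidation above leans on a C initializer rule worth spelling out: when a designated initializer names the same member twice, the last assignment wins, which is why GEN2_FEATURES can default .num_pipes = 1 and individual entries can override it. A standalone illustration, not from the patch:

struct example_info { int num_pipes; int is_mobile; };

#define EXAMPLE_FEATURES .num_pipes = 1, .is_mobile = 0

static const struct example_info demo = {
	EXAMPLE_FEATURES,
	.num_pipes = 2,	/* legal, last one wins: overrides the macro's 1 */
};
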
 
+#define GEN3_FEATURES \
+       .gen = 3, .num_pipes = 2, \
+       .has_gmch_display = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_i915g = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_mobile = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_mobile = 1,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
+#define GEN4_FEATURES \
+       .gen = 4, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_gmch_display = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
-       .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_broadwater = 1,
        .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .num_pipes = 2,
-       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_crestline = 1,
+       .is_mobile = 1, .has_fbc = 1,
        .has_overlay = 1,
        .supports_tv = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       GEN3_FEATURES,
+       .is_g33 = 1,
+       .has_hotplug = 1,
        .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_g4x = 1,
+       .has_pipe_cxsr = 1,
        .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_g4x = 1, .num_pipes = 2,
-       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_g4x = 1,
+       .is_mobile = 1, .has_fbc = 1,
+       .has_pipe_cxsr = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       GEN3_FEATURES,
+       .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+       .has_hotplug = 1,
        .has_overlay = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
+#define GEN5_FEATURES \
+       .gen = 5, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_gmbus_irq = 1, \
+       .ring_mask = RENDER_RING | BSD_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN5_FEATURES,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN5_FEATURES,
+       .is_mobile = 1,
 };
 
+#define GEN6_FEATURES \
+       .gen = 6, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_fbc = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_llc = 1, \
+       .has_rc6 = 1, \
+       .has_rc6p = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN6_FEATURES,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN6_FEATURES,
+       .is_mobile = 1,
 };
 
 #define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1, \
+       .has_rc6 = 1, \
+       .has_rc6p = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
        GEN_DEFAULT_PIPEOFFSETS, \
        IVB_CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
+       .has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
+       .has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
+       .has_l3_dpf = 1,
 };
 
 #define VLV_FEATURES  \
        .gen = 7, .num_pipes = 2, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_psr = 1, \
+       .has_runtime_pm = 1, \
+       .has_rc6 = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
+       .has_gmch_display = 1, \
+       .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .display_mmio_offset = VLV_DISPLAY_BASE, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS
 
-static const struct intel_device_info intel_valleyview_m_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-       .is_mobile = 1,
-};
-
-static const struct intel_device_info intel_valleyview_d_info = {
+static const struct intel_device_info intel_valleyview_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
 };
@@ -263,54 +272,50 @@ static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
        .has_ddi = 1, \
-       .has_fpga_dbg = 1
-
-static const struct intel_device_info intel_haswell_d_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-};
-
-static const struct intel_device_info intel_haswell_m_info = {
+       .has_fpga_dbg = 1, \
+       .has_psr = 1, \
+       .has_resource_streamer = 1, \
+       .has_dp_mst = 1, \
+       .has_rc6p = 0 /* RC6p removed by HSW */, \
+       .has_runtime_pm = 1
+
+static const struct intel_device_info intel_haswell_info = {
        HSW_FEATURES,
        .is_haswell = 1,
-       .is_mobile = 1,
+       .has_l3_dpf = 1,
 };
 
 #define BDW_FEATURES \
        HSW_FEATURES, \
-       BDW_COLORS
+       BDW_COLORS, \
+       .has_logical_ring_contexts = 1
 
-static const struct intel_device_info intel_broadwell_d_info = {
+static const struct intel_device_info intel_broadwell_info = {
        BDW_FEATURES,
        .gen = 8,
        .is_broadwell = 1,
 };
 
-static const struct intel_device_info intel_broadwell_m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-};
-
-static const struct intel_device_info intel_broadwell_gt3d_info = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
        BDW_FEATURES,
        .gen = 8,
        .is_broadwell = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
-
 static const struct intel_device_info intel_cherryview_info = {
        .gen = 8, .num_pipes = 3,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_cherryview = 1,
+       .has_psr = 1,
+       .has_runtime_pm = 1,
+       .has_resource_streamer = 1,
+       .has_rc6 = 1,
+       .has_gmbus_irq = 1,
+       .has_hw_contexts = 1,
+       .has_logical_ring_contexts = 1,
+       .has_gmch_display = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
@@ -321,25 +326,41 @@ static const struct intel_device_info intel_skylake_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broxton_info = {
        .is_broxton = 1,
        .gen = 9,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
+       .has_runtime_pm = 1,
        .has_pooled_eu = 0,
+       .has_csr = 1,
+       .has_resource_streamer = 1,
+       .has_rc6 = 1,
+       .has_dp_mst = 1,
+       .has_gmbus_irq = 1,
+       .has_hw_contexts = 1,
+       .has_logical_ring_contexts = 1,
+       .has_guc = 1,
+       .ddb_size = 512,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
        BDW_COLORS,
@@ -349,12 +370,18 @@ static const struct intel_device_info intel_kabylake_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
@@ -386,14 +413,10 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-       INTEL_HSW_D_IDS(&intel_haswell_d_info),
-       INTEL_HSW_M_IDS(&intel_haswell_m_info),
-       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+       INTEL_HSW_IDS(&intel_haswell_info),
+       INTEL_VLV_IDS(&intel_valleyview_info),
+       INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+       INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
        INTEL_CHV_IDS(&intel_cherryview_info),
        INTEL_SKL_GT1_IDS(&intel_skylake_info),
        INTEL_SKL_GT2_IDS(&intel_skylake_info),
index ce14fe09d96236a32af675b5320aaefb9d508f8c..70d96162def66daef0722a0fb788e23da2e47ec8 100644 (file)
@@ -186,13 +186,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN9_GRDOM_GUC                        (1 << 5)
 #define  GEN8_GRDOM_MEDIA2             (1 << 7)
 
-#define RING_PP_DIR_BASE(ring)         _MMIO((ring)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(ring)    _MMIO((ring)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(ring)         _MMIO((ring)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(engine)       _MMIO((engine)->mmio_base+0x220)
 #define   PP_DIR_DCLV_2G               0xffffffff
 
-#define GEN8_RING_PDP_UDW(ring, n)     _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(ring, n)     _MMIO((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8)
 
 #define GEN8_R_PWR_CLK_STATE           _MMIO(0x20C8)
 #define   GEN8_RPCS_ENABLE             (1 << 31)
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
 #define BALANCE_LEG_MASK(port)         (7<<(8+3*(port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT      23
+#define BALANCE_LEG_DISABLE(port)      (1 << (23 + (port)))
 
 /*
  * Fence registers
@@ -1647,7 +1648,7 @@ enum skl_disp_power_wells {
 #define   ARB_MODE_BWGTLB_DISABLE (1<<9)
 #define   ARB_MODE_SWIZZLE_BDW (1<<1)
 #define RENDER_HWS_PGA_GEN7    _MMIO(0x04080)
-#define RING_FAULT_REG(ring)   _MMIO(0x4094 + 0x100*(ring)->id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
 #define   RING_FAULT_GTTSEL_MASK (1<<11)
 #define   RING_FAULT_SRCID(x)  (((x) >> 3) & 0xff)
 #define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
@@ -1845,7 +1846,7 @@ enum skl_disp_power_wells {
 
 #define GFX_MODE       _MMIO(0x2520)
 #define GFX_MODE_GEN7  _MMIO(0x229c)
-#define RING_MODE_GEN7(ring)   _MMIO((ring)->mmio_base+0x29c)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
 #define   GFX_RUN_LIST_ENABLE          (1<<15)
 #define   GFX_INTERRUPT_STEERING       (1<<14)
 #define   GFX_TLB_INVALIDATE_EXPLICIT  (1<<13)
@@ -3659,8 +3660,17 @@ enum {
 #define   VIDEO_DIP_ENABLE_SPD_HSW     (1 << 0)
 
 /* Panel power sequencing */
-#define PP_STATUS      _MMIO(0x61200)
-#define   PP_ON                (1 << 31)
+#define PPS_BASE                       0x61200
+#define VLV_PPS_BASE                   (VLV_DISPLAY_BASE + PPS_BASE)
+#define PCH_PPS_BASE                   0xC7200
+
+#define _MMIO_PPS(pps_idx, reg)                _MMIO(dev_priv->pps_mmio_base - \
+                                             PPS_BASE + (reg) +        \
+                                             (pps_idx) * 0x100)
+
+#define _PP_STATUS                     0x61200
+#define PP_STATUS(pps_idx)             _MMIO_PPS(pps_idx, _PP_STATUS)
+#define   PP_ON                                (1 << 31)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -3668,14 +3678,14 @@ enum {
  * - pipe enabled
  * - LVDS/DVOB/DVOC on
  */
-#define   PP_READY             (1 << 30)
-#define   PP_SEQUENCE_NONE     (0 << 28)
-#define   PP_SEQUENCE_POWER_UP (1 << 28)
-#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define   PP_SEQUENCE_MASK     (3 << 28)
-#define   PP_SEQUENCE_SHIFT    28
-#define   PP_CYCLE_DELAY_ACTIVE        (1 << 27)
-#define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_READY                     (1 << 30)
+#define   PP_SEQUENCE_NONE             (0 << 28)
+#define   PP_SEQUENCE_POWER_UP         (1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN       (2 << 28)
+#define   PP_SEQUENCE_MASK             (3 << 28)
+#define   PP_SEQUENCE_SHIFT            28
+#define   PP_CYCLE_DELAY_ACTIVE                (1 << 27)
+#define   PP_SEQUENCE_STATE_MASK       0x0000000f
 #define   PP_SEQUENCE_STATE_OFF_IDLE   (0x0 << 0)
 #define   PP_SEQUENCE_STATE_OFF_S0_1   (0x1 << 0)
 #define   PP_SEQUENCE_STATE_OFF_S0_2   (0x2 << 0)
@@ -3685,11 +3695,46 @@ enum {
 #define   PP_SEQUENCE_STATE_ON_S1_2    (0xa << 0)
 #define   PP_SEQUENCE_STATE_ON_S1_3    (0xb << 0)
 #define   PP_SEQUENCE_STATE_RESET      (0xf << 0)
-#define PP_CONTROL     _MMIO(0x61204)
-#define   POWER_TARGET_ON      (1 << 0)
-#define PP_ON_DELAYS   _MMIO(0x61208)
-#define PP_OFF_DELAYS  _MMIO(0x6120c)
-#define PP_DIVISOR     _MMIO(0x61210)
+
+#define _PP_CONTROL                    0x61204
+#define PP_CONTROL(pps_idx)            _MMIO_PPS(pps_idx, _PP_CONTROL)
+#define  PANEL_UNLOCK_REGS             (0xabcd << 16)
+#define  PANEL_UNLOCK_MASK             (0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK    0x1f0
+#define  BXT_POWER_CYCLE_DELAY_SHIFT   4
+#define  EDP_FORCE_VDD                 (1 << 3)
+#define  EDP_BLC_ENABLE                        (1 << 2)
+#define  PANEL_POWER_RESET             (1 << 1)
+#define  PANEL_POWER_OFF               (0 << 0)
+#define  PANEL_POWER_ON                        (1 << 0)
+
+#define _PP_ON_DELAYS                  0x61208
+#define PP_ON_DELAYS(pps_idx)          _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
+#define  PANEL_PORT_SELECT_SHIFT       30
+#define  PANEL_PORT_SELECT_MASK                (3 << 30)
+#define  PANEL_PORT_SELECT_LVDS                (0 << 30)
+#define  PANEL_PORT_SELECT_DPA         (1 << 30)
+#define  PANEL_PORT_SELECT_DPC         (2 << 30)
+#define  PANEL_PORT_SELECT_DPD         (3 << 30)
+#define  PANEL_PORT_SELECT_VLV(port)   ((port) << 30)
+#define  PANEL_POWER_UP_DELAY_MASK     0x1fff0000
+#define  PANEL_POWER_UP_DELAY_SHIFT    16
+#define  PANEL_LIGHT_ON_DELAY_MASK     0x1fff
+#define  PANEL_LIGHT_ON_DELAY_SHIFT    0
+
+#define _PP_OFF_DELAYS                 0x6120C
+#define PP_OFF_DELAYS(pps_idx)         _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
+#define  PANEL_POWER_DOWN_DELAY_MASK   0x1fff0000
+#define  PANEL_POWER_DOWN_DELAY_SHIFT  16
+#define  PANEL_LIGHT_OFF_DELAY_MASK    0x1fff
+#define  PANEL_LIGHT_OFF_DELAY_SHIFT   0
+
+#define _PP_DIVISOR                    0x61210
+#define PP_DIVISOR(pps_idx)            _MMIO_PPS(pps_idx, _PP_DIVISOR)
+#define  PP_REFERENCE_DIVIDER_MASK     0xffffff00
+#define  PP_REFERENCE_DIVIDER_SHIFT    8
+#define  PANEL_POWER_CYCLE_DELAY_MASK  0x1f
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT 0
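
To see how the new _MMIO_PPS() indirection reproduces the old fixed addresses, a worked expansion (assuming dev_priv->pps_mmio_base has been set to PCH_PPS_BASE):

/*
 * PP_CONTROL(0) = 0xC7200 - 0x61200 + 0x61204 + 0 * 0x100 = 0xC7204,
 * the legacy PCH_PP_CONTROL address, while
 * PP_CONTROL(1) = 0xC7200 - 0x61200 + 0x61204 + 1 * 0x100 = 0xC7304,
 * which matches the second Broxton PPS instance (_BXT_PP_CONTROL2).
 * On VLV/CHV, pps_mmio_base = VLV_PPS_BASE recovers the display-base
 * relative per-pipe addresses instead.
 */
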
 
 /* Panel fitting */
 #define PFIT_CONTROL   _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
@@ -6132,6 +6177,7 @@ enum {
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE                (1<<14)
 #define COMMON_SLICE_CHICKEN2                  _MMIO(0x7014)
+# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
 # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
 
@@ -6748,77 +6794,6 @@ enum {
 #define PCH_LVDS       _MMIO(0xe1180)
 #define  LVDS_DETECTED (1 << 1)
 
-/* vlv has 2 sets of panel control regs. */
-#define _PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
-#define _PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
-#define _PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
-#define  PANEL_PORT_SELECT_VLV(port)   ((port) << 30)
-#define _PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
-#define _PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
-
-#define _PIPEB_PP_STATUS         (VLV_DISPLAY_BASE + 0x61300)
-#define _PIPEB_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61304)
-#define _PIPEB_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61308)
-#define _PIPEB_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6130c)
-#define _PIPEB_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61310)
-
-#define VLV_PIPE_PP_STATUS(pipe)       _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
-#define VLV_PIPE_PP_CONTROL(pipe)      _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
-#define VLV_PIPE_PP_ON_DELAYS(pipe)    _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
-#define VLV_PIPE_PP_OFF_DELAYS(pipe)   _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
-#define VLV_PIPE_PP_DIVISOR(pipe)      _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
-
-#define _PCH_PP_STATUS         0xc7200
-#define _PCH_PP_CONTROL                0xc7204
-#define  PANEL_UNLOCK_REGS     (0xabcd << 16)
-#define  PANEL_UNLOCK_MASK     (0xffff << 16)
-#define  BXT_POWER_CYCLE_DELAY_MASK    (0x1f0)
-#define  BXT_POWER_CYCLE_DELAY_SHIFT   4
-#define  EDP_FORCE_VDD         (1 << 3)
-#define  EDP_BLC_ENABLE                (1 << 2)
-#define  PANEL_POWER_RESET     (1 << 1)
-#define  PANEL_POWER_OFF       (0 << 0)
-#define  PANEL_POWER_ON                (1 << 0)
-#define _PCH_PP_ON_DELAYS      0xc7208
-#define  PANEL_PORT_SELECT_MASK        (3 << 30)
-#define  PANEL_PORT_SELECT_LVDS        (0 << 30)
-#define  PANEL_PORT_SELECT_DPA (1 << 30)
-#define  PANEL_PORT_SELECT_DPC (2 << 30)
-#define  PANEL_PORT_SELECT_DPD (3 << 30)
-#define  PANEL_POWER_UP_DELAY_MASK     (0x1fff0000)
-#define  PANEL_POWER_UP_DELAY_SHIFT    16
-#define  PANEL_LIGHT_ON_DELAY_MASK     (0x1fff)
-#define  PANEL_LIGHT_ON_DELAY_SHIFT    0
-
-#define _PCH_PP_OFF_DELAYS             0xc720c
-#define  PANEL_POWER_DOWN_DELAY_MASK   (0x1fff0000)
-#define  PANEL_POWER_DOWN_DELAY_SHIFT  16
-#define  PANEL_LIGHT_OFF_DELAY_MASK    (0x1fff)
-#define  PANEL_LIGHT_OFF_DELAY_SHIFT   0
-
-#define _PCH_PP_DIVISOR                        0xc7210
-#define  PP_REFERENCE_DIVIDER_MASK     (0xffffff00)
-#define  PP_REFERENCE_DIVIDER_SHIFT    8
-#define  PANEL_POWER_CYCLE_DELAY_MASK  (0x1f)
-#define  PANEL_POWER_CYCLE_DELAY_SHIFT 0
-
-#define PCH_PP_STATUS                  _MMIO(_PCH_PP_STATUS)
-#define PCH_PP_CONTROL                 _MMIO(_PCH_PP_CONTROL)
-#define PCH_PP_ON_DELAYS               _MMIO(_PCH_PP_ON_DELAYS)
-#define PCH_PP_OFF_DELAYS              _MMIO(_PCH_PP_OFF_DELAYS)
-#define PCH_PP_DIVISOR                 _MMIO(_PCH_PP_DIVISOR)
-
-/* BXT PPS changes - 2nd set of PPS registers */
-#define _BXT_PP_STATUS2        0xc7300
-#define _BXT_PP_CONTROL2       0xc7304
-#define _BXT_PP_ON_DELAYS2     0xc7308
-#define _BXT_PP_OFF_DELAYS2    0xc730c
-
-#define BXT_PP_STATUS(n)       _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
-#define BXT_PP_CONTROL(n)      _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
-#define BXT_PP_ON_DELAYS(n)    _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
-#define BXT_PP_OFF_DELAYS(n)   _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
-
 #define _PCH_DP_B              0xe4100
 #define PCH_DP_B               _MMIO(_PCH_DP_B)
 #define _PCH_DPB_AUX_CH_CTL    0xe4110
@@ -6958,6 +6933,9 @@ enum {
 #define  ECOBUS                                        _MMIO(0xa180)
 #define    FORCEWAKE_MT_ENABLE                 (1<<5)
 #define  VLV_SPAREG2H                          _MMIO(0xA194)
+#define  GEN9_PWRGT_DOMAIN_STATUS              _MMIO(0xA2A0)
+#define   GEN9_PWRGT_MEDIA_STATUS_MASK         (1 << 0)
+#define   GEN9_PWRGT_RENDER_STATUS_MASK                (1 << 1)
 
 #define  GTFIFODBG                             _MMIO(0x120000)
 #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV   (0x1f << 20)
@@ -7058,12 +7036,13 @@ enum {
 #define GEN6_RP_UP_THRESHOLD                   _MMIO(0xA02C)
 #define GEN6_RP_DOWN_THRESHOLD                 _MMIO(0xA030)
 #define GEN6_RP_CUR_UP_EI                      _MMIO(0xA050)
-#define   GEN6_CURICONT_MASK                   0xffffff
+#define   GEN6_RP_EI_MASK                      0xffffff
+#define   GEN6_CURICONT_MASK                   GEN6_RP_EI_MASK
 #define GEN6_RP_CUR_UP                         _MMIO(0xA054)
-#define   GEN6_CURBSYTAVG_MASK                 0xffffff
+#define   GEN6_CURBSYTAVG_MASK                 GEN6_RP_EI_MASK
 #define GEN6_RP_PREV_UP                                _MMIO(0xA058)
 #define GEN6_RP_CUR_DOWN_EI                    _MMIO(0xA05C)
-#define   GEN6_CURIAVG_MASK                    0xffffff
+#define   GEN6_CURIAVG_MASK                    GEN6_RP_EI_MASK
 #define GEN6_RP_CUR_DOWN                       _MMIO(0xA060)
 #define GEN6_RP_PREV_DOWN                      _MMIO(0xA064)
 #define GEN6_RP_UP_EI                          _MMIO(0xA068)
@@ -7088,7 +7067,7 @@ enum {
 #define VLV_RCEDATA                            _MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
-#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP     (1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_GUC            (1<<31)
 #define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
@@ -7144,6 +7123,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX                     _MMIO(0x138124)
 #define   GEN6_PCODE_READY                     (1<<31)
+#define   GEN6_PCODE_ERROR_MASK                        0xFF
+#define     GEN6_PCODE_SUCCESS                 0x0
+#define     GEN6_PCODE_ILLEGAL_CMD             0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define     GEN6_PCODE_TIMEOUT                 0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD       0xFF
+#define     GEN7_PCODE_TIMEOUT                 0x2
+#define     GEN7_PCODE_ILLEGAL_DATA            0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define          GEN6_PCODE_WRITE_RC6VIDS              0x4
 #define          GEN6_PCODE_READ_RC6VIDS               0x5
 #define     GEN6_ENCODE_RC6_VID(mv)            (((mv) - 245) / 5)
@@ -7165,6 +7153,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ          0x17
 #define   DISPLAY_IPS_CONTROL                  0x19
 #define          HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL  0x1A
+#define   GEN9_PCODE_SAGV_CONTROL              0x21
+#define     GEN9_SAGV_DISABLE                  0x0
+#define     GEN9_SAGV_IS_DISABLED              0x1
+#define     GEN9_SAGV_ENABLE                   0x3
 #define GEN6_PCODE_DATA                                _MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT     16
@@ -7485,6 +7477,7 @@ enum {
 #define _DDI_BUF_TRANS_A               0x64E00
 #define _DDI_BUF_TRANS_B               0x64E60
 #define DDI_BUF_TRANS_LO(port, i)      _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define  DDI_BUF_BALANCE_LEG_ENABLE    (1 << 31)
 #define DDI_BUF_TRANS_HI(port, i)      _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
index 5cfe4c7716b4ec8a73a94ab66bdd27e9ae0abe76..a0af170062b1ea859f3d2297a500a8937f7b72df 100644 (file)
@@ -37,25 +37,6 @@ static void i915_save_display(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen <= 4)
                dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
-       /* LVDS state */
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
-       else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
-               dev_priv->regfile.saveLVDS = I915_READ(LVDS);
-
-       /* Panel power sequencer */
-       if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-               dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
-               dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
-               dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-       } else if (INTEL_INFO(dev)->gen <= 4) {
-               dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
-               dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-               dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-               dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
-       }
-
        /* save FBC interval */
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
                dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
@@ -64,33 +45,11 @@ static void i915_save_display(struct drm_device *dev)
 static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 mask = 0xffffffff;
 
        /* Display arbitration */
        if (INTEL_INFO(dev)->gen <= 4)
                I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
 
-       mask = ~LVDS_PORT_EN;
-
-       /* LVDS state */
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
-       else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
-               I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
-
-       /* Panel power sequencer */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
-               I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
-               I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
-               I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-       } else if (INTEL_INFO(dev)->gen <= 4) {
-               I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
-               I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
-               I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
-               I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-       }
-
 	/* only restore FBC info on platforms that support FBC */
        intel_fbc_global_disable(dev_priv);
 
@@ -104,6 +63,7 @@ static void i915_restore_display(struct drm_device *dev)
 int i915_save_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -111,7 +71,7 @@ int i915_save_state(struct drm_device *dev)
        i915_save_display(dev);
 
        if (IS_GEN4(dev))
-               pci_read_config_word(dev->pdev, GCDGMBUS,
+               pci_read_config_word(pdev, GCDGMBUS,
                                     &dev_priv->regfile.saveGCDGMBUS);
 
        /* Cache mode state */
@@ -149,6 +109,7 @@ int i915_save_state(struct drm_device *dev)
 int i915_restore_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -156,7 +117,7 @@ int i915_restore_state(struct drm_device *dev)
        i915_gem_restore_fences(dev);
 
        if (IS_GEN4(dev))
-               pci_write_config_word(dev->pdev, GCDGMBUS,
+               pci_write_config_word(pdev, GCDGMBUS,
                                      dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
new file mode 100644 (file)
index 0000000..1e5cbc5
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * (C) Copyright 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/slab.h>
+#include <linux/fence.h>
+#include <linux/reservation.h>
+
+#include "i915_sw_fence.h"
+
+static DEFINE_SPINLOCK(i915_sw_fence_lock);
+
+static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
+                                 enum i915_sw_fence_notify state)
+{
+       i915_sw_fence_notify_t fn;
+
+       fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
+       return fn(fence, state);
+}
+
+static void i915_sw_fence_free(struct kref *kref)
+{
+       struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
+
+       WARN_ON(atomic_read(&fence->pending) > 0);
+
+       if (fence->flags & I915_SW_FENCE_MASK)
+               __i915_sw_fence_notify(fence, FENCE_FREE);
+       else
+               kfree(fence);
+}
+
+static void i915_sw_fence_put(struct i915_sw_fence *fence)
+{
+       kref_put(&fence->kref, i915_sw_fence_free);
+}
+
+static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
+{
+       kref_get(&fence->kref);
+       return fence;
+}
+
+static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+                                       struct list_head *continuation)
+{
+       wait_queue_head_t *x = &fence->wait;
+       wait_queue_t *pos, *next;
+       unsigned long flags;
+
+       atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
+
+       /*
+        * To prevent unbounded recursion as we traverse the graph of
+        * i915_sw_fences, we move the task_list from this, the next ready
+        * fence, to the tail of the original fence's task_list
+        * (and so they are added to the list to be woken).
+        */
+
+       spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
+       if (continuation) {
+               list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+                       if (pos->func == autoremove_wake_function)
+                               pos->func(pos, TASK_NORMAL, 0, continuation);
+                       else
+                               list_move_tail(&pos->task_list, continuation);
+               }
+       } else {
+               LIST_HEAD(extra);
+
+               do {
+                       list_for_each_entry_safe(pos, next,
+                                                &x->task_list, task_list)
+                               pos->func(pos, TASK_NORMAL, 0, &extra);
+
+                       if (list_empty(&extra))
+                               break;
+
+                       list_splice_tail_init(&extra, &x->task_list);
+               } while (1);
+       }
+       spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
+                                    struct list_head *continuation)
+{
+       if (!atomic_dec_and_test(&fence->pending))
+               return;
+
+       if (fence->flags & I915_SW_FENCE_MASK &&
+           __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+               return;
+
+       __i915_sw_fence_wake_up_all(fence, continuation);
+}
+
+static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+{
+       if (WARN_ON(i915_sw_fence_done(fence)))
+               return;
+
+       __i915_sw_fence_complete(fence, NULL);
+}
+
+static void i915_sw_fence_await(struct i915_sw_fence *fence)
+{
+       WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+}
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+{
+       BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+
+       init_waitqueue_head(&fence->wait);
+       kref_init(&fence->kref);
+       atomic_set(&fence->pending, 1);
+       fence->flags = (unsigned long)fn;
+}
+
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+       i915_sw_fence_complete(fence);
+       i915_sw_fence_put(fence);
+}
+
+static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+{
+       list_del(&wq->task_list);
+       __i915_sw_fence_complete(wq->private, key);
+       i915_sw_fence_put(wq->private);
+       return 0;
+}
+
+static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+                                   const struct i915_sw_fence * const signaler)
+{
+       wait_queue_t *wq;
+
+       if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+               return false;
+
+       if (fence == signaler)
+               return true;
+
+       list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+               if (wq->func != i915_sw_fence_wake)
+                       continue;
+
+               if (__i915_sw_fence_check_if_after(wq->private, signaler))
+                       return true;
+       }
+
+       return false;
+}
+
+static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
+{
+       wait_queue_t *wq;
+
+       if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+               return;
+
+       list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+               if (wq->func != i915_sw_fence_wake)
+                       continue;
+
+               __i915_sw_fence_clear_checked_bit(wq->private);
+       }
+}
+
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+                                 const struct i915_sw_fence * const signaler)
+{
+       unsigned long flags;
+       bool err;
+
+       if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+               return false;
+
+       spin_lock_irqsave(&i915_sw_fence_lock, flags);
+       err = __i915_sw_fence_check_if_after(fence, signaler);
+       __i915_sw_fence_clear_checked_bit(fence);
+       spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
+
+       return err;
+}
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+                                struct i915_sw_fence *signaler,
+                                wait_queue_t *wq)
+{
+       unsigned long flags;
+       int pending;
+
+       if (i915_sw_fence_done(signaler))
+               return 0;
+
+       /* The dependency graph must be acyclic. */
+       if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
+               return -EINVAL;
+
+       INIT_LIST_HEAD(&wq->task_list);
+       wq->flags = 0;
+       wq->func = i915_sw_fence_wake;
+       wq->private = i915_sw_fence_get(fence);
+
+       i915_sw_fence_await(fence);
+
+       spin_lock_irqsave(&signaler->wait.lock, flags);
+       if (likely(!i915_sw_fence_done(signaler))) {
+               __add_wait_queue_tail(&signaler->wait, wq);
+               pending = 1;
+       } else {
+               i915_sw_fence_wake(wq, 0, 0, NULL);
+               pending = 0;
+       }
+       spin_unlock_irqrestore(&signaler->wait.lock, flags);
+
+       return pending;
+}
+
+struct dma_fence_cb {
+       struct fence_cb base;
+       struct i915_sw_fence *fence;
+       struct fence *dma;
+       struct timer_list timer;
+};
+
+static void timer_i915_sw_fence_wake(unsigned long data)
+{
+       struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+
+       printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
+              cb->dma->ops->get_driver_name(cb->dma),
+              cb->dma->ops->get_timeline_name(cb->dma),
+              cb->dma->seqno);
+       fence_put(cb->dma);
+       cb->dma = NULL;
+
+       i915_sw_fence_commit(cb->fence);
+       cb->timer.function = NULL;
+}
+
+static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+{
+       struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+       del_timer_sync(&cb->timer);
+       if (cb->timer.function)
+               i915_sw_fence_commit(cb->fence);
+       fence_put(cb->dma);
+
+       kfree(cb);
+}
+
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+                                 struct fence *dma,
+                                 unsigned long timeout,
+                                 gfp_t gfp)
+{
+       struct dma_fence_cb *cb;
+       int ret;
+
+       if (fence_is_signaled(dma))
+               return 0;
+
+       cb = kmalloc(sizeof(*cb), gfp);
+       if (!cb) {
+               if (!gfpflags_allow_blocking(gfp))
+                       return -ENOMEM;
+
+               return fence_wait(dma, false);
+       }
+
+       cb->fence = i915_sw_fence_get(fence);
+       i915_sw_fence_await(fence);
+
+       cb->dma = NULL;
+       __setup_timer(&cb->timer,
+                     timer_i915_sw_fence_wake, (unsigned long)cb,
+                     TIMER_IRQSAFE);
+       if (timeout) {
+               cb->dma = fence_get(dma);
+               mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+       }
+
+       ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+       if (ret == 0) {
+               ret = 1;
+       } else {
+               dma_i915_sw_fence_wake(dma, &cb->base);
+               if (ret == -ENOENT) /* fence already signaled */
+                       ret = 0;
+       }
+
+       return ret;
+}
+
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+                                   struct reservation_object *resv,
+                                   const struct fence_ops *exclude,
+                                   bool write,
+                                   unsigned long timeout,
+                                   gfp_t gfp)
+{
+       struct fence *excl;
+       int ret = 0, pending;
+
+       if (write) {
+               struct fence **shared;
+               unsigned int count, i;
+
+               ret = reservation_object_get_fences_rcu(resv,
+                                                       &excl, &count, &shared);
+               if (ret)
+                       return ret;
+
+               for (i = 0; i < count; i++) {
+                       if (shared[i]->ops == exclude)
+                               continue;
+
+                       pending = i915_sw_fence_await_dma_fence(fence,
+                                                               shared[i],
+                                                               timeout,
+                                                               gfp);
+                       if (pending < 0) {
+                               ret = pending;
+                               break;
+                       }
+
+                       ret |= pending;
+               }
+
+               for (i = 0; i < count; i++)
+                       fence_put(shared[i]);
+               kfree(shared);
+       } else {
+               excl = reservation_object_get_excl_rcu(resv);
+       }
+
+       if (ret >= 0 && excl && excl->ops != exclude) {
+               pending = i915_sw_fence_await_dma_fence(fence,
+                                                       excl,
+                                                       timeout,
+                                                       gfp);
+               if (pending < 0)
+                       ret = pending;
+               else
+                       ret |= pending;
+       }
+
+       fence_put(excl);
+
+       return ret;
+}
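
A hedged usage sketch for the reservation helper above (the wrapper name and the 10 second watchdog are illustrative): negative returns propagate errors, while 0 or positive values report how many waits were actually queued.

static int example_await_resv(struct i915_sw_fence *fence,
			      struct reservation_object *resv)
{
	int ret;

	/* wait on all fences (write=true), no ops excluded, 10s watchdog */
	ret = i915_sw_fence_await_reservation(fence, resv, NULL, true,
					      10 * HZ, GFP_KERNEL);
	if (ret < 0)
		return ret;

	i915_sw_fence_commit(fence);	/* arm the fence; fires when ready */
	return 0;
}
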
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
new file mode 100644 (file)
index 0000000..3731416
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * i915_sw_fence.h - library routines for N:M synchronisation points
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _I915_SW_FENCE_H_
+#define _I915_SW_FENCE_H_
+
+#include <linux/gfp.h>
+#include <linux/kref.h>
+#include <linux/notifier.h> /* for NOTIFY_DONE */
+#include <linux/wait.h>
+
+struct completion;
+struct fence;
+struct fence_ops;
+struct reservation_object;
+
+struct i915_sw_fence {
+       wait_queue_head_t wait;
+       unsigned long flags;
+       struct kref kref;
+       atomic_t pending;
+};
+
+#define I915_SW_FENCE_CHECKED_BIT      0 /* used internally for DAG checking */
+#define I915_SW_FENCE_PRIVATE_BIT      1 /* available for use by owner */
+#define I915_SW_FENCE_MASK             (~3)
+
+enum i915_sw_fence_notify {
+       FENCE_COMPLETE,
+       FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+                                     enum i915_sw_fence_notify state);
+#define __i915_sw_fence_call __aligned(4)
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void i915_sw_fence_commit(struct i915_sw_fence *fence);
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+                                struct i915_sw_fence *after,
+                                wait_queue_t *wq);
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+                                 struct fence *dma,
+                                 unsigned long timeout,
+                                 gfp_t gfp);
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+                                   struct reservation_object *resv,
+                                   const struct fence_ops *exclude,
+                                   bool write,
+                                   unsigned long timeout,
+                                   gfp_t gfp);
+
+static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
+{
+       return atomic_read(&fence->pending) < 0;
+}
+
+#endif /* _I915_SW_FENCE_H_ */
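
Putting the header's API together, a minimal chaining sketch (illustrative, not from the patch): fence b is initialised with a notify callback, made to wait on fence a, then committed; the callback runs once every prerequisite has signaled.

static int __i915_sw_fence_call
example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	if (state == FENCE_COMPLETE)
		pr_info("all prerequisites signaled\n");
	return NOTIFY_DONE;	/* let the fence wake its own waiters */
}

static void example_chain(struct i915_sw_fence *a, struct i915_sw_fence *b,
			  wait_queue_t *wq)
{
	i915_sw_fence_init(b, example_notify);
	i915_sw_fence_await_sw_fence(b, a, wq);	/* b now waits on a */
	i915_sw_fence_commit(b);		/* b fires once a completes */
}
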
index d61829e54f93d74b4b1b7aab81c9c704c472d38f..1012eeea132434c30e100c0b5fa16d0b26bd361d 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
+static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+{
+       struct drm_minor *minor = dev_get_drvdata(kdev);
+       return to_i915(minor->dev);
+}
 
 #ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev,
+static u32 calc_residency(struct drm_i915_private *dev_priv,
                          i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;
@@ -49,13 +52,13 @@ static u32 calc_residency(struct drm_device *dev,
        intel_runtime_pm_get(dev_priv);
 
        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                units = 1;
                div = dev_priv->czclk_freq;
 
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
-       } else if (IS_BROXTON(dev)) {
+       } else if (IS_BROXTON(dev_priv)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }
@@ -76,32 +79,32 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_get_drvdata(kdev);
-       u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static ssize_t
 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_get_drvdata(kdev);
-       u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
@@ -144,9 +147,9 @@ static struct attribute_group media_rc6_attr_group = {
 };
 #endif
 
-static int l3_access_valid(struct drm_device *dev, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
 {
-       if (!HAS_L3_DPF(dev))
+       if (!HAS_L3_DPF(dev_priv))
                return -EPERM;
 
        if (offset % 4 != 0)
@@ -163,22 +166,21 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
-       struct drm_minor *dminor = dev_to_drm_minor(dev);
-       struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct device *kdev = kobj_to_dev(kobj);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
        count = round_down(count, 4);
 
-       ret = l3_access_valid(drm_dev, offset);
+       ret = l3_access_valid(dev_priv, offset);
        if (ret)
                return ret;
 
        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-       ret = i915_mutex_lock_interruptible(drm_dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
@@ -189,7 +191,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
        else
                memset(buf, 0, count);
 
-       mutex_unlock(&drm_dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        return count;
 }
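
Bounds handling in the read path, in isolation: the request is rounded
down to whole dwords, the offset validated against the parity log, and
the length clamped to what remains. A standalone model, with an
illustrative stand-in for GEN7_L3LOG_SIZE (the driver returns error
codes where this returns 0):

#include <stddef.h>
#include <stdio.h>

#define L3LOG_SIZE 0x80	/* stand-in for GEN7_L3LOG_SIZE */

static size_t clamp_l3_io(long long offset, size_t count)
{
	count &= ~(size_t)3;	/* round_down(count, 4) */
	if (offset < 0 || offset % 4 || offset >= L3LOG_SIZE)
		return 0;	/* l3_access_valid() rejects these */
	if (count > (size_t)(L3LOG_SIZE - offset))
		count = L3LOG_SIZE - offset;
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_l3_io(0x70, 64));	/* 16: only 0x10 bytes left */
	printf("%zu\n", clamp_l3_io(0x81, 64));	/* 0: misaligned, out of range */
	return 0;
}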
@@ -199,30 +201,29 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
-       struct drm_minor *dminor = dev_to_drm_minor(dev);
-       struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct device *kdev = kobj_to_dev(kobj);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_gem_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
-       if (!HAS_HW_CONTEXTS(drm_dev))
+       if (!HAS_HW_CONTEXTS(dev_priv))
                return -ENXIO;
 
-       ret = l3_access_valid(drm_dev, offset);
+       ret = l3_access_valid(dev_priv, offset);
        if (ret)
                return ret;
 
-       ret = i915_mutex_lock_interruptible(drm_dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
        if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
-                       mutex_unlock(&drm_dev->struct_mutex);
+                       mutex_unlock(&dev->struct_mutex);
                        return -ENOMEM;
                }
        }
@@ -240,7 +241,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);
 
-       mutex_unlock(&drm_dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        return count;
 }
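
Every handler in this file now funnels through the kdev_minor_to_i915()
helper introduced at the top, which collapses a two-step lookup: the
sysfs device's drvdata is the drm_minor, and to_i915() recovers the
private structure from the drm_device embedded inside it via
container_of(). That second step in isolation, with stub types standing
in for the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device_stub { int primary_index; };

struct i915_private_stub {
	int czclk_freq;
	struct drm_device_stub drm;	/* embedded, as in the driver */
};

int main(void)
{
	struct i915_private_stub priv = { .czclk_freq = 200000 };
	struct drm_device_stub *dev = &priv.drm;	/* what callers hold */
	struct i915_private_stub *back =
		container_of(dev, struct i915_private_stub, drm);

	printf("%d\n", back->czclk_freq);	/* 200000 */
	return 0;
}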
@@ -266,13 +267,9 @@ static struct bin_attribute dpf_attrs_1 = {
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        int ret;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->rps.hw_lock);
@@ -300,59 +297,70 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       intel_gpu_freq(dev_priv,
+                                      dev_priv->rps.cur_freq));
+}
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-       intel_runtime_pm_get(dev_priv);
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       intel_gpu_freq(dev_priv,
+                                      dev_priv->rps.boost_freq));
+}
+
+static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 val;
+       ssize_t ret;
+
+       ret = kstrtou32(buf, 0, &val);
+       if (ret)
+               return ret;
+
+       /* Validate against (static) hardware limits */
+       val = intel_freq_opcode(dev_priv, val);
+       if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+               return -EINVAL;
 
        mutex_lock(&dev_priv->rps.hw_lock);
-       ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+       dev_priv->rps.boost_freq = val;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       intel_runtime_pm_put(dev_priv);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+       return count;
 }
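
The new boost store follows the stock sysfs pattern: parse with
kstrtou32(), translate MHz into the firmware's frequency opcode via
intel_freq_opcode(), reject anything outside the static hardware range,
then publish under rps.hw_lock. A rough standalone model; the /50
conversion is invented, purely to stand in for the opcode translation:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct rps { unsigned min_freq, max_freq, boost_freq; };

static int store_boost(struct rps *rps, const char *buf)
{
	char *end;
	unsigned long mhz = strtoul(buf, &end, 0);
	unsigned val;

	if (end == buf)
		return -EINVAL;		/* kstrtou32() failure */
	val = (unsigned)(mhz / 50);	/* stand-in for intel_freq_opcode() */
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;
	rps->boost_freq = val;		/* under rps.hw_lock in the driver */
	return 0;
}

int main(void)
{
	struct rps rps = { .min_freq = 5, .max_freq = 22 };

	printf("%d\n", store_boost(&rps, "600"));	/* 0: opcode 12 in range */
	printf("%d\n", store_boost(&rps, "5000"));	/* -EINVAL: above max */
	return 0;
}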
 
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-       return snprintf(buf, PAGE_SIZE,
-                       "%d\n",
-                       intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       intel_gpu_freq(dev_priv,
+                                      dev_priv->rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       intel_gpu_freq(dev_priv,
+                                      dev_priv->rps.max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
        ssize_t ret;
 
@@ -360,8 +368,6 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->rps.hw_lock);
@@ -400,27 +406,18 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
-
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       intel_gpu_freq(dev_priv,
+                                      dev_priv->rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
        ssize_t ret;
 
@@ -428,8 +425,6 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->rps.hw_lock);
@@ -465,6 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
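
One oddity worth flagging in the declarations above: gt_boost_freq_mhz
is created with mode S_IRUGO only, even though it wires up a store
handler, while its gt_max/gt_min neighbours pair their stores with
S_IRUGO | S_IWUSR. As declared, sysfs exposes a read-only file and the
store path is unreachable; presumably this wants S_IWUSR as well.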
 
@@ -478,9 +474,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
@@ -498,6 +492,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
+       &dev_attr_gt_boost_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
@@ -509,6 +504,7 @@ static const struct attribute *gen6_attrs[] = {
 static const struct attribute *vlv_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
+       &dev_attr_gt_boost_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
@@ -524,8 +520,8 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
        struct device *kdev = kobj_to_dev(kobj);
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
@@ -559,18 +555,10 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 loff_t off, size_t count)
 {
        struct device *kdev = kobj_to_dev(kobj);
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       int ret;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        DRM_DEBUG_DRIVER("Resetting error state\n");
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       i915_destroy_error_state(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_destroy_error_state(&dev_priv->drm);
 
        return count;
 }
@@ -583,37 +571,38 @@ static struct bin_attribute error_state_attr = {
        .write = error_state_write,
 };
 
-void i915_setup_sysfs(struct drm_device *dev)
+void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
+       struct device *kdev = dev_priv->drm.primary->kdev;
        int ret;
 
 #ifdef CONFIG_PM
-       if (HAS_RC6(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (HAS_RC6(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
-       if (HAS_RC6p(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (HAS_RC6p(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &rc6p_attr_group);
                if (ret)
                        DRM_ERROR("RC6p residency sysfs setup failed\n");
        }
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &media_rc6_attr_group);
                if (ret)
                        DRM_ERROR("Media RC6 residency sysfs setup failed\n");
        }
 #endif
-       if (HAS_L3_DPF(dev)) {
-               ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+       if (HAS_L3_DPF(dev_priv)) {
+               ret = device_create_bin_file(kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");
 
-               if (NUM_L3_SLICES(dev) > 1) {
-                       ret = device_create_bin_file(dev->primary->kdev,
+               if (NUM_L3_SLICES(dev_priv) > 1) {
+                       ret = device_create_bin_file(kdev,
                                                     &dpf_attrs_1);
                        if (ret)
                                DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -621,30 +610,32 @@ void i915_setup_sysfs(struct drm_device *dev)
        }
 
        ret = 0;
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
-       else if (INTEL_INFO(dev)->gen >= 6)
-               ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");
 
-       ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+       ret = sysfs_create_bin_file(&kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
 }
 
-void i915_teardown_sysfs(struct drm_device *dev)
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 {
-       sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+       struct device *kdev = dev_priv->drm.primary->kdev;
+
+       sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               sysfs_remove_files(&kdev->kobj, vlv_attrs);
        else
-               sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
-       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
-       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
+               sysfs_remove_files(&kdev->kobj, gen6_attrs);
+       device_remove_bin_file(kdev,  &dpf_attrs_1);
+       device_remove_bin_file(kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
-       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+       sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
+       sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 #endif
 }
index 534154e05fbe47b70870e4149e8265caf4a89572..178798002a7335694ddc391f8f4fe20260aebc4b 100644 (file)
@@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-           TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
-           TP_ARGS(dev, size, align, flags),
+           TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+           TP_ARGS(vm, size, align, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
+                            __field(struct i915_address_space *, vm)
                             __field(u32, size)
                             __field(u32, align)
-                            __field(unsigned, flags)
+                            __field(unsigned int, flags)
                            ),
 
            TP_fast_assign(
-                          __entry->dev = dev->primary->index;
+                          __entry->dev = vm->dev->primary->index;
+                          __entry->vm = vm;
                           __entry->size = size;
                           __entry->align = align;
                           __entry->flags = flags;
                          ),
 
-           TP_printk("dev=%d, size=%d, align=%d %s",
-                     __entry->dev, __entry->size, __entry->align,
+           TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+                     __entry->dev, __entry->vm, __entry->size, __entry->align,
                      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
@@ -449,10 +451,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct drm_i915_gem_request *to_req,
-                    struct intel_engine_cs *from,
-                    struct drm_i915_gem_request *req),
-           TP_ARGS(to_req, from, req),
+           TP_PROTO(struct drm_i915_gem_request *to,
+                    struct drm_i915_gem_request *from),
+           TP_ARGS(to, from),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -463,9 +464,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 
            TP_fast_assign(
                           __entry->dev = from->i915->drm.primary->index;
-                          __entry->sync_from = from->id;
-                          __entry->sync_to = to_req->engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->sync_from = from->engine->id;
+                          __entry->sync_to = to->engine->id;
+                          __entry->seqno = from->fence.seqno;
                           ),
 
            TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -488,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           __entry->flags = flags;
-                          intel_engine_enable_signaling(req);
+                          fence_enable_sw_signaling(&req->fence);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -533,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -595,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           __entry->blocking =
                                     mutex_is_locked(&req->i915->drm.struct_mutex);
                           ),
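
A pattern running through all of these tracepoint hunks: the request's
seqno now lives on its embedded fence (req->fence.seqno) rather than in
a driver-private field, and arming signaling goes through the generic
fence_enable_sw_signaling() entry point instead of a driver call. The
structural change in miniature, with stub types:

#include <stdio.h>

struct fence_stub { unsigned seqno; };

struct request_stub {
	struct fence_stub fence;	/* single source of truth for seqno */
	int engine_id;
};

int main(void)
{
	struct request_stub req = { .fence = { .seqno = 42 }, .engine_id = 1 };

	/* what the TP_fast_assign bodies now record */
	printf("ring=%d seqno=%u\n", req.engine_id, req.fence.seqno);
	return 0;
}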
index f6acb5a0e7015f8112277c26002f1c4cbbcc85c3..dae340cfc6c76f617795e248b74550fcc54bb0ae 100644 (file)
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-       if (!IS_HASWELL(dev_priv))
-               return;
-
        magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
        if (magic != VGT_MAGIC)
                return;
@@ -97,6 +94,7 @@ static struct _balloon_info_ bl_info;
 
 /**
  * intel_vgt_deballoon - deballoon reserved graphics address trunks
+ * @dev_priv: i915 device private data
  *
 * This function is called to deallocate the ballooned-out graphics memory
 * when the driver is unloaded or when ballooning fails.
@@ -138,7 +136,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
 
 /**
  * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device private data
  *
  * This function is called at the initialization stage, to balloon out the
  * graphic address space allocated to other vGPUs, by marking these spaces as
@@ -155,27 +153,27 @@ static int vgt_balloon_space(struct drm_mm *mm,
  * host point of view, the graphic address space is partitioned by multiple
  * vGPUs in different VMs. ::
  *
- *                        vGPU1 view         Host view
- *             0 ------> +-----------+     +-----------+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *        mappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               v       |###########|     |   Host    |
- *               +=======+===========+     +===========+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *      unmappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               |       |###########|     |   Host    |
- *               v       |###########|     |           |
- * total GM size ------> +-----------+     +-----------+
+ *                         vGPU1 view         Host view
+ *              0 ------> +-----------+     +-----------+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *         mappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                v       |###########|     |   Host    |
+ *                +=======+===========+     +===========+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *       unmappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                |       |###########|     |   Host    |
+ *                v       |###########|     |           |
+ *  total GM size ------> +-----------+     +-----------+
  *
  * Returns:
  * zero on success, non-zero if configuration invalid or ballooning failed
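
The balloon step reserves, in each half of the global GTT, everything
outside this vGPU's assigned window, so the allocator can never hand
out another VM's range (the diagram above shows the resulting layout).
The arithmetic in isolation, with invented bounds:

#include <stdio.h>

static void balloon(unsigned long start, unsigned long end,
		    unsigned long mine_lo, unsigned long mine_hi)
{
	if (mine_lo > start)	/* space below our window */
		printf("reserve [%#lx, %#lx)\n", start, mine_lo);
	if (mine_hi < end)	/* space above our window */
		printf("reserve [%#lx, %#lx)\n", mine_hi, end);
}

int main(void)
{
	/* mappable GM [0, 256M), this vGPU owns [64M, 128M) */
	balloon(0x00000000, 0x10000000, 0x04000000, 0x08000000);
	/* unmappable GM [256M, 1G), this vGPU owns [512M, 768M) */
	balloon(0x10000000, 0x40000000, 0x20000000, 0x30000000);
	return 0;
}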
index 7de7721f65bcee9f2d9df2736396809b078b965b..b82de3072d4f802653127e46a2e141871d2d0e94 100644 (file)
@@ -55,7 +55,7 @@ intel_create_plane_state(struct drm_plane *plane)
                return NULL;
 
        state->base.plane = plane;
-       state->base.rotation = BIT(DRM_ROTATE_0);
+       state->base.rotation = DRM_ROTATE_0;
        state->ckey.flags = I915_SET_COLORKEY_NONE;
 
        return state;
@@ -134,20 +134,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 
        crtc_state = to_intel_crtc_state(drm_crtc_state);
 
-       /*
-        * The original src/dest coordinates are stored in state->base, but
-        * we want to keep another copy internal to our driver that we can
-        * clip/modify ourselves.
-        */
-       intel_state->src.x1 = state->src_x;
-       intel_state->src.y1 = state->src_y;
-       intel_state->src.x2 = state->src_x + state->src_w;
-       intel_state->src.y2 = state->src_y + state->src_h;
-       intel_state->dst.x1 = state->crtc_x;
-       intel_state->dst.y1 = state->crtc_y;
-       intel_state->dst.x2 = state->crtc_x + state->crtc_w;
-       intel_state->dst.y2 = state->crtc_y + state->crtc_h;
-
        /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
        intel_state->clip.x1 = 0;
        intel_state->clip.y1 = 0;
@@ -157,6 +143,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
                crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
        if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+               char *format_name;
                if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
                        state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
                        DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
@@ -171,8 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
                switch (state->fb->pixel_format) {
                case DRM_FORMAT_C8:
                case DRM_FORMAT_RGB565:
-                       DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
-                                       drm_get_format_name(state->fb->pixel_format));
+                       format_name = drm_get_format_name(state->fb->pixel_format);
+                       DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
 
                default:
@@ -180,7 +168,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
                }
        }
 
-       intel_state->visible = false;
+       intel_state->base.visible = false;
        ret = intel_plane->check_plane(plane, crtc_state, intel_state);
        if (ret)
                return ret;
@@ -196,7 +184,7 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
                to_intel_plane_state(plane->state);
        struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
 
-       if (intel_state->visible)
+       if (intel_state->base.visible)
                intel_plane->update_plane(plane,
                                          to_intel_crtc_state(crtc->state),
                                          intel_state);
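
The kfree() added above tracks a drm core change: drm_get_format_name()
now hands back a freshly allocated buffer that each caller must free,
which is what the new local and the kfree() imply. The caller-frees
shape, modelled standalone with an invented fourcc decoder:

#include <stdio.h>
#include <stdlib.h>

static char *get_format_name(unsigned fourcc)
{
	char *buf = malloc(16);

	if (buf)
		snprintf(buf, 16, "%c%c%c%c",
			 fourcc & 0xff, (fourcc >> 8) & 0xff,
			 (fourcc >> 16) & 0xff, (fourcc >> 24) & 0xff);
	return buf;	/* ownership passes to the caller */
}

int main(void)
{
	char *name = get_format_name(0x36314752);	/* 'RG16' */

	if (name) {
		printf("Unsupported pixel format %s for 90/270!\n", name);
		free(name);	/* the kfree() in the hunk above */
	}
	return 0;
}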
index 6700a7be7f78755675d6a8c1c17c69377d0117b4..6c70a5bfd7d896a316eda26c82444338fedf8622 100644 (file)
  * related registers. (The notable exception is the power management, not
  * covered here.)
  *
- * The struct i915_audio_component is used to interact between the graphics
- * and audio drivers. The struct i915_audio_component_ops *ops in it is
- * defined in graphics driver and called in audio driver. The
- * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
+ * The struct &i915_audio_component mediates interaction between the graphics
+ * and audio drivers. The struct &i915_audio_component_ops @ops in it is
+ * defined in the graphics driver and called from the audio driver. The
+ * struct &i915_audio_component_audio_ops @audio_ops is called from the i915
+ * driver.
  */
 
 static const struct {
@@ -359,9 +359,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *intel_dig_port =
-               enc_to_dig_port(&encoder->base);
-       enum port port = intel_dig_port->port;
+       enum port port = enc_to_dig_port(&encoder->base)->port;
        enum pipe pipe = intel_crtc->pipe;
        uint32_t tmp, eldv;
        i915_reg_t aud_config, aud_cntrl_st2;
@@ -407,13 +405,10 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *intel_dig_port =
-               enc_to_dig_port(&encoder->base);
-       enum port port = intel_dig_port->port;
+       enum port port = enc_to_dig_port(&encoder->base)->port;
        enum pipe pipe = intel_crtc->pipe;
        uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t tmp;
+       uint32_t tmp, eldv;
        int len, i;
        i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -581,25 +576,27 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
        }
 }
 
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
 {
-       intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+       intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
 {
-       intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+       intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
                                                     bool enable)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        u32 tmp;
 
        if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
                return;
 
+       i915_audio_component_get_power(kdev);
+
        /*
         * Enable/disable generating the codec wake signal, overriding the
         * internal logic to generate the codec wake to controller.
@@ -615,12 +612,14 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
                I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
                usleep_range(1000, 1500);
        }
+
+       i915_audio_component_put_power(kdev);
 }
 
 /* Get CDCLK in kHz  */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
        if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
                return -ENODEV;
@@ -628,10 +627,10 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
        return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
                                                int port, int rate)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_encoder *intel_encoder;
        struct intel_crtc *crtc;
        struct drm_display_mode *mode;
@@ -648,6 +647,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
            !IS_HASWELL(dev_priv))
                return 0;
 
+       i915_audio_component_get_power(kdev);
        mutex_lock(&dev_priv->av_mutex);
        /* 1. get the pipe */
        intel_encoder = dev_priv->dig_port_map[port];
@@ -698,14 +698,15 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
        mutex_unlock(&dev_priv->av_mutex);
+       i915_audio_component_put_power(kdev);
        return err;
 }
 
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
                                        bool *enabled,
                                        unsigned char *buf, int max_bytes)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        const u8 *eld;
@@ -739,11 +740,11 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
        .get_eld        = i915_audio_component_get_eld,
 };
 
-static int i915_audio_component_bind(struct device *i915_dev,
-                                    struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+                                    struct device *hda_kdev, void *data)
 {
        struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
        int i;
 
        if (WARN_ON(acomp->ops || acomp->dev))
@@ -751,7 +752,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
 
        drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = &i915_audio_component_ops;
-       acomp->dev = i915_dev;
+       acomp->dev = i915_kdev;
        BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
        for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
                acomp->aud_sample_rate[i] = 0;
@@ -761,11 +762,11 @@ static int i915_audio_component_bind(struct device *i915_dev,
        return 0;
 }
 
-static void i915_audio_component_unbind(struct device *i915_dev,
-                                       struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+                                       struct device *hda_kdev, void *data)
 {
        struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
        drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = NULL;
index b074f3d6d127fded56cdc49ea9326ced2407d51a..9bad14d22c95729023374ad860931348a5de07d5 100644 (file)
 
 #include "i915_drv.h"
 
+static void intel_breadcrumbs_hangcheck(unsigned long data)
+{
+       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+       if (!b->irq_enabled)
+               return;
+
+       if (time_before(jiffies, b->timeout)) {
+               mod_timer(&b->hangcheck, b->timeout);
+               return;
+       }
+
+       DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
+       set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+       mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+
+       /* Ensure that even if the GPU hangs, we get woken up.
+        *
+        * However, note that if no one is waiting, we never notice
+        * a gpu hang. Eventually, we will have to wait for a resource
+        * held by the GPU and so trigger a hangcheck. In the most
+        * pathological case, this will be upon memory starvation! To
+        * prevent this, we also queue the hangcheck from the retire
+        * worker.
+        */
+       i915_queue_hangcheck(engine->i915);
+}
+
+static unsigned long wait_timeout(void)
+{
+       return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+}
+
 static void intel_breadcrumbs_fake_irq(unsigned long data)
 {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
@@ -37,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data)
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
-       rcu_read_lock();
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-       rcu_read_unlock();
 }
 
 static void irq_enable(struct intel_engine_cs *engine)
@@ -91,17 +123,13 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
        }
 
        if (!b->irq_enabled ||
-           test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+           test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
                mod_timer(&b->fake_irq, jiffies + 1);
-
-       /* Ensure that even if the GPU hangs, we get woken up.
-        *
-        * However, note that if no one is waiting, we never notice
-        * a gpu hang. Eventually, we will have to wait for a resource
-        * held by the GPU and so trigger a hangcheck. In the most
-        * pathological case, this will be upon memory starvation!
-        */
-       i915_queue_hangcheck(i915);
+       } else {
+               /* Ensure we never sleep indefinitely */
+               GEM_BUG_ON(!time_after(b->timeout, jiffies));
+               mod_timer(&b->hangcheck, b->timeout);
+       }
 }
 
 static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
@@ -204,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
-       GEM_BUG_ON(!first && !b->irq_seqno_bh);
+       GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
 
        if (completed) {
                struct rb_node *next = rb_next(completed);
@@ -212,8 +240,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
+                       b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
-                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
@@ -238,8 +267,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 
        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+               b->timeout = wait_timeout();
                b->first_wait = wait;
-               smp_store_mb(b->irq_seqno_bh, wait->tsk);
+               rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
@@ -250,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
-       GEM_BUG_ON(!b->irq_seqno_bh);
+       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
 
@@ -270,11 +300,6 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
        return first;
 }
 
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
-{
-       mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-}
-
 static inline bool chain_wakeup(struct rb_node *rb, int priority)
 {
        return rb && to_wait(rb)->tsk->prio <= priority;
@@ -310,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;
 
-               GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+               GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
 
                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
@@ -352,14 +377,15 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
+                       b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
-                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
-                       wake_up_process(b->irq_seqno_bh);
+                       wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
-                       WRITE_ONCE(b->irq_seqno_bh, NULL);
+                       rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
@@ -373,7 +399,7 @@ out_unlock:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
-       GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock(&b->lock);
 }
 
@@ -437,6 +463,10 @@ static int intel_breadcrumbs_signaler(void *arg)
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);
 
+                       local_bh_disable();
+                       fence_signal(&request->fence);
+                       local_bh_enable(); /* kick start the tasklets */
+
                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
@@ -452,7 +482,7 @@ static int intel_breadcrumbs_signaler(void *arg)
                        rb_erase(&request->signaling.node, &b->signals);
                        spin_unlock(&b->lock);
 
-                       i915_gem_request_unreference(request);
+                       i915_gem_request_put(request);
                } else {
                        if (kthread_should_stop())
                                break;
@@ -472,18 +502,14 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
        struct rb_node *parent, **p;
        bool first, wakeup;
 
-       if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
-               return;
-
-       spin_lock(&b->lock);
-       if (unlikely(request->signaling.wait.tsk)) {
-               wakeup = false;
-               goto unlock;
-       }
+       /* locked by fence_enable_sw_signaling() */
+       assert_spin_locked(&request->lock);
 
        request->signaling.wait.tsk = b->signaler;
-       request->signaling.wait.seqno = request->seqno;
-       i915_gem_request_reference(request);
+       request->signaling.wait.seqno = request->fence.seqno;
+       i915_gem_request_get(request);
+
+       spin_lock(&b->lock);
 
        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
@@ -504,8 +530,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
-               if (i915_seqno_passed(request->seqno,
-                                     to_signaler(parent)->seqno)) {
+               if (i915_seqno_passed(request->fence.seqno,
+                                     to_signaler(parent)->fence.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
@@ -517,7 +543,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
        if (first)
                smp_store_mb(b->first_signal, request);
 
-unlock:
        spin_unlock(&b->lock);
 
        if (wakeup)
@@ -533,6 +558,9 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
+       setup_timer(&b->hangcheck,
+                   intel_breadcrumbs_hangcheck,
+                   (unsigned long)engine);
 
        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
@@ -557,6 +585,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);
 
+       del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
 }
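
The new timer gives the interrupt path a watchdog: whenever a waiter
becomes the bottom half, b->timeout is set to wait_timeout() (one
DRM_I915_HANGCHECK_JIFFIES window, rounded up), and the callback either
re-arms itself while the deadline is still ahead or records a missed
interrupt and starts the fake-irq poll. The wraparound-safe re-arm test
in isolation:

#include <stdbool.h>
#include <stdio.h>

/* equivalent to the kernel's time_before(now, timeout) */
static bool fires(unsigned long now, unsigned long timeout)
{
	bool still_waiting = (now - timeout) > (~0UL >> 1);

	return !still_waiting;	/* true: declare the irq missed */
}

int main(void)
{
	printf("%d\n", fires(100, 150));	/* 0: re-arm at 'timeout' */
	printf("%d\n", fires(200, 150));	/* 1: deadline passed */
	return 0;
}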
 
@@ -570,11 +599,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
         * RCU lock, i.e. as we call wake_up_process() we must be holding the
         * rcu_read_lock().
         */
-       rcu_read_lock();
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);
-       rcu_read_unlock();
 
        return mask;
 }
index bc0fef3d33356a1fb39f543ec04c31577694ecdb..95a72771eea61a2d49d1ab403a92392810bb6b18 100644 (file)
@@ -100,13 +100,14 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int i, pipe = intel_crtc->pipe;
        uint16_t coeffs[9] = { 0, };
+       struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 
        if (crtc_state->ctm) {
                struct drm_color_ctm *ctm =
                        (struct drm_color_ctm *)crtc_state->ctm->data;
                uint64_t input[9] = { 0, };
 
-               if (intel_crtc->config->limited_color_range) {
+               if (intel_crtc_state->limited_color_range) {
                        ctm_mult_by_limited(input, ctm->matrix);
                } else {
                        for (i = 0; i < ARRAY_SIZE(input); i++)
@@ -158,7 +159,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
                 * into consideration.
                 */
                for (i = 0; i < 3; i++) {
-                       if (intel_crtc->config->limited_color_range)
+                       if (intel_crtc_state->limited_color_range)
                                coeffs[i * 3 + i] =
                                        I9XX_CSC_COEFF_LIMITED_RANGE;
                        else
@@ -182,7 +183,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        if (INTEL_INFO(dev)->gen > 6) {
                uint16_t postoff = 0;
 
-               if (intel_crtc->config->limited_color_range)
+               if (intel_crtc_state->limited_color_range)
                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -193,7 +194,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        } else {
                uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-               if (intel_crtc->config->limited_color_range)
+               if (intel_crtc_state->limited_color_range)
                        mode |= CSC_BLACK_SCREEN_OFFSET;
 
                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -263,7 +264,8 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
 static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-                                   struct drm_property_blob *blob)
+                                   struct drm_property_blob *blob,
+                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -272,7 +274,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        int i;
 
        if (HAS_GMCH_DISPLAY(dev)) {
-               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -305,7 +307,8 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 
 static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
 {
-       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
+                               to_intel_crtc_state(crtc_state));
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
@@ -323,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
         * Workaround : Do not read or write the pipe palette/gamma data while
         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
         */
-       if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+       if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
            (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
                hsw_disable_ips(intel_crtc);
                reenable_ips = true;
@@ -436,7 +439,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
                           (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-               i9xx_load_luts_internal(crtc, state->gamma_lut);
+               i9xx_load_luts_internal(crtc, state->gamma_lut,
+                                       to_intel_crtc_state(state));
                return;
        }
 
@@ -479,7 +483,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
         * Also program a linear LUT in the legacy block (behind the
         * CGM block).
         */
-       i9xx_load_luts_internal(crtc, NULL);
+       i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
 }
 
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)
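
Same atomic-correctness thread as the rest of this series: the LUT and
CSC helpers now receive the intel_crtc_state they are programming
instead of reading intel_crtc->config, and i9xx_load_luts_internal()
gains an explicit crtc_state parameter so its DSI/PLL assertion and the
CHV legacy path check the state actually being committed.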
index 827b6ef4e9aedfa669c91942d71f84e800841c38..dfbcf16b41dfb956dddfff8f8cc90aef9d1b579c 100644 (file)
@@ -143,13 +143,15 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
-static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+                              struct intel_crtc_state *crtc_state,
+                              int mode)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
        u32 adpa;
 
        if (INTEL_INFO(dev)->gen >= 5)
@@ -193,23 +195,45 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
        I915_WRITE(crt->adpa_reg, adpa);
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
+static void intel_disable_crt(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
-       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+       intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
 }
 
-static void pch_disable_crt(struct intel_encoder *encoder)
+static void pch_disable_crt(struct intel_encoder *encoder,
+                           struct intel_crtc_state *old_crtc_state,
+                           struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_crt(struct intel_encoder *encoder)
+static void pch_post_disable_crt(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
-       intel_disable_crt(encoder);
+       intel_disable_crt(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_crt(struct intel_encoder *encoder)
+static void hsw_post_disable_crt(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
-       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+
+       lpt_disable_pch_transcoder(dev_priv);
+       lpt_disable_iclkip(dev_priv);
+
+       intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
+{
+       intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
 }
 
 static enum drm_mode_status
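
The signature churn through this file has one cause: under atomic
modesetting the encoder hooks must work from the state being committed,
so each enable/disable hook now receives the intel_crtc_state and
connector state explicitly instead of chasing
encoder->base.crtc->config, whose cached contents may still describe
the previous configuration. The old_crtc_state/old_conn_state
arguments on the disable hooks apply the same rule to teardown.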
@@ -253,7 +277,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
 
@@ -894,6 +919,7 @@ void intel_crt_init(struct drm_device *dev)
        if (HAS_DDI(dev)) {
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
+               crt->base.post_disable = hsw_post_disable_crt;
        } else {
                crt->base.get_config = intel_crt_get_config;
                crt->base.get_hw_state = intel_crt_get_hw_state;
index 3edb9580928e5b30a35d855a87f6bcd66158b454..1ea0e1f433971ea201debcff43e36d6cd75a35f4 100644 (file)
  * onwards to drive newly added DMC (Display microcontroller) in display
  * engine to save and restore the state of display engine when it enter into
  * low-power state and comes back to normal.
- *
- * Firmware loading status will be one of the below states: FW_UNINITIALIZED,
- * FW_LOADED, FW_FAILED.
- *
- * Once the firmware is written into the registers status will be moved from
- * FW_UNINITIALIZED to FW_LOADED and for any erroneous condition status will
- * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
index dd1d6fe122976edb5f6dd78428f34bb2b4313adb..15d47c87def67bf8418684160ad7decabce0db2e 100644 (file)
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
        { 0x0000201B, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
-       { 0x80007011, 0x000000CD, 0x0 },
+       { 0x80007011, 0x000000CD, 0x1 },
        { 0x80009010, 0x000000C0, 0x1 },
        { 0x0000201B, 0x0000009D, 0x0 },
        { 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
        { 0x00000018, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
-       { 0x80007011, 0x000000CD, 0x0 },
+       { 0x80007011, 0x000000CD, 0x3 },
        { 0x80009010, 0x000000C0, 0x3 },
        { 0x00000018, 0x0000009D, 0x0 },
        { 0x80005012, 0x000000C0, 0x3 },
@@ -301,45 +301,34 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
        { 154, 0x9A, 1, 128, true },    /* 9:   1200            0   */
 };
 
-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
-                                   u32 level, enum port port, int type);
-
-static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
-                                struct intel_digital_port **dig_port,
-                                enum port *port)
+enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
 {
-       struct drm_encoder *encoder = &intel_encoder->base;
-
-       switch (intel_encoder->type) {
+       switch (encoder->type) {
        case INTEL_OUTPUT_DP_MST:
-               *dig_port = enc_to_mst(encoder)->primary;
-               *port = (*dig_port)->port;
-               break;
-       default:
-               WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
-               /* fallthrough and treat as unknown */
+               return enc_to_mst(&encoder->base)->primary->port;
        case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_UNKNOWN:
-               *dig_port = enc_to_dig_port(encoder);
-               *port = (*dig_port)->port;
-               break;
+               return enc_to_dig_port(&encoder->base)->port;
        case INTEL_OUTPUT_ANALOG:
-               *dig_port = NULL;
-               *port = PORT_E;
-               break;
+               return PORT_E;
+       default:
+               MISSING_CASE(encoder->type);
+               return PORT_A;
        }
 }
 
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       struct intel_digital_port *dig_port;
-       enum port port;
-
-       ddi_get_encoder_port(intel_encoder, &dig_port, &port);
-
-       return port;
+       if (dev_priv->vbt.edp.low_vswing) {
+               *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+               return bdw_ddi_translations_edp;
+       } else {
+               *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+               return bdw_ddi_translations_dp;
+       }
 }
 
 static const struct ddi_buf_trans *
@@ -388,39 +377,58 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
        }
 }
 
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+       int n_hdmi_entries;
+       int hdmi_level;
+       int hdmi_default_entry;
+
+       hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+       if (IS_BROXTON(dev_priv))
+               return hdmi_level;
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+               hdmi_default_entry = 8;
+       } else if (IS_BROADWELL(dev_priv)) {
+               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+               hdmi_default_entry = 7;
+       } else if (IS_HASWELL(dev_priv)) {
+               n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+               hdmi_default_entry = 6;
+       } else {
+               WARN(1, "ddi translation table missing\n");
+               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+               hdmi_default_entry = 7;
+       }
+
+       /* Choose a good default if VBT is badly populated */
+       if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+           hdmi_level >= n_hdmi_entries)
+               hdmi_level = hdmi_default_entry;
+
+       return hdmi_level;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
- * values in advance. The buffer values are different for FDI and DP modes,
- * but the HDMI/DVI fields are shared among those. So we program the DDI
- * in either FDI or DP modes only, as HDMI connections will work with both
- * of those
+ * values in advance. This function programs the correct values for
+ * DP/eDP/FDI use cases.
  */
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 iboost_bit = 0;
-       int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
-           size;
-       int hdmi_level;
-       enum port port;
+       int i, n_dp_entries, n_edp_entries, size;
+       enum port port = intel_ddi_get_encoder_port(encoder);
        const struct ddi_buf_trans *ddi_translations_fdi;
        const struct ddi_buf_trans *ddi_translations_dp;
        const struct ddi_buf_trans *ddi_translations_edp;
-       const struct ddi_buf_trans *ddi_translations_hdmi;
        const struct ddi_buf_trans *ddi_translations;
 
-       port = intel_ddi_get_encoder_port(encoder);
-       hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
-       if (IS_BROXTON(dev_priv)) {
-               if (encoder->type != INTEL_OUTPUT_HDMI)
-                       return;
-
-               /* Vswing programming for HDMI */
-               bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
-                                       INTEL_OUTPUT_HDMI);
+       if (IS_BROXTON(dev_priv))
                return;
-       }
 
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ddi_translations_fdi = NULL;
@@ -428,13 +436,10 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
                                skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
                ddi_translations_edp =
                                skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-               ddi_translations_hdmi =
-                               skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-               hdmi_default_entry = 8;
+
                /* If we're boosting the current, set bit 31 of trans1 */
-               if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
-                   dev_priv->vbt.ddi_port_info[port].dp_boost_level)
-                       iboost_bit = 1<<31;
+               if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+                       iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
                if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
                            port != PORT_A && port != PORT_E &&
@@ -443,38 +448,20 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-
-               if (dev_priv->vbt.edp.low_vswing) {
-                       ddi_translations_edp = bdw_ddi_translations_edp;
-                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
-               } else {
-                       ddi_translations_edp = bdw_ddi_translations_dp;
-                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-               }
-
-               ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-
+               ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-               hdmi_default_entry = 7;
        } else if (IS_HASWELL(dev_priv)) {
                ddi_translations_fdi = hsw_ddi_translations_fdi;
                ddi_translations_dp = hsw_ddi_translations_dp;
                ddi_translations_edp = hsw_ddi_translations_dp;
-               ddi_translations_hdmi = hsw_ddi_translations_hdmi;
                n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
-               n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-               hdmi_default_entry = 6;
        } else {
                WARN(1, "ddi translation table missing\n");
                ddi_translations_edp = bdw_ddi_translations_dp;
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-               ddi_translations_hdmi = bdw_ddi_translations_hdmi;
                n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-               hdmi_default_entry = 7;
        }
 
        switch (encoder->type) {
@@ -483,7 +470,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
                size = n_edp_entries;
                break;
        case INTEL_OUTPUT_DP:
-       case INTEL_OUTPUT_HDMI:
                ddi_translations = ddi_translations_dp;
                size = n_dp_entries;
                break;
@@ -501,19 +487,48 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
                I915_WRITE(DDI_BUF_TRANS_HI(port, i),
                           ddi_translations[i].trans2);
        }
+}
 
-       if (encoder->type != INTEL_OUTPUT_HDMI)
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. This function programs the correct values for
+ * HDMI/DVI use cases.
+ */
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 iboost_bit = 0;
+       int n_hdmi_entries, hdmi_level;
+       enum port port = intel_ddi_get_encoder_port(encoder);
+       const struct ddi_buf_trans *ddi_translations_hdmi;
+
+       if (IS_BROXTON(dev_priv))
                return;
 
-       /* Choose a good default if VBT is badly populated */
-       if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-           hdmi_level >= n_hdmi_entries)
-               hdmi_level = hdmi_default_entry;
+       hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+
+               /* If we're boosting the current, set bit 31 of trans1 */
+               if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+                       iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+       } else if (IS_BROADWELL(dev_priv)) {
+               ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+       } else if (IS_HASWELL(dev_priv)) {
+               ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+       } else {
+               WARN(1, "ddi translation table missing\n");
+               ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+       }
 
        /* Entry 9 is for HDMI: */
-       I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+       I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
                   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
-       I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+       I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
                   ddi_translations_hdmi[hdmi_level].trans2);
 }
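
One note on the hard-coded 9: with the DP/eDP loop split out, the single
HDMI/DVI level no longer reuses the loop counter i and always lands in the
last translation slot. The layout below is inferred, not stated in the patch:

	/* Inferred DDI buffer translation layout:
	 *   DDI_BUF_TRANS_{LO,HI}(port, 0..8)  - DP/eDP vswing/pre-emphasis
	 *   DDI_BUF_TRANS_{LO,HI}(port, 9)     - the one HDMI/DVI level
	 */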
 
@@ -531,6 +546,27 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
        DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 }
 
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+       switch (pll->id) {
+       case DPLL_ID_WRPLL1:
+               return PORT_CLK_SEL_WRPLL1;
+       case DPLL_ID_WRPLL2:
+               return PORT_CLK_SEL_WRPLL2;
+       case DPLL_ID_SPLL:
+               return PORT_CLK_SEL_SPLL;
+       case DPLL_ID_LCPLL_810:
+               return PORT_CLK_SEL_LCPLL_810;
+       case DPLL_ID_LCPLL_1350:
+               return PORT_CLK_SEL_LCPLL_1350;
+       case DPLL_ID_LCPLL_2700:
+               return PORT_CLK_SEL_LCPLL_2700;
+       default:
+               MISSING_CASE(pll->id);
+               return PORT_CLK_SEL_NONE;
+       }
+}
+
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
  * both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -546,11 +582,11 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
-       u32 temp, i, rx_ctl_val;
+       u32 temp, i, rx_ctl_val, ddi_pll_sel;
 
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
-               intel_prepare_ddi_buffer(encoder);
+               intel_prepare_dp_ddi_buffers(encoder);
        }
 
        /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@@ -577,8 +613,9 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
        /* Configure Port Clock Select */
-       I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
-       WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+       ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+       I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+       WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
        /* Start the training iterating through available voltages and emphasis,
         * testing each value twice. */
@@ -855,7 +892,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
        int link_clock = 0;
        uint32_t dpll_ctl1, dpll;
 
-       dpll = pipe_config->ddi_pll_sel;
+       dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
        dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
@@ -903,7 +940,7 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
        int link_clock = 0;
        u32 val, pll;
 
-       val = pipe_config->ddi_pll_sel;
+       val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
        switch (val & PORT_CLK_SEL_MASK) {
        case PORT_CLK_SEL_LCPLL_810:
                link_clock = 81000;
@@ -1111,7 +1148,6 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-       struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
@@ -1177,29 +1213,15 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                        temp |= TRANS_DDI_MODE_SELECT_HDMI;
                else
                        temp |= TRANS_DDI_MODE_SELECT_DVI;
-
        } else if (type == INTEL_OUTPUT_ANALOG) {
                temp |= TRANS_DDI_MODE_SELECT_FDI;
                temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
-
        } else if (type == INTEL_OUTPUT_DP ||
                   type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-               if (intel_dp->is_mst) {
-                       temp |= TRANS_DDI_MODE_SELECT_DP_MST;
-               } else
-                       temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+               temp |= TRANS_DDI_MODE_SELECT_DP_SST;
                temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
        } else if (type == INTEL_OUTPUT_DP_MST) {
-               struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
-
-               if (intel_dp->is_mst) {
-                       temp |= TRANS_DDI_MODE_SELECT_DP_MST;
-               } else
-                       temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+               temp |= TRANS_DDI_MODE_SELECT_DP_MST;
                temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
        } else {
                WARN(1, "Invalid encoder type %d for pipe %c\n",
@@ -1379,14 +1401,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
                           TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-                              u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+                               enum port port, uint8_t iboost)
+{
+       u32 tmp;
+
+       tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+       tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+       if (iboost)
+               tmp |= iboost << BALANCE_LEG_SHIFT(port);
+       else
+               tmp |= BALANCE_LEG_DISABLE(port);
+       I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
 {
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       enum port port = intel_dig_port->port;
+       int type = encoder->type;
        const struct ddi_buf_trans *ddi_translations;
        uint8_t iboost;
        uint8_t dp_iboost, hdmi_iboost;
        int n_entries;
-       u32 reg;
 
        /* VBT may override standard boost values */
        dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1466,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
                return;
        }
 
-       reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
-       reg &= ~BALANCE_LEG_MASK(port);
-       reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+       _skl_ddi_set_iboost(dev_priv, port, iboost);
 
-       if (iboost)
-               reg |= iboost << BALANCE_LEG_SHIFT(port);
-       else
-               reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
-
-       I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+       if (port == PORT_A && intel_dig_port->max_lanes == 4)
+               _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
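
The extra PORT_E write deserves a gloss: on these parts DDI A and DDI E share
a four-lane group, so an eDP port A configured for all four lanes
(max_lanes == 4) must mirror its balance-leg setting onto E. That rationale
is inferred from the DDI_A_4_LANES wiring, not spelled out in the patch.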
 
 static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1600,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
        level = translate_signal_level(signal_levels);
 
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-               skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+               skl_ddi_set_iboost(encoder, level);
        else if (IS_BROXTON(dev_priv))
                bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
@@ -1576,13 +1608,15 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 }
 
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *pipe_config)
+                         struct intel_shared_dpll *pll)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
 
+       if (WARN_ON(!pll))
+               return;
+
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               uint32_t dpll = pipe_config->ddi_pll_sel;
                uint32_t val;
 
                /* DDI -> PLL mapping  */
@@ -1590,61 +1624,91 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 
                val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
                        DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-               val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+               val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
                        DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
                I915_WRITE(DPLL_CTRL2, val);
 
        } else if (INTEL_INFO(dev_priv)->gen < 9) {
-               WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-               I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+               I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
        }
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+                                   int link_rate, uint32_t lane_count,
+                                   struct intel_shared_dpll *pll,
+                                   bool link_mst)
 {
-       struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-       enum port port = intel_ddi_get_encoder_port(intel_encoder);
-       int type = intel_encoder->type;
-
-       if (type == INTEL_OUTPUT_HDMI) {
-               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
-               intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-       }
-
-       intel_prepare_ddi_buffer(intel_encoder);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = intel_ddi_get_encoder_port(encoder);
 
-       if (type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       intel_dp_set_link_params(intel_dp, link_rate, lane_count,
+                                link_mst);
+       if (encoder->type == INTEL_OUTPUT_EDP)
                intel_edp_panel_on(intel_dp);
-       }
 
-       intel_ddi_clk_select(intel_encoder, crtc->config);
+       intel_ddi_clk_select(encoder, pll);
+       intel_prepare_dp_ddi_buffers(encoder);
+       intel_ddi_init_dp_buf_reg(encoder);
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_start_link_train(intel_dp);
+       if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+               intel_dp_stop_link_train(intel_dp);
+}
 
-       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+                                     bool has_hdmi_sink,
+                                     struct drm_display_mode *adjusted_mode,
+                                     struct intel_shared_dpll *pll)
+{
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_encoder *drm_encoder = &encoder->base;
+       enum port port = intel_ddi_get_encoder_port(encoder);
+       int level = intel_ddi_hdmi_level(dev_priv, port);
 
-               intel_dp_set_link_params(intel_dp, crtc->config);
+       intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+       intel_ddi_clk_select(encoder, pll);
+       intel_prepare_hdmi_ddi_buffers(encoder);
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               skl_ddi_set_iboost(encoder, level);
+       else if (IS_BROXTON(dev_priv))
+               bxt_ddi_vswing_sequence(dev_priv, level, port,
+                                       INTEL_OUTPUT_HDMI);
 
-               intel_ddi_init_dp_buf_reg(intel_encoder);
+       intel_hdmi->set_infoframes(drm_encoder,
+                                  has_hdmi_sink,
+                                  adjusted_mode);
+}
 
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
-               intel_dp_start_link_train(intel_dp);
-               if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
-                       intel_dp_stop_link_train(intel_dp);
-       } else if (type == INTEL_OUTPUT_HDMI) {
-               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+       int type = intel_encoder->type;
 
-               intel_hdmi->set_infoframes(encoder,
-                                          crtc->config->has_hdmi_sink,
-                                          &crtc->config->base.adjusted_mode);
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
+               intel_ddi_pre_enable_dp(intel_encoder,
+                                       crtc->config->port_clock,
+                                       crtc->config->lane_count,
+                                       crtc->config->shared_dpll,
+                                       intel_crtc_has_type(crtc->config,
+                                                           INTEL_OUTPUT_DP_MST));
+       }
+       if (type == INTEL_OUTPUT_HDMI) {
+               intel_ddi_pre_enable_hdmi(intel_encoder,
+                                         crtc->config->has_hdmi_sink,
+                                         &crtc->config->base.adjusted_mode,
+                                         crtc->config->shared_dpll);
        }
 }
 
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
+                                  struct intel_crtc_state *old_crtc_state,
+                                  struct drm_connector_state *old_conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
@@ -1654,6 +1718,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        uint32_t val;
        bool wait = false;
 
+       /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+
        val = I915_READ(DDI_BUF_CTL(port));
        if (val & DDI_BUF_CTL_ENABLE) {
                val &= ~DDI_BUF_CTL_ENABLE;
@@ -1689,7 +1755,42 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        }
 }
 
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+       uint32_t val;
+
+       /*
+        * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+        * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+        * step 13 is the correct place for it. Step 18 is where it was
+        * originally before the BUN.
+        */
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_RX_ENABLE;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+       intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+
+       val = I915_READ(FDI_RX_MISC(PIPE_A));
+       val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+       val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+       I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_PCDCLK;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_RX_PLL_ENABLE;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
@@ -1718,7 +1819,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 
                intel_edp_backlight_on(intel_dp);
                intel_psr_enable(intel_dp);
-               intel_edp_drrs_enable(intel_dp);
+               intel_edp_drrs_enable(intel_dp, pipe_config);
        }
 
        if (intel_crtc->config->has_audio) {
@@ -1727,7 +1828,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        }
 }
 
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+static void intel_disable_ddi(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
@@ -1744,7 +1847,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-               intel_edp_drrs_disable(intel_dp);
+               intel_edp_drrs_disable(intel_dp, old_crtc_state);
                intel_psr_disable(intel_dp);
                intel_edp_backlight_off(intel_dp);
        }
@@ -2033,7 +2136,9 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
        }
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
+                                  struct intel_crtc_state *pipe_config,
+                                  struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2105,7 +2210,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 
        val = DP_TP_CTL_ENABLE |
              DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-       if (intel_dp->is_mst)
+       if (intel_dp->link_mst)
                val |= DP_TP_CTL_MODE_MST;
        else {
                val |= DP_TP_CTL_MODE_SST;
@@ -2122,38 +2227,6 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
        udelay(600);
 }
 
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-       uint32_t val;
-
-       /*
-        * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
-        * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
-        * step 13 is the correct place for it. Step 18 is where it was
-        * originally before the BUN.
-        */
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_RX_ENABLE;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-       intel_ddi_post_disable(intel_encoder);
-
-       val = I915_READ(FDI_RX_MISC(PIPE_A));
-       val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
-       val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-       I915_WRITE(FDI_RX_MISC(PIPE_A), val);
-
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_PCDCLK;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_RX_PLL_ENABLE;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -2253,7 +2326,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 }
 
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int type = encoder->type;
@@ -2266,9 +2340,9 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
        if (type == INTEL_OUTPUT_HDMI)
-               ret = intel_hdmi_compute_config(encoder, pipe_config);
+               ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
        else
-               ret = intel_dp_compute_config(encoder, pipe_config);
+               ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
 
        if (IS_BROXTON(dev_priv) && ret)
                pipe_config->lane_lat_optim_mask =
@@ -2319,6 +2393,45 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
        return connector;
 }
 
+struct intel_shared_dpll *
+intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
+{
+       struct intel_connector *connector = intel_dp->attached_connector;
+       struct intel_encoder *encoder = connector->encoder;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_shared_dpll *pll = NULL;
+       struct intel_shared_dpll_config tmp_pll_config;
+       enum intel_dpll_id dpll_id;
+
+       if (IS_BROXTON(dev_priv)) {
+               dpll_id = (enum intel_dpll_id)dig_port->port;
+               /*
+                * Select the required PLL. This works for platforms where
+                * there is no shared DPLL.
+                */
+               pll = &dev_priv->shared_dplls[dpll_id];
+               if (WARN_ON(pll->active_mask)) {
+                       DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
+                                 pll->active_mask);
+                       return NULL;
+               }
+               tmp_pll_config = pll->config;
+               if (!bxt_ddi_dp_set_dpll_hw_state(clock,
+                                                 &pll->config.hw_state)) {
+                       DRM_ERROR("Could not setup DPLL\n");
+                       pll->config = tmp_pll_config;
+                       return NULL;
+               }
+       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               pll = skl_find_link_pll(dev_priv, clock);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               pll = hsw_ddi_dp_get_dpll(encoder, clock);
+       }
+       return pll;
+}
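
A hypothetical caller, sketched to show the contract (the function below is
not in the patch; the NULL return doubles as the error path):

	static int example_select_link_pll(struct intel_dp *intel_dp, int clock)
	{
		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
		struct intel_shared_dpll *pll;

		pll = intel_ddi_get_link_dpll(intel_dp, clock);
		if (!pll)
			return -EINVAL;

		/* program the DDI -> PLL mapping chosen above */
		intel_ddi_clk_select(encoder, pll);
		return 0;
	}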
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
index cba137f9ad3e7a1c8374b1ee8312510442f59db8..73b6858600acf56b30ef75e62c9a63804ed305e3 100644 (file)
@@ -46,71 +46,70 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
        u32 fuse, eu_dis;
 
        fuse = I915_READ(CHV_FUSE_GT);
 
-       info->slice_total = 1;
+       sseu->slice_mask = BIT(0);
 
        if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-               info->subslice_per_slice++;
+               sseu->subslice_mask |= BIT(0);
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                 CHV_FGT_EU_DIS_SS0_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
+               sseu->eu_total += 8 - hweight32(eu_dis);
        }
 
        if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-               info->subslice_per_slice++;
+               sseu->subslice_mask |= BIT(1);
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
+               sseu->eu_total += 8 - hweight32(eu_dis);
        }
 
-       info->subslice_total = info->subslice_per_slice;
        /*
         * CHV expected to always have a uniform distribution of EU
         * across subslices.
        */
-       info->eu_per_subslice = info->subslice_total ?
-                               info->eu_total / info->subslice_total :
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               sseu->eu_total / sseu_subslice_total(sseu) :
                                0;
        /*
         * CHV supports subslice power gating on devices with more than
         * one subslice, and supports EU power gating on devices with
         * more than one EU pair per subslice.
        */
-       info->has_slice_pg = 0;
-       info->has_subslice_pg = (info->subslice_total > 1);
-       info->has_eu_pg = (info->eu_per_subslice > 2);
+       sseu->has_slice_pg = 0;
+       sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
 }
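
The sseu_subslice_total() calls above replace the old cached subslice_total
field; under the mask representation it is presumably just a popcount
product, along these lines (assumed shape; the masks are u8 in sseu_dev_info):

	static inline unsigned int
	sseu_subslice_total(const struct sseu_dev_info *sseu)
	{
		/* enabled slices x enabled subslices per slice */
		return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
	}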
 
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &info->sseu;
        int s_max = 3, ss_max = 4, eu_max = 8;
        int s, ss;
-       u32 fuse2, s_enable, ss_disable, eu_disable;
+       u32 fuse2, eu_disable;
        u8 eu_mask = 0xff;
 
        fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
 
-       info->slice_total = hweight32(s_enable);
        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
+       sseu->subslice_mask = (1 << ss_max) - 1;
+       sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                                GEN9_F2_SS_DIS_SHIFT);
 
        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
        */
        for (s = 0; s < s_max; s++) {
-               if (!(s_enable & BIT(s)))
+               if (!(sseu->slice_mask & BIT(s)))
                        /* skip disabled slice */
                        continue;
 
@@ -118,7 +117,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                for (ss = 0; ss < ss_max; ss++) {
                        int eu_per_ss;
 
-                       if (ss_disable & BIT(ss))
+                       if (!(sseu->subslice_mask & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
@@ -131,9 +130,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                         * subslices if they are unbalanced.
                         */
                        if (eu_per_ss == 7)
-                               info->subslice_7eu[s] |= BIT(ss);
+                               sseu->subslice_7eu[s] |= BIT(ss);
 
-                       info->eu_total += eu_per_ss;
+                       sseu->eu_total += eu_per_ss;
                }
        }
 
@@ -144,9 +143,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
         * recovery. BXT is expected to be perfectly uniform in EU
         * distribution.
        */
-       info->eu_per_subslice = info->subslice_total ?
-                               DIV_ROUND_UP(info->eu_total,
-                                            info->subslice_total) : 0;
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               DIV_ROUND_UP(sseu->eu_total,
+                                            sseu_subslice_total(sseu)) : 0;
        /*
         * SKL supports slice power gating on devices with more than
         * one slice, and supports EU power gating on devices with
@@ -155,15 +154,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
         * supports EU power gating on devices with more than one EU
         * pair per subslice.
        */
-       info->has_slice_pg =
+       sseu->has_slice_pg =
                (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-               info->slice_total > 1;
-       info->has_subslice_pg =
-               IS_BROXTON(dev_priv) && info->subslice_total > 1;
-       info->has_eu_pg = info->eu_per_subslice > 2;
+               hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg =
+               IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
        if (IS_BROXTON(dev_priv)) {
-#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+#define IS_SS_DISABLED(ss)     (!(sseu->subslice_mask & BIT(ss)))
                /*
                 * There is a HW issue in 2x6 fused down parts that requires
                 * Pooled EU to be enabled as a WA. The pool configuration
@@ -171,19 +170,18 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                 * doesn't affect if the device has all 3 subslices enabled.
                 */
                /* WaEnablePooledEuFor2x6:bxt */
-               info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-                                      (info->subslice_per_slice == 2 &&
+               info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
+                                      (hweight8(sseu->subslice_mask) == 2 &&
                                        INTEL_REVID(dev_priv) < BXT_REVID_C0));
 
-               info->min_eu_in_pool = 0;
+               sseu->min_eu_in_pool = 0;
                if (info->has_pooled_eu) {
-                       if (IS_SS_DISABLED(ss_disable, 0) ||
-                           IS_SS_DISABLED(ss_disable, 2))
-                               info->min_eu_in_pool = 3;
-                       else if (IS_SS_DISABLED(ss_disable, 1))
-                               info->min_eu_in_pool = 6;
+                       if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+                               sseu->min_eu_in_pool = 3;
+                       else if (IS_SS_DISABLED(1))
+                               sseu->min_eu_in_pool = 6;
                        else
-                               info->min_eu_in_pool = 9;
+                               sseu->min_eu_in_pool = 9;
                }
 #undef IS_SS_DISABLED
        }
@@ -191,14 +189,20 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
        const int s_max = 3, ss_max = 3, eu_max = 8;
        int s, ss;
-       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+       u32 fuse2, eu_disable[s_max];
 
        fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       sseu->subslice_mask = BIT(ss_max) - 1;
+       sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+                                GEN8_F2_SS_DIS_SHIFT);
 
        eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
        eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -208,28 +212,19 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                        ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
                         (32 - GEN8_EU_DIS1_S2_SHIFT));
 
-       info->slice_total = hweight32(s_enable);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
-
        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
+               if (!(sseu->slice_mask & BIT(s)))
                        /* skip disabled slice */
                        continue;
 
                for (ss = 0; ss < ss_max; ss++) {
                        u32 n_disabled;
 
-                       if (ss_disable & (0x1 << ss))
+                       if (!(sseu->subslice_mask & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
@@ -239,9 +234,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                         * Record which subslices have 7 EUs.
                         */
                        if (eu_max - n_disabled == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
+                               sseu->subslice_7eu[s] |= 1 << ss;
 
-                       info->eu_total += eu_max - n_disabled;
+                       sseu->eu_total += eu_max - n_disabled;
                }
        }
 
@@ -250,16 +245,17 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
         * subslices with the exception that any one EU in any one subslice may
         * be fused off for die recovery.
         */
-       info->eu_per_subslice = info->subslice_total ?
-               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               DIV_ROUND_UP(sseu->eu_total,
+                                            sseu_subslice_total(sseu)) : 0;
 
        /*
         * BDW supports slice power gating on devices with more than
         * one slice.
         */
-       info->has_slice_pg = (info->slice_total > 1);
-       info->has_subslice_pg = 0;
-       info->has_eu_pg = 0;
+       sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg = 0;
+       sseu->has_eu_pg = 0;
 }
 
 /*
@@ -374,15 +370,19 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                info->has_snoop = false;
 
-       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+       DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
+       DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
+       DRM_DEBUG_DRIVER("subslice total: %u\n",
+                        sseu_subslice_total(&info->sseu));
+       DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
+       DRM_DEBUG_DRIVER("subslice per slice: %u\n",
+                        hweight8(info->sseu.subslice_mask));
+       DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
+       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
        DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-                        info->has_slice_pg ? "y" : "n");
+                        info->sseu.has_slice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-                        info->has_subslice_pg ? "y" : "n");
+                        info->sseu.has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-                        info->has_eu_pg ? "y" : "n");
+                        info->sseu.has_eu_pg ? "y" : "n");
 }
index c457eed76f1f7a1fca441d4714c2aebf95534d05..8d4c35d55b1bbd4e2848b5bf4d69a0abbdd05cbc 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_dmabuf.h"
@@ -1201,8 +1202,8 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
        if (HAS_PCH_SPLIT(dev)) {
                u32 port_sel;
 
-               pp_reg = PCH_PP_CONTROL;
-               port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
+               pp_reg = PP_CONTROL(0);
+               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
                if (port_sel == PANEL_PORT_SELECT_LVDS &&
                    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
@@ -1210,10 +1211,10 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                /* XXX: else fix for eDP */
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                /* presumably write lock depends on pipe, not port select */
-               pp_reg = VLV_PIPE_PP_CONTROL(pipe);
+               pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
-               pp_reg = PP_CONTROL;
+               pp_reg = PP_CONTROL(0);
                if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
        }
@@ -1906,7 +1907,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
        }
 }
 
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 {
        u32 val;
 
@@ -1958,12 +1959,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv))
+       if (HAS_GMCH_DISPLAY(dev_priv)) {
                if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
-       else {
+       } else {
                if (crtc->config->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
@@ -2146,33 +2147,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
        }
 }
 
-static void
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
-                  struct drm_framebuffer *fb)
-{
-       struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
-       unsigned int tile_size, tile_width, tile_height, cpp;
-
-       tile_size = intel_tile_size(dev_priv);
-
-       cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-       intel_tile_dims(dev_priv, &tile_width, &tile_height,
-                       fb->modifier[0], cpp);
-
-       info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
-       info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
-
-       if (info->pixel_format == DRM_FORMAT_NV12) {
-               cpp = drm_format_plane_cpp(fb->pixel_format, 1);
-               intel_tile_dims(dev_priv, &tile_width, &tile_height,
-                               fb->modifier[1], cpp);
-
-               info->uv_offset = fb->offsets[1];
-               info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
-               info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
-       }
-}
-
 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
 {
        if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2205,16 +2179,15 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv
        }
 }
 
-int
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-                          unsigned int rotation)
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
+       struct i915_vma *vma;
        u32 alignment;
-       int ret;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -2239,75 +2212,112 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         */
        intel_runtime_pm_get(dev_priv);
 
-       ret = i915_gem_object_pin_to_display_plane(obj, alignment,
-                                                  &view);
-       if (ret)
-               goto err_pm;
-
-       /* Install a fence for tiled scan-out. Pre-i965 always needs a
-        * fence, whereas 965+ only requires a fence if using
-        * framebuffer compression.  For simplicity, we always install
-        * a fence as the cost is not that onerous.
-        */
-       if (view.type == I915_GGTT_VIEW_NORMAL) {
-               ret = i915_gem_object_get_fence(obj);
-               if (ret == -EDEADLK) {
-                       /*
-                        * -EDEADLK means there are no free fences
-                        * no pending flips.
-                        *
-                        * This is propagated to atomic, but it uses
-                        * -EDEADLK to force a locking recovery, so
-                        * change the returned error to -EBUSY.
-                        */
-                       ret = -EBUSY;
-                       goto err_unpin;
-               } else if (ret)
-                       goto err_unpin;
+       vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+       if (IS_ERR(vma))
+               goto err;
 
-               i915_gem_object_pin_fence(obj);
+       if (i915_vma_is_map_and_fenceable(vma)) {
+               /* Install a fence for tiled scan-out. Pre-i965 always needs a
+                * fence, whereas 965+ only requires a fence if using
+                * framebuffer compression.  For simplicity, we always, when
+                * possible, install a fence as the cost is not that onerous.
+                *
+                * If we fail to fence the tiled scanout, then either the
+                * modeset will reject the change (which is highly unlikely as
+                * the affected systems, all but one, do not have unmappable
+                * space) or we will not be able to enable full powersaving
+                * techniques (also likely not to apply due to various limits
+                * FBC and the like impose on the size of the buffer, which
+                * presumably we violated anyway with this unmappable buffer).
+                * Anyway, it is presumably better to stumble onwards with
+                * something and try to run the system in a "less than optimal"
+                * mode that matches the user configuration.
+                */
+               if (i915_vma_get_fence(vma) == 0)
+                       i915_vma_pin_fence(vma);
        }
 
+err:
        intel_runtime_pm_put(dev_priv);
-       return 0;
-
-err_unpin:
-       i915_gem_object_unpin_from_display_plane(obj, &view);
-err_pm:
-       intel_runtime_pm_put(dev_priv);
-       return ret;
+       return vma;
 }
 
 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
+       struct i915_vma *vma;
 
        WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
        intel_fill_fb_ggtt_view(&view, fb, rotation);
+       vma = i915_gem_object_to_ggtt(obj, &view);
 
-       if (view.type == I915_GGTT_VIEW_NORMAL)
-               i915_gem_object_unpin_fence(obj);
+       i915_vma_unpin_fence(vma);
+       i915_gem_object_unpin_from_display_plane(vma);
+}
 
-       i915_gem_object_unpin_from_display_plane(obj, &view);
+static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
+                         unsigned int rotation)
+{
+       if (intel_rotation_90_or_270(rotation))
+               return to_intel_framebuffer(fb)->rotated[plane].pitch;
+       else
+               return fb->pitches[plane];
+}
+
+/*
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+                         const struct intel_plane_state *state,
+                         int plane)
+{
+       const struct drm_framebuffer *fb = state->base.fb;
+       unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+       unsigned int pitch = fb->pitches[plane];
+
+       return y * pitch + x * cpp;
+}
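
The conversion is plain row-major addressing. A minimal standalone
sketch with made-up numbers (XRGB8888, so cpp = 4, on a 1920-pixel-wide
framebuffer):

    #include <stdio.h>

    /* y * pitch + x * cpp, as in intel_fb_xy_to_linear() */
    static unsigned int xy_to_linear(int x, int y,
                                     unsigned int pitch, unsigned int cpp)
    {
            return y * pitch + x * cpp;
    }

    int main(void)
    {
            printf("%u\n", xy_to_linear(100, 10, 7680, 4)); /* 77200 */
            return 0;
    }
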
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+                         const struct intel_plane_state *state,
+                         int plane)
+{
+       const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
+       unsigned int rotation = state->base.rotation;
+
+       if (intel_rotation_90_or_270(rotation)) {
+               *x += intel_fb->rotated[plane].x;
+               *y += intel_fb->rotated[plane].y;
+       } else {
+               *x += intel_fb->normal[plane].x;
+               *y += intel_fb->normal[plane].y;
+       }
 }
 
 /*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- *
  * Input tile dimensions and pitch must already be
  * rotated to match x and y, and in pixel units.
  */
-static u32 intel_adjust_tile_offset(int *x, int *y,
-                                   unsigned int tile_width,
-                                   unsigned int tile_height,
-                                   unsigned int tile_size,
-                                   unsigned int pitch_tiles,
-                                   u32 old_offset,
-                                   u32 new_offset)
-{
+static u32 _intel_adjust_tile_offset(int *x, int *y,
+                                    unsigned int tile_width,
+                                    unsigned int tile_height,
+                                    unsigned int tile_size,
+                                    unsigned int pitch_tiles,
+                                    u32 old_offset,
+                                    u32 new_offset)
+{
+       unsigned int pitch_pixels = pitch_tiles * tile_width;
        unsigned int tiles;
 
        WARN_ON(old_offset & (tile_size - 1));
@@ -2319,6 +2329,54 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
        *y += tiles / pitch_tiles * tile_height;
        *x += tiles % pitch_tiles * tile_width;
 
+       /* minimize x in case it got needlessly big */
+       *y += *x / pitch_pixels * tile_height;
+       *x %= pitch_pixels;
+
+       return new_offset;
+}
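
The arithmetic above can be exercised in isolation. A standalone sketch
with assumed X-tile geometry (128x8 pixels per 4096-byte tile at cpp = 4,
ten tiles per row); the numbers are illustrative only:

    #include <stdio.h>

    /* mirror of _intel_adjust_tile_offset(): fold a tile-aligned byte
     * delta into the x/y offsets, then keep x smaller than one row */
    static void adjust(int *x, int *y, unsigned int tile_width,
                       unsigned int tile_height, unsigned int tile_size,
                       unsigned int pitch_tiles,
                       unsigned int old_offset, unsigned int new_offset)
    {
            unsigned int pitch_pixels = pitch_tiles * tile_width;
            unsigned int tiles = (old_offset - new_offset) / tile_size;

            *y += tiles / pitch_tiles * tile_height;
            *x += tiles % pitch_tiles * tile_width;
            *y += *x / pitch_pixels * tile_height;
            *x %= pitch_pixels;
    }

    int main(void)
    {
            int x = 0, y = 0;

            adjust(&x, &y, 128, 8, 4096, 10, 21 * 4096, 0);
            printf("x=%d y=%d\n", x, y); /* x=128 y=16 */
            return 0;
    }
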
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+                                   const struct intel_plane_state *state, int plane,
+                                   u32 old_offset, u32 new_offset)
+{
+       const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+       const struct drm_framebuffer *fb = state->base.fb;
+       unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+       unsigned int rotation = state->base.rotation;
+       unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
+
+       WARN_ON(new_offset > old_offset);
+
+       if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+               unsigned int tile_size, tile_width, tile_height;
+               unsigned int pitch_tiles;
+
+               tile_size = intel_tile_size(dev_priv);
+               intel_tile_dims(dev_priv, &tile_width, &tile_height,
+                               fb->modifier[plane], cpp);
+
+               if (intel_rotation_90_or_270(rotation)) {
+                       pitch_tiles = pitch / tile_height;
+                       swap(tile_width, tile_height);
+               } else {
+                       pitch_tiles = pitch / (tile_width * cpp);
+               }
+
+               _intel_adjust_tile_offset(x, y, tile_width, tile_height,
+                                         tile_size, pitch_tiles,
+                                         old_offset, new_offset);
+       } else {
+               old_offset += *y * pitch + *x * cpp;
+
+               *y = (old_offset - new_offset) / pitch;
+               *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+       }
+
        return new_offset;
 }
 
@@ -2329,18 +2387,24 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
  * In the 90/270 rotated case, x and y are assumed
  * to be already rotated to match the rotated GTT view, and
  * pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
  */
-u32 intel_compute_tile_offset(int *x, int *y,
-                             const struct drm_framebuffer *fb, int plane,
-                             unsigned int pitch,
-                             unsigned int rotation)
+static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
+                                     int *x, int *y,
+                                     const struct drm_framebuffer *fb, int plane,
+                                     unsigned int pitch,
+                                     unsigned int rotation,
+                                     u32 alignment)
 {
-       const struct drm_i915_private *dev_priv = to_i915(fb->dev);
        uint64_t fb_modifier = fb->modifier[plane];
        unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
-       u32 offset, offset_aligned, alignment;
+       u32 offset, offset_aligned;
 
-       alignment = intel_surf_alignment(dev_priv, fb_modifier);
        if (alignment)
                alignment--;
 
@@ -2368,9 +2432,9 @@ u32 intel_compute_tile_offset(int *x, int *y,
                offset = (tile_rows * pitch_tiles + tiles) * tile_size;
                offset_aligned = offset & ~alignment;
 
-               intel_adjust_tile_offset(x, y, tile_width, tile_height,
-                                        tile_size, pitch_tiles,
-                                        offset, offset_aligned);
+               _intel_adjust_tile_offset(x, y, tile_width, tile_height,
+                                         tile_size, pitch_tiles,
+                                         offset, offset_aligned);
        } else {
                offset = *y * pitch + *x * cpp;
                offset_aligned = offset & ~alignment;
@@ -2382,6 +2446,177 @@ u32 intel_compute_tile_offset(int *x, int *y,
        return offset_aligned;
 }
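
The tiled branch boils down to locating the pixel's tile in a row-major
grid of tiles. A standalone sketch of that step, using the same assumed
tile geometry as above:

    #include <stdio.h>

    int main(void)
    {
            unsigned int tile_width = 128, tile_height = 8; /* pixels */
            unsigned int tile_size = 4096, pitch_tiles = 10;
            unsigned int x = 300, y = 20;

            unsigned int tile_rows = y / tile_height;       /* 2 */
            unsigned int tiles = x / tile_width;            /* 2 */
            unsigned int offset =
                    (tile_rows * pitch_tiles + tiles) * tile_size;

            printf("offset=%u\n", offset); /* tile 22 -> 90112 */
            return 0;
    }
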
 
+u32 intel_compute_tile_offset(int *x, int *y,
+                             const struct intel_plane_state *state,
+                             int plane)
+{
+       const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+       const struct drm_framebuffer *fb = state->base.fb;
+       unsigned int rotation = state->base.rotation;
+       int pitch = intel_fb_pitch(fb, plane, rotation);
+       u32 alignment;
+
+       /* AUX_DIST needs only 4K alignment */
+       if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
+               alignment = 4096;
+       else
+               alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+
+       return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
+                                         rotation, alignment);
+}
+
+/* Convert the fb->offset[] linear offset into x/y offsets */
+static void intel_fb_offset_to_xy(int *x, int *y,
+                                 const struct drm_framebuffer *fb, int plane)
+{
+       unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+       unsigned int pitch = fb->pitches[plane];
+       u32 linear_offset = fb->offsets[plane];
+
+       *y = linear_offset / pitch;
+       *x = linear_offset % pitch / cpp;
+}
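
This is the exact inverse of intel_fb_xy_to_linear() for linear layouts;
a standalone sketch reusing the numbers from the earlier example:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pitch = 7680, cpp = 4; /* 1920 px, XRGB8888 */
            unsigned int linear_offset = 77200;

            printf("x=%u y=%u\n",
                   linear_offset % pitch / cpp, /* 100 */
                   linear_offset / pitch);      /* 10 */
            return 0;
    }
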
+
+static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+{
+       switch (fb_modifier) {
+       case I915_FORMAT_MOD_X_TILED:
+               return I915_TILING_X;
+       case I915_FORMAT_MOD_Y_TILED:
+               return I915_TILING_Y;
+       default:
+               return I915_TILING_NONE;
+       }
+}
+
+static int
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+                  struct drm_framebuffer *fb)
+{
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+       u32 gtt_offset_rotated = 0;
+       unsigned int max_size = 0;
+       uint32_t format = fb->pixel_format;
+       int i, num_planes = drm_format_num_planes(format);
+       unsigned int tile_size = intel_tile_size(dev_priv);
+
+       for (i = 0; i < num_planes; i++) {
+               unsigned int width, height;
+               unsigned int cpp, size;
+               u32 offset;
+               int x, y;
+
+               cpp = drm_format_plane_cpp(format, i);
+               width = drm_format_plane_width(fb->width, format, i);
+               height = drm_format_plane_height(fb->height, format, i);
+
+               intel_fb_offset_to_xy(&x, &y, fb, i);
+
+               /*
+                * The fence (if used) is aligned to the start of the object
+                * so having the framebuffer wrap around across the edge of the
+                * fenced region doesn't really work. We have no API to configure
+                * the fence start offset within the object (nor could we probably
+                * on gen2/3). So it's easier if we just require that the
+                * fb layout agrees with the fence layout. We already check that the
+                * fb stride matches the fence stride elsewhere.
+                */
+               if (i915_gem_object_is_tiled(intel_fb->obj) &&
+                   (x + width) * cpp > fb->pitches[i]) {
+                       DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
+                                 i, fb->offsets[i]);
+                       return -EINVAL;
+               }
+
+               /*
+                * First pixel of the framebuffer from
+                * the start of the normal gtt mapping.
+                */
+               intel_fb->normal[i].x = x;
+               intel_fb->normal[i].y = y;
+
+               offset = _intel_compute_tile_offset(dev_priv, &x, &y,
+                                                   fb, 0, fb->pitches[i],
+                                                   DRM_ROTATE_0, tile_size);
+               offset /= tile_size;
+
+               if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+                       unsigned int tile_width, tile_height;
+                       unsigned int pitch_tiles;
+                       struct drm_rect r;
+
+                       intel_tile_dims(dev_priv, &tile_width, &tile_height,
+                                       fb->modifier[i], cpp);
+
+                       rot_info->plane[i].offset = offset;
+                       rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
+                       rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+                       rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+                       intel_fb->rotated[i].pitch =
+                               rot_info->plane[i].height * tile_height;
+
+                       /* how many tiles does this plane need */
+                       size = rot_info->plane[i].stride * rot_info->plane[i].height;
+                       /*
+                        * If the plane isn't horizontally tile aligned,
+                        * we need one more tile.
+                        */
+                       if (x != 0)
+                               size++;
+
+                       /* rotate the x/y offsets to match the GTT view */
+                       r.x1 = x;
+                       r.y1 = y;
+                       r.x2 = x + width;
+                       r.y2 = y + height;
+                       drm_rect_rotate(&r,
+                                       rot_info->plane[i].width * tile_width,
+                                       rot_info->plane[i].height * tile_height,
+                                       DRM_ROTATE_270);
+                       x = r.x1;
+                       y = r.y1;
+
+                       /* rotate the tile dimensions to match the GTT view */
+                       pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
+                       swap(tile_width, tile_height);
+
+                       /*
+                        * We only keep the x/y offsets, so push all of the
+                        * gtt offset into the x/y offsets.
+                        */
+                       _intel_adjust_tile_offset(&x, &y, tile_width,
+                                                 tile_height, tile_size, pitch_tiles,
+                                                 gtt_offset_rotated * tile_size, 0);
+
+                       gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
+
+                       /*
+                        * First pixel of the framebuffer from
+                        * the start of the rotated gtt mapping.
+                        */
+                       intel_fb->rotated[i].x = x;
+                       intel_fb->rotated[i].y = y;
+               } else {
+                       size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
+                                           x * cpp, tile_size);
+               }
+
+               /* how many tiles are needed in the bo in total */
+               max_size = max(max_size, offset + size);
+       }
+
+       if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
+               DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
+                         max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
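
The per-plane bookkeeping above measures everything in whole tiles. A
standalone sketch of the stride/width/height rounding for an assumed
Y-tile layout (32x32 pixels per tile at cpp = 4):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int tile_width = 32, tile_height = 32, cpp = 4;
            unsigned int pitch = 4096; /* bytes */
            unsigned int x = 0, y = 0, width = 1000, height = 500;

            printf("%u tiles/row, %ux%u tiles\n",
                   DIV_ROUND_UP(pitch, tile_width * cpp),  /* 32 */
                   DIV_ROUND_UP(x + width, tile_width),    /* 32 */
                   DIV_ROUND_UP(y + height, tile_height)); /* 16 */
            return 0;
    }
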
+
 static int i9xx_format_to_fourcc(int format)
 {
        switch (format) {
@@ -2465,9 +2700,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                return false;
        }
 
-       obj->tiling_mode = plane_config->tiling;
-       if (obj->tiling_mode == I915_TILING_X)
-               obj->stride = fb->pitches[0];
+       if (plane_config->tiling == I915_TILING_X)
+               obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
 
        mode_cmd.pixel_format = fb->pixel_format;
        mode_cmd.width = fb->width;
@@ -2488,7 +2722,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
        return true;
 
 out_unref_obj:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
        return false;
 }
@@ -2552,7 +2786,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                        continue;
 
                obj = intel_fb_obj(fb);
-               if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2565,7 +2799,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       to_intel_plane_state(plane_state)->visible = false;
+       to_intel_plane_state(plane_state)->base.visible = false;
        crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
        intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2583,24 +2817,188 @@ valid_fb:
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;
 
-       intel_state->src.x1 = plane_state->src_x;
-       intel_state->src.y1 = plane_state->src_y;
-       intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
-       intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
-       intel_state->dst.x1 = plane_state->crtc_x;
-       intel_state->dst.y1 = plane_state->crtc_y;
-       intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
-       intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+       intel_state->base.src.x1 = plane_state->src_x;
+       intel_state->base.src.y1 = plane_state->src_y;
+       intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
+       intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
+       intel_state->base.dst.x1 = plane_state->crtc_x;
+       intel_state->base.dst.y1 = plane_state->crtc_y;
+       intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+       intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
 
        obj = intel_fb_obj(fb);
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;
 
        drm_framebuffer_reference(fb);
        primary->fb = primary->state->fb = fb;
        primary->crtc = primary->state->crtc = &intel_crtc->base;
        intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
-       obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+       atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+                 &obj->frontbuffer_bits);
+}
+
+static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
+                              unsigned int rotation)
+{
+       int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+       switch (fb->modifier[plane]) {
+       case DRM_FORMAT_MOD_NONE:
+       case I915_FORMAT_MOD_X_TILED:
+               switch (cpp) {
+               case 8:
+                       return 4096;
+               case 4:
+               case 2:
+               case 1:
+                       return 8192;
+               default:
+                       MISSING_CASE(cpp);
+                       break;
+               }
+               break;
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               switch (cpp) {
+               case 8:
+                       return 2048;
+               case 4:
+                       return 4096;
+               case 2:
+               case 1:
+                       return 8192;
+               default:
+                       MISSING_CASE(cpp);
+                       break;
+               }
+               break;
+       default:
+               MISSING_CASE(fb->modifier[plane]);
+       }
+
+       return 2048;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int x = plane_state->base.src.x1 >> 16;
+       int y = plane_state->base.src.y1 >> 16;
+       int w = drm_rect_width(&plane_state->base.src) >> 16;
+       int h = drm_rect_height(&plane_state->base.src) >> 16;
+       int max_width = skl_max_plane_width(fb, 0, rotation);
+       int max_height = 4096;
+       u32 alignment, offset, aux_offset = plane_state->aux.offset;
+
+       if (w > max_width || h > max_height) {
+               DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+                             w, h, max_width, max_height);
+               return -EINVAL;
+       }
+
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+       offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+       alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+
+       /*
+        * AUX surface offset is specified as the distance from the
+        * main surface offset, and it must be non-negative. Make
+        * sure that is what we will get.
+        */
+       if (offset > aux_offset)
+               offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+                                                 offset, aux_offset & ~(alignment - 1));
+
+       /*
+        * When using an X-tiled surface, the plane blows up
+        * if the x offset + width exceeds the stride.
+        *
+        * TODO: linear and Y-tiled seem fine, Yf untested.
+        */
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+               while ((x + w) * cpp > fb->pitches[0]) {
+                       if (offset == 0) {
+                               DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
+                               return -EINVAL;
+                       }
+
+                       offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+                                                         offset, offset - alignment);
+               }
+       }
+
+       plane_state->main.offset = offset;
+       plane_state->main.x = x;
+       plane_state->main.y = y;
+
+       return 0;
+}
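
The "& ~(alignment - 1)" step rounds the main surface base down to the
nearest alignment boundary at or below the AUX offset, keeping the AUX
distance non-negative. A minimal demo of the mask arithmetic with
made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int alignment = 0x40000; /* 256K surface alignment */
            unsigned int aux_offset = 0x9a000;

            /* round down to an alignment boundary */
            printf("0x%x\n", aux_offset & ~(alignment - 1)); /* 0x80000 */
            return 0;
    }
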
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int max_width = skl_max_plane_width(fb, 1, rotation);
+       int max_height = 4096;
+       int x = plane_state->base.src.x1 >> 17;
+       int y = plane_state->base.src.y1 >> 17;
+       int w = drm_rect_width(&plane_state->base.src) >> 17;
+       int h = drm_rect_height(&plane_state->base.src) >> 17;
+       u32 offset;
+
+       intel_add_fb_offsets(&x, &y, plane_state, 1);
+       offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+
+       /* FIXME not quite sure how/if these apply to the chroma plane */
+       if (w > max_width || h > max_height) {
+               DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
+                             w, h, max_width, max_height);
+               return -EINVAL;
+       }
+
+       plane_state->aux.offset = offset;
+       plane_state->aux.x = x;
+       plane_state->aux.y = y;
+
+       return 0;
+}
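
The ">> 17" above folds two things together: the src rectangle is in
16.16 fixed point, and the CbCr plane of NV12 is subsampled 2x2, so
shifting by 17 both drops the fraction and halves the coordinate. A
one-line demo:

    #include <stdio.h>

    int main(void)
    {
            int src_x1 = 100 << 16; /* 16.16 fixed-point luma coord */

            printf("luma x=%d, chroma x=%d\n",
                   src_x1 >> 16, src_x1 >> 17); /* 100, 50 */
            return 0;
    }
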
+
+int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int ret;
+
+       /* Rotate src coordinates to match rotated GTT view */
+       if (intel_rotation_90_or_270(rotation))
+               drm_rect_rotate(&plane_state->base.src,
+                               fb->width, fb->height, DRM_ROTATE_270);
+
+       /*
+        * Handle the AUX surface first since
+        * the main surface setup depends on it.
+        */
+       if (fb->pixel_format == DRM_FORMAT_NV12) {
+               ret = skl_check_nv12_aux_surface(plane_state);
+               if (ret)
+                       return ret;
+       } else {
+               plane_state->aux.offset = ~0xfff;
+               plane_state->aux.x = 0;
+               plane_state->aux.y = 0;
+       }
+
+       ret = skl_check_main_surface(plane_state);
+       if (ret)
+               return ret;
+
+       return 0;
 }
 
 static void i9xx_update_primary_plane(struct drm_plane *primary,
@@ -2617,9 +3015,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        u32 dspcntr;
        i915_reg_t reg = DSPCNTR(plane);
        unsigned int rotation = plane_state->base.rotation;
-       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-       int x = plane_state->src.x1 >> 16;
-       int y = plane_state->src.y1 >> 16;
+       int x = plane_state->base.src.x1 >> 16;
+       int y = plane_state->base.src.y1 >> 16;
 
        dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -2670,37 +3067,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
                BUG();
        }
 
-       if (INTEL_INFO(dev)->gen >= 4 &&
-           obj->tiling_mode != I915_TILING_NONE)
+       if (INTEL_GEN(dev_priv) >= 4 &&
+           fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;
 
        if (IS_G4X(dev))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       linear_offset = y * fb->pitches[0] + x * cpp;
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
 
-       if (INTEL_INFO(dev)->gen >= 4) {
+       if (INTEL_INFO(dev)->gen >= 4)
                intel_crtc->dspaddr_offset =
-                       intel_compute_tile_offset(&x, &y, fb, 0,
-                                                 fb->pitches[0], rotation);
-               linear_offset -= intel_crtc->dspaddr_offset;
-       } else {
-               intel_crtc->dspaddr_offset = linear_offset;
-       }
+                       intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-       if (rotation == BIT(DRM_ROTATE_180)) {
+       if (rotation == DRM_ROTATE_180) {
                dspcntr |= DISPPLANE_ROTATE_180;
 
                x += (crtc_state->pipe_src_w - 1);
                y += (crtc_state->pipe_src_h - 1);
-
-               /* Finding the last pixel of the last line of the display
-               data and adding to linear_offset*/
-               linear_offset +=
-                       (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
-                       (crtc_state->pipe_src_w - 1) * cpp;
        }
 
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+       if (INTEL_INFO(dev)->gen < 4)
+               intel_crtc->dspaddr_offset = linear_offset;
+
        intel_crtc->adjusted_x = x;
        intel_crtc->adjusted_y = y;
 
@@ -2709,11 +3100,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else
-               I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+               I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
        POSTING_READ(reg);
 }
 
@@ -2741,15 +3133,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int plane = intel_crtc->plane;
        u32 linear_offset;
        u32 dspcntr;
        i915_reg_t reg = DSPCNTR(plane);
        unsigned int rotation = plane_state->base.rotation;
-       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-       int x = plane_state->src.x1 >> 16;
-       int y = plane_state->src.y1 >> 16;
+       int x = plane_state->base.src.x1 >> 16;
+       int y = plane_state->base.src.y1 >> 16;
 
        dspcntr = DISPPLANE_GAMMA_ENABLE;
        dspcntr |= DISPLAY_PLANE_ENABLE;
@@ -2780,32 +3170,28 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
                BUG();
        }
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;
 
        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       linear_offset = y * fb->pitches[0] + x * cpp;
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+
        intel_crtc->dspaddr_offset =
-               intel_compute_tile_offset(&x, &y, fb, 0,
-                                         fb->pitches[0], rotation);
-       linear_offset -= intel_crtc->dspaddr_offset;
-       if (rotation == BIT(DRM_ROTATE_180)) {
+               intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+       if (rotation == DRM_ROTATE_180) {
                dspcntr |= DISPPLANE_ROTATE_180;
 
                if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
                        x += (crtc_state->pipe_src_w - 1);
                        y += (crtc_state->pipe_src_h - 1);
-
-                       /* Finding the last pixel of the last line of the display
-                       data and adding to linear_offset*/
-                       linear_offset +=
-                               (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
-                               (crtc_state->pipe_src_w - 1) * cpp;
                }
        }
 
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
        intel_crtc->adjusted_x = x;
        intel_crtc->adjusted_y = y;
 
@@ -2813,7 +3199,8 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
        } else {
@@ -2835,32 +3222,21 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
-                          struct drm_i915_gem_object *obj,
-                          unsigned int plane)
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
+                       unsigned int rotation)
 {
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
        struct i915_vma *vma;
-       u64 offset;
 
-       intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
-                               intel_plane->base.state->rotation);
+       intel_fill_fb_ggtt_view(&view, fb, rotation);
 
-       vma = i915_gem_obj_to_ggtt_view(obj, &view);
+       vma = i915_gem_object_to_ggtt(obj, &view);
        if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-               view.type))
+                view.type))
                return -1;
 
-       offset = vma->node.start;
-
-       if (plane == 1) {
-               offset += vma->ggtt_view.params.rotated.uv_start_page *
-                         PAGE_SIZE;
-       }
-
-       WARN_ON(upper_32_bits(offset));
-
-       return lower_32_bits(offset);
+       return i915_ggtt_offset(vma);
 }
 
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -2890,6 +3266,28 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
        }
 }
 
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+                    unsigned int rotation)
+{
+       const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+       u32 stride = intel_fb_pitch(fb, plane, rotation);
+
+       /*
+        * The stride is expressed either as a number of 64-byte chunks
+        * for linear buffers or as a number of tiles for tiled buffers.
+        */
+       if (intel_rotation_90_or_270(rotation)) {
+               int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+               stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+       } else {
+               stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+                                                   fb->pixel_format);
+       }
+
+       return stride;
+}
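
Concretely, with the usual Intel tile geometries (64-byte chunks for
linear, 512-byte-wide X tiles), the same byte pitch programs quite
different stride values. A small demo of the division, assuming those
byte widths and a 1920-pixel XRGB8888 framebuffer:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pitch = 7680; /* bytes */

            printf("linear:  %u\n", pitch / 64);  /* 120 chunks */
            printf("X-tiled: %u\n", pitch / 512); /* 15 tiles */
            return 0;
    }
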
+
 u32 skl_plane_ctl_format(uint32_t pixel_format)
 {
        switch (pixel_format) {
@@ -2952,17 +3350,17 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
 u32 skl_plane_ctl_rotation(unsigned int rotation)
 {
        switch (rotation) {
-       case BIT(DRM_ROTATE_0):
+       case DRM_ROTATE_0:
                break;
        /*
         * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
         * while i915 HW rotation is clockwise, thats why this swapping.
         */
-       case BIT(DRM_ROTATE_90):
+       case DRM_ROTATE_90:
                return PLANE_CTL_ROTATE_270;
-       case BIT(DRM_ROTATE_180):
+       case DRM_ROTATE_180:
                return PLANE_CTL_ROTATE_180;
-       case BIT(DRM_ROTATE_270):
+       case DRM_ROTATE_270:
                return PLANE_CTL_ROTATE_90;
        default:
                MISSING_CASE(rotation);
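
Equivalently, the CCW-to-CW mapping is "(4 - r) % 4" in quarter turns
(our formulation, not the driver's). A tiny demo printing the table the
switch above encodes:

    #include <stdio.h>

    int main(void)
    {
            for (int r = 0; r < 4; r++)
                    printf("DRM_ROTATE_%d -> HW %d\n",
                           r * 90, ((4 - r) % 4) * 90);
            return 0;
    }
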
@@ -2979,22 +3377,21 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
        int pipe = intel_crtc->pipe;
-       u32 plane_ctl, stride_div, stride;
-       u32 tile_height, plane_offset, plane_size;
+       u32 plane_ctl;
        unsigned int rotation = plane_state->base.rotation;
-       int x_offset, y_offset;
-       u32 surf_addr;
+       u32 stride = skl_plane_stride(fb, 0, rotation);
+       u32 surf_addr = plane_state->main.offset;
        int scaler_id = plane_state->scaler_id;
-       int src_x = plane_state->src.x1 >> 16;
-       int src_y = plane_state->src.y1 >> 16;
-       int src_w = drm_rect_width(&plane_state->src) >> 16;
-       int src_h = drm_rect_height(&plane_state->src) >> 16;
-       int dst_x = plane_state->dst.x1;
-       int dst_y = plane_state->dst.y1;
-       int dst_w = drm_rect_width(&plane_state->dst);
-       int dst_h = drm_rect_height(&plane_state->dst);
+       int src_x = plane_state->main.x;
+       int src_y = plane_state->main.y;
+       int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       int dst_x = plane_state->base.dst.x1;
+       int dst_y = plane_state->base.dst.y1;
+       int dst_w = drm_rect_width(&plane_state->base.dst);
+       int dst_h = drm_rect_height(&plane_state->base.dst);
 
        plane_ctl = PLANE_CTL_ENABLE |
                    PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3005,36 +3402,22 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
        plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-       stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-                                              fb->pixel_format);
-       surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       dst_w--;
+       dst_h--;
 
-       WARN_ON(drm_rect_width(&plane_state->src) == 0);
+       intel_crtc->adjusted_x = src_x;
+       intel_crtc->adjusted_y = src_y;
 
-       if (intel_rotation_90_or_270(rotation)) {
-               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-               /* stride = Surface height in tiles */
-               tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
-               stride = DIV_ROUND_UP(fb->height, tile_height);
-               x_offset = stride * tile_height - src_y - src_h;
-               y_offset = src_x;
-               plane_size = (src_w - 1) << 16 | (src_h - 1);
-       } else {
-               stride = fb->pitches[0] / stride_div;
-               x_offset = src_x;
-               y_offset = src_y;
-               plane_size = (src_h - 1) << 16 | (src_w - 1);
-       }
-       plane_offset = y_offset << 16 | x_offset;
-
-       intel_crtc->adjusted_x = x_offset;
-       intel_crtc->adjusted_y = y_offset;
+       if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+               skl_write_plane_wm(intel_crtc, wm, 0);
 
        I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-       I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
-       I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
 
        if (scaler_id >= 0) {
                uint32_t ps_ctrl = 0;
@@ -3051,7 +3434,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
                I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
        }
 
-       I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
+       I915_WRITE(PLANE_SURF(pipe, 0),
+                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -3061,7 +3445,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = to_intel_crtc(crtc)->pipe;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * We only populate skl_results on watermark updates, and if the
+        * plane's visiblity isn't actually changing neither is its watermarks.
+        */
+       if (!crtc->primary->state->visible)
+               skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
 
        I915_WRITE(PLANE_CTL(pipe, 0), 0);
        I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3093,40 +3485,113 @@ static void intel_update_primary_planes(struct drm_device *dev)
 
        for_each_crtc(dev, crtc) {
                struct intel_plane *plane = to_intel_plane(crtc->primary);
-               struct intel_plane_state *plane_state;
-
-               drm_modeset_lock_crtc(crtc, &plane->base);
-               plane_state = to_intel_plane_state(plane->base.state);
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
 
-               if (plane_state->visible)
+               if (plane_state->base.visible)
                        plane->update_plane(&plane->base,
                                            to_intel_crtc_state(crtc->state),
                                            plane_state);
+       }
+}
 
-               drm_modeset_unlock_crtc(crtc);
+static int
+__intel_display_resume(struct drm_device *dev,
+                      struct drm_atomic_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int i, ret;
+
+       intel_modeset_setup_hw_state(dev);
+       i915_redisable_vga(dev);
+
+       if (!state)
+               return 0;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               /*
+                * Force recalculation even if we restore
+                * current state. With fast modeset this may not result
+                * in a modeset when the state is compatible.
+                */
+               crtc_state->mode_changed = true;
        }
+
+       /* ignore any reset values/BIOS leftovers in the WM registers */
+       to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+       ret = drm_atomic_commit(state);
+
+       WARN_ON(ret == -EDEADLK);
+       return ret;
+}
+
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+       return intel_has_gpu_reset(dev_priv) &&
+               INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
 }
 
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
-       /* no reset support for gen2 */
-       if (IS_GEN2(dev_priv))
-               return;
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+       struct drm_atomic_state *state;
+       int ret;
 
-       /* reset doesn't touch the display */
-       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+       /*
+        * Need mode_config.mutex so that we don't
+        * trample ongoing ->detect() and whatnot.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_acquire_init(ctx, 0);
+       while (1) {
+               ret = drm_modeset_lock_all_ctx(dev, ctx);
+               if (ret != -EDEADLK)
+                       break;
+
+               drm_modeset_backoff(ctx);
+       }
+
+       /* reset doesn't touch the display, but flips might get nuked anyway */
+       if (!i915.force_reset_modeset_test &&
+           !gpu_reset_clobbers_display(dev_priv))
                return;
 
-       drm_modeset_lock_all(&dev_priv->drm);
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
-       intel_display_suspend(&dev_priv->drm);
+       state = drm_atomic_helper_duplicate_state(dev, ctx);
+       if (IS_ERR(state)) {
+               ret = PTR_ERR(state);
+               state = NULL;
+               DRM_ERROR("Duplicating state failed with %i\n", ret);
+               goto err;
+       }
+
+       ret = drm_atomic_helper_disable_all(dev, ctx);
+       if (ret) {
+               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+               goto err;
+       }
+
+       dev_priv->modeset_restore_state = state;
+       state->acquire_ctx = ctx;
+       return;
+
+err:
+       drm_atomic_state_free(state);
 }
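
The while loop in intel_prepare_reset() instantiates the standard DRM
acquire-context pattern: on -EDEADLK, back off (dropping the locks and
waiting for the contending thread), then retry, so no two threads can
deadlock on lock order. A generic sketch of that pattern:

    static void example_lock_all(struct drm_device *dev,
                                 struct drm_modeset_acquire_ctx *ctx)
    {
            int ret;

            drm_modeset_acquire_init(ctx, 0);
    retry:
            ret = drm_modeset_lock_all_ctx(dev, ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(ctx);
                    goto retry;
            }

            /* ... modeset work under the locks ... */

            drm_modeset_drop_locks(ctx);
            drm_modeset_acquire_fini(ctx);
    }
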
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+       struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+       int ret;
+
        /*
         * Flips in the rings will be nuked by the reset,
         * so complete all pending flips so that user space
@@ -3134,55 +3599,75 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
         */
        intel_complete_page_flips(dev_priv);
 
-       /* no reset support for gen2 */
-       if (IS_GEN2(dev_priv))
-               return;
+       dev_priv->modeset_restore_state = NULL;
 
        /* reset doesn't touch the display */
-       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
+       if (!gpu_reset_clobbers_display(dev_priv)) {
+               if (!state) {
+                       /*
+                        * Flips in the rings have been nuked by the reset,
+                        * so update the base address of all primary
+                        * planes to the last fb to make sure we're
+                        * showing the correct fb after a reset.
+                        *
+                        * FIXME: Atomic will make this obsolete since we won't schedule
+                        * CS-based flips (which might get lost in gpu resets) any more.
+                        */
+                       intel_update_primary_planes(dev);
+               } else {
+                       ret = __intel_display_resume(dev, state);
+                       if (ret)
+                               DRM_ERROR("Restoring old state failed with %i\n", ret);
+               }
+       } else {
                /*
-                * Flips in the rings have been nuked by the reset,
-                * so update the base address of all primary
-                * planes to the the last fb to make sure we're
-                * showing the correct fb after a reset.
-                *
-                * FIXME: Atomic will make this obsolete since we won't schedule
-                * CS-based flips (which might get lost in gpu resets) any more.
+                * The display has been reset as well,
+                * so we need a full re-initialization.
                 */
-               intel_update_primary_planes(&dev_priv->drm);
-               return;
-       }
+               intel_runtime_pm_disable_interrupts(dev_priv);
+               intel_runtime_pm_enable_interrupts(dev_priv);
 
-       /*
-        * The display has been reset as well,
-        * so need a full re-initialization.
-        */
-       intel_runtime_pm_disable_interrupts(dev_priv);
-       intel_runtime_pm_enable_interrupts(dev_priv);
+               intel_modeset_init_hw(dev);
 
-       intel_modeset_init_hw(&dev_priv->drm);
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display.hpd_irq_setup)
+                       dev_priv->display.hpd_irq_setup(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+               ret = __intel_display_resume(dev, state);
+               if (ret)
+                       DRM_ERROR("Restoring old state failed with %i\n", ret);
 
-       intel_display_resume(&dev_priv->drm);
+               intel_hpd_init(dev_priv);
+       }
 
-       intel_hpd_init(dev_priv);
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+       mutex_unlock(&dev->mode_config.mutex);
+}
 
-       drm_modeset_unlock_all(&dev_priv->drm);
+static bool abort_flip_on_reset(struct intel_crtc *crtc)
+{
+       struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
+
+       if (i915_reset_in_progress(error))
+               return true;
+
+       if (crtc->reset_count != i915_reset_count(error))
+               return true;
+
+       return false;
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned reset_counter;
        bool pending;
 
-       reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
-       if (intel_crtc->reset_counter != reset_counter)
+       if (abort_flip_on_reset(intel_crtc))
                return false;
 
        spin_lock_irq(&dev->event_lock);
@@ -3825,7 +4310,7 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
        return 0;
 }
 
-static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 {
        u32 temp;
 
@@ -4248,7 +4733,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
                      intel_crtc->pipe, SKL_CRTC_INDEX);
 
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-               &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+               &state->scaler_state.scaler_id, DRM_ROTATE_0,
                state->pipe_src_w, state->pipe_src_h,
                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4273,7 +4758,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
        struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;
 
-       bool force_detach = !fb || !plane_state->visible;
+       bool force_detach = !fb || !plane_state->base.visible;
 
        DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
                      intel_plane->base.base.id, intel_plane->base.name,
@@ -4283,10 +4768,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
                                plane_state->base.rotation,
-                               drm_rect_width(&plane_state->src) >> 16,
-                               drm_rect_height(&plane_state->src) >> 16,
-                               drm_rect_width(&plane_state->dst),
-                               drm_rect_height(&plane_state->dst));
+                               drm_rect_width(&plane_state->base.src) >> 16,
+                               drm_rect_height(&plane_state->base.src) >> 16,
+                               drm_rect_width(&plane_state->base.dst),
+                               drm_rect_height(&plane_state->base.dst));
 
        if (ret || plane_state->scaler_id < 0)
                return ret;
@@ -4564,12 +5049,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
-       struct drm_device *dev = crtc->base.dev;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_pri_state =
                drm_atomic_get_existing_plane_state(old_state, primary);
 
-       intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+       intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
        crtc->wm.cxsr_allowed = true;
 
@@ -4584,9 +5068,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 
                intel_fbc_post_update(crtc);
 
-               if (primary_state->visible &&
+               if (primary_state->base.visible &&
                    (needs_modeset(&pipe_config->base) ||
-                    !old_primary_state->visible))
+                    !old_primary_state->base.visible))
                        intel_post_enable_primary(&crtc->base);
        }
 }
@@ -4612,8 +5096,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 
                intel_fbc_pre_update(crtc, pipe_config, primary_state);
 
-               if (old_primary_state->visible &&
-                   (modeset || !primary_state->visible))
+               if (old_primary_state->base.visible &&
+                   (modeset || !primary_state->base.visible))
                        intel_pre_disable_primary(&crtc->base);
        }
 
@@ -4692,18 +5176,140 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
         * to compute the mask of flip planes precisely. For the time being
         * consider this a flip to a NULL plane.
         */
-       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+       intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+}
+
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+                                         struct intel_crtc_state *crtc_state,
+                                         struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state,
+                                     struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               encoder->enable(encoder, crtc_state, conn_state);
+               intel_opregion_notify_encoder(encoder, true);
+       }
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+                                  struct intel_crtc_state *old_crtc_state,
+                                  struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               intel_opregion_notify_encoder(encoder, false);
+               encoder->disable(encoder, old_crtc_state, old_conn_state);
+       }
 }
 
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+                                       struct intel_crtc_state *old_crtc_state,
+                                       struct drm_atomic_state *old_state)
 {
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_disable)
+                       encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+                                           struct intel_crtc_state *old_crtc_state,
+                                           struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_pll_disable)
+                       encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+                                struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -4741,9 +5347,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc->active = true;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -4773,8 +5377,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -4792,16 +5395,15 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void haswell_crtc_enable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+                               struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -4810,9 +5412,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->shared_dpll)
                intel_enable_shared_dpll(intel_crtc);
@@ -4850,10 +5450,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        else
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
-       }
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
@@ -4894,10 +5491,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               encoder->enable(encoder);
-               intel_opregion_notify_encoder(encoder, true);
-       }
+       intel_encoders_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                intel_wait_for_vblank(dev, pipe);
@@ -4931,12 +5525,13 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
        }
 }
 
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                 struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
        /*
@@ -4949,8 +5544,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -4962,9 +5556,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -4994,22 +5586,20 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
-static void haswell_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               intel_opregion_notify_encoder(encoder, false);
-               encoder->disable(encoder);
-       }
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -5032,18 +5622,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_pipe_clock(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
-
-       if (intel_crtc->config->has_pch_encoder) {
-               lpt_disable_pch_transcoder(dev_priv);
-               lpt_disable_iclkip(dev_priv);
-               intel_ddi_fdi_disable(crtc);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
+       if (old_crtc_state->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
-       }
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5691,15 +6274,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
 
 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 {
-       unsigned int i;
-
-       for (i = 0; i < 15; i++) {
-               if (skl_cdclk_pcu_ready(dev_priv))
-                       return true;
-               udelay(10);
-       }
-
-       return false;
+       return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
 }
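
The fifteen-iteration udelay() loop collapses into a single _wait_for() call; since the exact timeout and interval units of that macro are an i915 internal, what follows is only a generic userspace equivalent of poll-until-deadline, with invented names:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll cond() every interval_us until it holds or timeout_us elapses.
 * Returns 0 on success, -1 on timeout, mirroring the "== 0" check above. */
static int wait_for(bool (*cond)(void), long long timeout_us, long long interval_us)
{
        long long deadline = now_us() + timeout_us;

        for (;;) {
                if (cond())
                        return 0;
                if (now_us() >= deadline)
                        return -1;
                usleep(interval_us);
        }
}

static int polls;
static bool pcu_ready(void) { return ++polls >= 5; } /* stand-in condition */

int main(void)
{
        int ret = wait_for(pcu_ready, 3000, 10);

        printf("ret=%d after %d polls\n", ret, polls);
        return 0;
}

The wait_for(pcu_ready, 3000, 10) call mirrors the shape of the conversion above without asserting what units the kernel macro actually takes.
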
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
@@ -6107,14 +6682,13 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
        intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
-static void valleyview_crtc_enable(struct drm_crtc *crtc)
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+                                  struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
        int pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6139,9 +6713,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (IS_CHERRYVIEW(dev)) {
                chv_prepare_pll(intel_crtc, intel_crtc->config);
@@ -6151,9 +6723,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                vlv_enable_pll(intel_crtc, intel_crtc->config);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        i9xx_pfit_enable(intel_crtc);
 
@@ -6165,8 +6735,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -6178,14 +6747,13 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
        I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 }
 
-static void i9xx_crtc_enable(struct drm_crtc *crtc)
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+                            struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
        enum pipe pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6206,9 +6774,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        i9xx_enable_pll(intel_crtc);
 
@@ -6222,8 +6788,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6241,12 +6806,13 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
        I915_WRITE(PFIT_CONTROL, 0);
 }
 
-static void i9xx_crtc_disable(struct drm_crtc *crtc)
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                             struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
        /*
@@ -6256,8 +6822,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        if (IS_GEN2(dev))
                intel_wait_for_vblank(dev, pipe);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -6266,9 +6831,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
        i9xx_pfit_disable(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev))
@@ -6279,9 +6842,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                        i9xx_disable_pll(intel_crtc);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_pll_disable)
-                       encoder->post_pll_disable(encoder);
+       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6294,20 +6855,34 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
        unsigned long domains;
+       struct drm_atomic_state *state;
+       struct intel_crtc_state *crtc_state;
+       int ret;
 
        if (!intel_crtc->active)
                return;
 
-       if (to_intel_plane_state(crtc->primary->state)->visible) {
+       if (to_intel_plane_state(crtc->primary->state)->base.visible) {
                WARN_ON(intel_crtc->flip_work);
 
                intel_pre_disable_primary_noatomic(crtc);
 
                intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
-               to_intel_plane_state(crtc->primary->state)->visible = false;
+               to_intel_plane_state(crtc->primary->state)->base.visible = false;
        }
 
-       dev_priv->display.crtc_disable(crtc);
+       state = drm_atomic_state_alloc(crtc->dev);
+       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+
+       /* Everything's already locked, -EDEADLK can't happen. */
+       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+       ret = drm_atomic_add_affected_connectors(state, crtc);
+
+       WARN_ON(IS_ERR(crtc_state) || ret);
+
+       dev_priv->display.crtc_disable(crtc_state, state);
+
+       drm_atomic_state_free(state);
 
        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);
@@ -6822,9 +7397,10 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
 
 static int pnv_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 gcfgc = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
 
        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
        case GC_DISPLAY_CLOCK_267_MHZ_PNV:
@@ -6846,9 +7422,10 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
 
 static int i915gm_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 gcfgc = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
 
        if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
                return 133333;
@@ -6870,6 +7447,7 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
 
 static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 hpllcc = 0;
 
        /*
@@ -6877,10 +7455,10 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
         * encoding is different :(
         * FIXME is this the right way to detect 852GM/852GMV?
         */
-       if (dev->pdev->revision == 0x1)
+       if (pdev->revision == 0x1)
                return 133333;
 
-       pci_bus_read_config_word(dev->pdev->bus,
+       pci_bus_read_config_word(pdev->bus,
                                 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
 
        /* Assume that the hardware is in the high speed state.  This
@@ -6981,10 +7559,11 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
 
 static int gm45_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = (tmp >> 12) & 0x1;
 
@@ -7003,6 +7582,7 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
 
 static int i965gm_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        static const uint8_t div_3200[] = { 16, 10,  8 };
        static const uint8_t div_4000[] = { 20, 12, 10 };
        static const uint8_t div_5333[] = { 24, 16, 14 };
@@ -7010,7 +7590,7 @@ static int i965gm_get_display_clock_speed(struct drm_device *dev)
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
 
@@ -7040,6 +7620,7 @@ fail:
 
 static int g33_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
        static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
        static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -7048,7 +7629,7 @@ static int g33_get_display_clock_speed(struct drm_device *dev)
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = (tmp >> 4) & 0x7;
 
@@ -9214,7 +9795,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        return;
 
 error:
-       kfree(fb);
+       kfree(intel_fb);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -9420,7 +10001,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
        I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
        I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
        I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
-       I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+       I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
        I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
             "CPU PWM1 enabled\n");
        if (IS_HASWELL(dev))
@@ -9459,7 +10040,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
                mutex_lock(&dev_priv->rps.hw_lock);
                if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
                                            val))
-                       DRM_ERROR("Failed to write to D_COMP\n");
+                       DRM_DEBUG_KMS("Failed to write to D_COMP\n");
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
                I915_WRITE(D_COMP_BDW, val);
@@ -9867,15 +10448,12 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 
        switch (port) {
        case PORT_A:
-               pipe_config->ddi_pll_sel = SKL_DPLL0;
                id = DPLL_ID_SKL_DPLL0;
                break;
        case PORT_B:
-               pipe_config->ddi_pll_sel = SKL_DPLL1;
                id = DPLL_ID_SKL_DPLL1;
                break;
        case PORT_C:
-               pipe_config->ddi_pll_sel = SKL_DPLL2;
                id = DPLL_ID_SKL_DPLL2;
                break;
        default:
@@ -9894,25 +10472,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
        u32 temp;
 
        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
-       pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+       id = temp >> (port * 3 + 1);
 
-       switch (pipe_config->ddi_pll_sel) {
-       case SKL_DPLL0:
-               id = DPLL_ID_SKL_DPLL0;
-               break;
-       case SKL_DPLL1:
-               id = DPLL_ID_SKL_DPLL1;
-               break;
-       case SKL_DPLL2:
-               id = DPLL_ID_SKL_DPLL2;
-               break;
-       case SKL_DPLL3:
-               id = DPLL_ID_SKL_DPLL3;
-               break;
-       default:
-               MISSING_CASE(pipe_config->ddi_pll_sel);
+       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;
-       }
 
        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
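
skylake_get_ddi_pll() now decodes the DPLL id straight from the per-port clock-select bitfield and bails out on out-of-range values rather than switching over every case. The decode-and-range-check idiom in isolation, as a userspace sketch; the field width and layout below are made up, not the real DPLL_CTRL2 encoding:

#include <stdint.h>
#include <stdio.h>

#define DPLL_ID_MAX 3 /* pretend ids 0..3 are the valid PLLs */

/* Assume each port owns a clock-select field at bit (port * 3 + 1). */
static int get_dpll_id(uint32_t reg, int port)
{
        unsigned int shift = port * 3 + 1;
        unsigned int id = (reg >> shift) & 0x7; /* 3-bit field in this sketch */

        if (id > DPLL_ID_MAX) /* mirrors the WARN_ON() early return */
                return -1;
        return (int)id;
}

int main(void)
{
        uint32_t reg = 2u << (1 * 3 + 1); /* port 1 selects DPLL2 */

        printf("port 1 -> DPLL%d\n", get_dpll_id(reg, 1));
        return 0;
}
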
@@ -9922,10 +10485,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                struct intel_crtc_state *pipe_config)
 {
        enum intel_dpll_id id;
+       uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
-       pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-       switch (pipe_config->ddi_pll_sel) {
+       switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
@@ -9945,7 +10507,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
-               MISSING_CASE(pipe_config->ddi_pll_sel);
+               MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
@@ -10178,7 +10740,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t cntl = 0, size = 0;
 
-       if (plane_state && plane_state->visible) {
+       if (plane_state && plane_state->base.visible) {
                unsigned int width = plane_state->base.crtc_w;
                unsigned int height = plane_state->base.crtc_h;
                unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -10239,10 +10801,14 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
        int pipe = intel_crtc->pipe;
        uint32_t cntl = 0;
 
-       if (plane_state && plane_state->visible) {
+       if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+               skl_write_cursor_wm(intel_crtc, wm);
+
+       if (plane_state && plane_state->base.visible) {
                cntl = MCURSOR_GAMMA_ENABLE;
                switch (plane_state->base.crtc_w) {
                        case 64:
@@ -10263,7 +10829,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
                if (HAS_DDI(dev))
                        cntl |= CURSOR_PIPE_CSC_ENABLE;
 
-               if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+               if (plane_state->base.rotation == DRM_ROTATE_180)
                        cntl |= CURSOR_ROTATE_180;
        }
 
@@ -10309,7 +10875,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
                /* ILK+ do this automagically */
                if (HAS_GMCH_DISPLAY(dev) &&
-                   plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+                   plane_state->base.rotation == DRM_ROTATE_180) {
                        base += (plane_state->base.crtc_h *
                                 plane_state->base.crtc_w - 1) * 4;
                }
@@ -10442,7 +11008,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 
        fb = intel_framebuffer_create(dev, &mode_cmd, obj);
        if (IS_ERR(fb))
-               drm_gem_object_unreference_unlocked(&obj->base);
+               i915_gem_object_put_unlocked(obj);
 
        return fb;
 }
@@ -10953,13 +11519,13 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 
        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
-       drm_gem_object_unreference(&work->pending_flip_obj->base);
-
-       if (work->flip_queued_req)
-               i915_gem_request_assign(&work->flip_queued_req, NULL);
+       i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
-       intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+       i915_gem_request_put(work->flip_queued_req);
+
+       intel_frontbuffer_flip_complete(to_i915(dev),
+                                       to_intel_plane(primary)->frontbuffer_bit);
        intel_fbc_post_update(crtc);
        drm_framebuffer_unreference(work->old_fb);
 
@@ -10980,10 +11546,8 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned reset_counter;
 
-       reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       if (crtc->reset_counter != reset_counter)
+       if (abort_flip_on_reset(crtc))
                return true;
 
        /*
@@ -11124,7 +11688,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
@@ -11140,13 +11704,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-       intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_emit(engine, MI_DISPLAY_FLIP |
+       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(engine, 0); /* aux display base address, unused */
+       intel_ring_emit(ring, fb->pitches[0]);
+       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+       intel_ring_emit(ring, 0); /* aux display base address, unused */
 
        return 0;
 }
@@ -11158,7 +11722,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
@@ -11171,13 +11735,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-       intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_emit(ring, fb->pitches[0]);
+       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+       intel_ring_emit(ring, MI_NOOP);
 
        return 0;
 }
@@ -11189,7 +11753,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
@@ -11203,11 +11767,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
         * Display Registers (which do not change across a page-flip)
         * so we need only reprogram the base address.
         */
-       intel_ring_emit(engine, MI_DISPLAY_FLIP |
+       intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
-                       obj->tiling_mode);
+       intel_ring_emit(ring, fb->pitches[0]);
+       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
+                       intel_fb_modifier_to_tiling(fb->modifier[0]));
 
        /* XXX Enabling the panel-fitter across page-flip is so far
         * untested on non-native modes, so ignore it for now.
@@ -11215,7 +11779,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-       intel_ring_emit(engine, pf | pipesrc);
+       intel_ring_emit(ring, pf | pipesrc);
 
        return 0;
 }
@@ -11227,7 +11791,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
@@ -11237,10 +11801,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_DISPLAY_FLIP |
+       intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+       intel_ring_emit(ring, fb->pitches[0] |
+                       intel_fb_modifier_to_tiling(fb->modifier[0]));
+       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
@@ -11250,7 +11815,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-       intel_ring_emit(engine, pf | pipesrc);
+       intel_ring_emit(ring, pf | pipesrc);
 
        return 0;
 }
@@ -11262,7 +11827,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;
@@ -11283,7 +11848,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        }
 
        len = 4;
-       if (engine->id == RCS) {
+       if (req->engine->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11321,30 +11886,32 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
-       if (engine->id == RCS) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, DERRMR);
-               intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+       if (req->engine->id == RCS) {
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(ring, DERRMR);
+               intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                          DERRMR_PIPEB_PRI_FLIP_DONE |
                                          DERRMR_PIPEC_PRI_FLIP_DONE));
                if (IS_GEN8(dev))
-                       intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
-                       intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
-               intel_ring_emit_reg(engine, DERRMR);
-               intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
+               intel_ring_emit_reg(ring, DERRMR);
+               intel_ring_emit(ring,
+                               i915_ggtt_offset(req->engine->scratch) + 256);
                if (IS_GEN8(dev)) {
-                       intel_ring_emit(engine, 0);
-                       intel_ring_emit(engine, MI_NOOP);
+                       intel_ring_emit(ring, 0);
+                       intel_ring_emit(ring, MI_NOOP);
                }
        }
 
-       intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
-       intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-       intel_ring_emit(engine, (MI_NOOP));
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+       intel_ring_emit(ring, fb->pitches[0] |
+                       intel_fb_modifier_to_tiling(fb->modifier[0]));
+       intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+       intel_ring_emit(ring, (MI_NOOP));
 
        return 0;
 }
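
Every queue_flip variant now emits through req->ring rather than the engine. The emit pattern itself is just a cursor advancing through a dword buffer; here is a toy userspace model (the opcode values and names are invented, not the real MI_* encoding):

#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_DISPLAY_FLIP 0x1000u /* invented opcode values */
#define FAKE_MI_NOOP         0x0000u

struct ring {
        uint32_t dwords[64];
        unsigned int tail;
};

static void ring_emit(struct ring *r, uint32_t v)
{
        r->dwords[r->tail++] = v; /* real code reserves space first */
}

/* A minimal flip packet: opcode, pitch, surface offset, pad dword. */
static void emit_flip(struct ring *r, uint32_t pitch, uint32_t gtt_offset)
{
        ring_emit(r, FAKE_MI_DISPLAY_FLIP);
        ring_emit(r, pitch);
        ring_emit(r, gtt_offset);
        ring_emit(r, FAKE_MI_NOOP);
}

int main(void)
{
        struct ring r = { .tail = 0 };
        unsigned int i;

        emit_flip(&r, 4096, 0x100000);
        for (i = 0; i < r.tail; i++)
                printf("dw[%u] = 0x%08x\n", i, r.dwords[i]);
        return 0;
}
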
@@ -11379,7 +11946,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
        if (resv && !reservation_object_test_signaled_rcu(resv, false))
                return true;
 
-       return engine != i915_gem_request_get_engine(obj->last_write_req);
+       return engine != i915_gem_active_get_engine(&obj->last_write,
+                                                   &obj->base.dev->struct_mutex);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11390,7 +11958,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        const enum pipe pipe = intel_crtc->pipe;
-       u32 ctl, stride, tile_height;
+       u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
 
        ctl = I915_READ(PLANE_CTL(pipe, 0));
        ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11410,20 +11978,6 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
                MISSING_CASE(fb->modifier[0]);
        }
 
-       /*
-        * The stride is either expressed as a multiple of 64 bytes chunks for
-        * linear buffers or in number of tiles for tiled buffers.
-        */
-       if (intel_rotation_90_or_270(rotation)) {
-               /* stride = Surface height in tiles */
-               tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
-               stride = DIV_ROUND_UP(fb->height, tile_height);
-       } else {
-               stride = fb->pitches[0] /
-                       intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-                                                 fb->pixel_format);
-       }
-
        /*
         * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
         * PLANE_SURF updates; the update is then guaranteed to be atomic.
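
The comment above is the whole trick: PLANE_CTL and PLANE_STRIDE only take effect when PLANE_SURF is written, so three register writes land as one atomic update. A small userspace model of such arm-on-trigger registers, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Staged values go live only when the arming register is written. */
struct plane_regs {
        uint32_t ctl_staged, stride_staged;
        uint32_t ctl_live, stride_live, surf_live;
};

static void write_ctl(struct plane_regs *p, uint32_t v)    { p->ctl_staged = v; }
static void write_stride(struct plane_regs *p, uint32_t v) { p->stride_staged = v; }

/* The PLANE_SURF-style write arms the update: everything latches at once. */
static void write_surf(struct plane_regs *p, uint32_t v)
{
        p->ctl_live = p->ctl_staged;
        p->stride_live = p->stride_staged;
        p->surf_live = v;
}

int main(void)
{
        struct plane_regs p = { 0 };

        write_ctl(&p, 0x80000000u);
        write_stride(&p, 128);
        /* nothing visible yet: scanout still uses the old values */
        write_surf(&p, 0x100000); /* all three update in the same instant */
        printf("live: ctl=0x%x stride=%u surf=0x%x\n",
               p.ctl_live, p.stride_live, p.surf_live);
        return 0;
}
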
@@ -11440,15 +11994,13 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
 {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_framebuffer *intel_fb =
-               to_intel_framebuffer(intel_crtc->base.primary->fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        i915_reg_t reg = DSPCNTR(intel_crtc->plane);
        u32 dspcntr;
 
        dspcntr = I915_READ(reg);
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;
        else
                dspcntr &= ~DISPPLANE_TILED;
@@ -11471,9 +12023,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
        struct reservation_object *resv;
 
        if (work->flip_queued_req)
-               WARN_ON(__i915_wait_request(work->flip_queued_req,
-                                           false, NULL,
-                                           &dev_priv->rps.mmioflips));
+               WARN_ON(i915_wait_request(work->flip_queued_req,
+                                         0, NULL, NO_WAITBOOST));
 
        /* For framebuffer backed by dmabuf, wait for fence */
        resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -11584,7 +12135,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct intel_flip_work *work;
        struct intel_engine_cs *engine;
        bool mmio_flip;
-       struct drm_i915_gem_request *request = NULL;
+       struct drm_i915_gem_request *request;
+       struct i915_vma *vma;
        int ret;
 
        /*
@@ -11650,22 +12202,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        /* Reference the objects for the scheduled work. */
        drm_framebuffer_reference(work->old_fb);
-       drm_gem_object_reference(&obj->base);
 
        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);
 
-       intel_fbc_pre_update(intel_crtc, intel_crtc->config,
-                            to_intel_plane_state(primary->state));
-
-       work->pending_flip_obj = obj;
+       work->pending_flip_obj = i915_gem_object_get(obj);
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;
 
-       intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+       intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
                ret = -EIO;
                goto cleanup;
        }
@@ -11677,13 +12225,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                engine = &dev_priv->engine[BCS];
-               if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
+               if (fb->modifier[0] != old_fb->modifier[0])
                        /* vlv: DISPLAY_FLIP fails to change tiling */
                        engine = NULL;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                engine = &dev_priv->engine[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
-               engine = i915_gem_request_get_engine(obj->last_write_req);
+               engine = i915_gem_active_get_engine(&obj->last_write,
+                                                   &obj->base.dev->struct_mutex);
                if (engine == NULL || engine->id != RCS)
                        engine = &dev_priv->engine[BCS];
        } else {
@@ -11692,47 +12241,52 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        mmio_flip = use_mmio_flip(engine, obj);
 
-       /* When using CS flips, we want to emit semaphores between rings.
-        * However, when using mmio flips we will create a task to do the
-        * synchronisation, so all we want here is to pin the framebuffer
-        * into the display plane and skip any waits.
-        */
-       if (!mmio_flip) {
-               ret = i915_gem_object_sync(obj, engine, &request);
-               if (!ret && !request) {
-                       request = i915_gem_request_alloc(engine, NULL);
-                       ret = PTR_ERR_OR_ZERO(request);
-               }
-
-               if (ret)
-                       goto cleanup_pending;
-       }
-
-       ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
-       if (ret)
+       vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
                goto cleanup_pending;
+       }
 
-       work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
-                                                 obj, 0);
+       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
        work->gtt_offset += intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
+       /*
+        * There's the potential that the next frame will not be compatible with
+        * FBC, so we want to call pre_update() before the actual page flip.
+        * The problem is that pre_update() caches some information about the fb
+        * object, so we want to do this only after the object is pinned. Let's
+        * be on the safe side and do this immediately before scheduling the
+        * flip.
+        */
+       intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+                            to_intel_plane_state(primary->state));
+
        if (mmio_flip) {
                INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
-               i915_gem_request_assign(&work->flip_queued_req,
-                                       obj->last_write_req);
-
+               work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+                                                           &obj->base.dev->struct_mutex);
                schedule_work(&work->mmio_work);
        } else {
-               i915_gem_request_assign(&work->flip_queued_req, request);
+               request = i915_gem_request_alloc(engine, engine->last_context);
+               if (IS_ERR(request)) {
+                       ret = PTR_ERR(request);
+                       goto cleanup_unpin;
+               }
+
+               ret = i915_gem_request_await_object(request, obj, false);
+               if (ret)
+                       goto cleanup_request;
+
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
-                       goto cleanup_unpin;
+                       goto cleanup_request;
 
                intel_mark_page_flip_active(intel_crtc, work);
 
+               work->flip_queued_req = i915_gem_request_get(request);
                i915_add_request_no_flush(request);
        }
 
@@ -11740,25 +12294,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);
 
-       intel_frontbuffer_flip_prepare(dev,
+       intel_frontbuffer_flip_prepare(to_i915(dev),
                                       to_intel_plane(primary)->frontbuffer_bit);
 
        trace_i915_flip_request(intel_crtc->plane, obj);
 
        return 0;
 
+cleanup_request:
+       i915_add_request_no_flush(request);
 cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
-       if (!IS_ERR_OR_NULL(request))
-               i915_add_request_no_flush(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
 cleanup:
        crtc->primary->fb = old_fb;
        update_state_fb(crtc->primary);
 
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        drm_framebuffer_unreference(work->old_fb);
 
        spin_lock_irq(&dev->event_lock);
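
The reworked error path above is the usual kernel goto unwind with one twist: once a request has been allocated it must still be submitted on failure (the new cleanup_request label) so that it gets retired. The shape of that unwind as a standalone sketch, with invented step names:

#include <stdio.h>

static int step(const char *what) { printf("do %s\n", what); return 0; }
static void undo(const char *what) { printf("undo %s\n", what); }

/* Acquire in order; on failure, unwind in reverse starting from the
 * label matching the last step that succeeded. fail_at forces a failure. */
static int do_flip(int fail_at)
{
        int ret;

        ret = step("pin framebuffer");
        if (ret || fail_at == 1) { ret = -1; goto out; }

        ret = step("allocate request");
        if (ret || fail_at == 2) { ret = -1; goto cleanup_unpin; }

        ret = step("queue flip");
        if (ret || fail_at == 3) { ret = -1; goto cleanup_request; }

        return 0;

cleanup_request:
        undo("request (submit it anyway so it is retired)");
cleanup_unpin:
        undo("pin");
out:
        return ret;
}

int main(void)
{
        return do_flip(3) == -1 ? 0 : 1; /* exercise the deepest error path */
}
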
@@ -11826,7 +12380,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
        struct intel_plane_state *cur = to_intel_plane_state(plane->state);
 
        /* Update watermarks on tiling or size changes. */
-       if (new->visible != cur->visible)
+       if (new->base.visible != cur->base.visible)
                return true;
 
        if (!cur->base.fb || !new->base.fb)
@@ -11834,10 +12388,10 @@ static bool intel_wm_need_update(struct drm_plane *plane,
 
        if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
            cur->base.rotation != new->base.rotation ||
-           drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
-           drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
-           drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
-           drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
+           drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
+           drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
+           drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
+           drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
                return true;
 
        return false;
@@ -11845,10 +12399,10 @@ static bool intel_wm_need_update(struct drm_plane *plane,
 
 static bool needs_scaling(struct intel_plane_state *state)
 {
-       int src_w = drm_rect_width(&state->src) >> 16;
-       int src_h = drm_rect_height(&state->src) >> 16;
-       int dst_w = drm_rect_width(&state->dst);
-       int dst_h = drm_rect_height(&state->dst);
+       int src_w = drm_rect_width(&state->base.src) >> 16;
+       int src_h = drm_rect_height(&state->base.src) >> 16;
+       int dst_w = drm_rect_width(&state->base.dst);
+       int dst_h = drm_rect_height(&state->base.dst);
 
        return (src_w != dst_w || src_h != dst_h);
 }
@@ -11879,8 +12433,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
                        return ret;
        }
 
-       was_visible = old_plane_state->visible;
-       visible = to_intel_plane_state(plane_state)->visible;
+       was_visible = old_plane_state->base.visible;
+       visible = to_intel_plane_state(plane_state)->base.visible;
 
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;
@@ -11896,7 +12450,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled)
-               to_intel_plane_state(plane_state)->visible = visible = false;
+               to_intel_plane_state(plane_state)->base.visible = visible = false;
 
        if (!was_visible && !visible)
                return 0;
@@ -12114,21 +12668,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to default limit on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0) {
-               int type = connector->base.connector_type;
-               int clamp_bpp = 24;
-
-               /* Fall back to 18 bpp when DP sink capability is unknown. */
-               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
-                   type == DRM_MODE_CONNECTOR_eDP)
-                       clamp_bpp = 18;
-
-               if (bpp > clamp_bpp) {
-                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
-                                     bpp, clamp_bpp);
-                       pipe_config->pipe_bpp = clamp_bpp;
-               }
+       /* Clamp bpp to 24 (i.e. 8 bpc) on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0 && bpp > 24) {
+               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+                             bpp);
+               pipe_config->pipe_bpp = 24;
        }
 }
 
@@ -12244,10 +12788,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
        if (IS_BROXTON(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+               DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
                              "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
                              "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
                              pipe_config->dpll_hw_state.ebb0,
                              pipe_config->dpll_hw_state.ebb4,
                              pipe_config->dpll_hw_state.pll0,
@@ -12260,15 +12803,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                              pipe_config->dpll_hw_state.pll10,
                              pipe_config->dpll_hw_state.pcsdw12);
        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+               DRM_DEBUG_KMS("dpll_hw_state: "
                              "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
                              pipe_config->dpll_hw_state.ctrl1,
                              pipe_config->dpll_hw_state.cfgcr1,
                              pipe_config->dpll_hw_state.cfgcr2);
        } else if (HAS_DDI(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
+               DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
                              pipe_config->dpll_hw_state.wrpll,
                              pipe_config->dpll_hw_state.spll);
        } else {
@@ -12282,6 +12823,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 
        DRM_DEBUG_KMS("planes on this crtc\n");
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               char *format_name;
                intel_plane = to_intel_plane(plane);
                if (intel_plane->pipe != crtc->pipe)
                        continue;
@@ -12294,19 +12836,23 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                        continue;
                }
 
+               format_name = drm_get_format_name(fb->pixel_format);
+
                DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
                              plane->base.id, plane->name);
                DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
-                             fb->base.id, fb->width, fb->height,
-                             drm_get_format_name(fb->pixel_format));
+                             fb->base.id, fb->width, fb->height, format_name);
                DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
                              state->scaler_id,
-                             state->src.x1 >> 16, state->src.y1 >> 16,
-                             drm_rect_width(&state->src) >> 16,
-                             drm_rect_height(&state->src) >> 16,
-                             state->dst.x1, state->dst.y1,
-                             drm_rect_width(&state->dst),
-                             drm_rect_height(&state->dst));
+                             state->base.src.x1 >> 16,
+                             state->base.src.y1 >> 16,
+                             drm_rect_width(&state->base.src) >> 16,
+                             drm_rect_height(&state->base.src) >> 16,
+                             state->base.dst.x1, state->base.dst.y1,
+                             drm_rect_width(&state->base.dst),
+                             drm_rect_height(&state->base.dst));
+
+               kfree(format_name);
        }
 }
 
@@ -12315,6 +12861,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct drm_connector *connector;
        unsigned int used_ports = 0;
+       unsigned int used_mst_ports = 0;
 
        /*
         * Walk the connector list instead of the encoder
@@ -12351,11 +12898,20 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
                                return false;
 
                        used_ports |= port_mask;
+                       break;
+               case INTEL_OUTPUT_DP_MST:
+                       used_mst_ports |=
+                               1 << enc_to_mst(&encoder->base)->primary->port;
+                       break;
                default:
                        break;
                }
        }
 
+       /* can't mix MST and SST/HDMI on the same port */
+       if (used_ports & used_mst_ports)
+               return false;
+
        return true;
 }
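
The conflict check reduces to two port bitmasks: SST/HDMI users may not double-book a port, and no port may appear in both the SST and the MST mask. The same test as a self-contained userspace sketch:

#include <stdbool.h>
#include <stdio.h>

enum { PORT_A, PORT_B, PORT_C };

struct link { int port; bool is_mst; };

/* Reject configurations where one port is used both ways, or where
 * two SST links land on the same port. */
static bool ports_ok(const struct link *links, int n)
{
        unsigned int sst = 0, mst = 0;

        for (int i = 0; i < n; i++) {
                unsigned int bit = 1u << links[i].port;

                if (links[i].is_mst) {
                        mst |= bit;
                } else {
                        if (sst & bit)
                                return false; /* SST port double-booked */
                        sst |= bit;
                }
        }
        return !(sst & mst); /* can't mix MST and SST on one port */
}

int main(void)
{
        struct link bad[]  = { { PORT_B, false }, { PORT_B, true } };
        struct link good[] = { { PORT_A, false }, { PORT_B, true } };

        printf("bad: %d, good: %d\n", ports_ok(bad, 2), ports_ok(good, 2));
        return 0;
}
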
 
@@ -12366,7 +12922,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        struct intel_crtc_scaler_state scaler_state;
        struct intel_dpll_hw_state dpll_hw_state;
        struct intel_shared_dpll *shared_dpll;
-       uint32_t ddi_pll_sel;
        bool force_thru;
 
        /* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12378,7 +12933,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        scaler_state = crtc_state->scaler_state;
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
-       ddi_pll_sel = crtc_state->ddi_pll_sel;
        force_thru = crtc_state->pch_pfit.force_thru;
 
        memset(crtc_state, 0, sizeof *crtc_state);
@@ -12387,7 +12941,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        crtc_state->scaler_state = scaler_state;
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
-       crtc_state->ddi_pll_sel = ddi_pll_sel;
        crtc_state->pch_pfit.force_thru = force_thru;
 }
 
@@ -12475,7 +13028,7 @@ encoder_retry:
 
                encoder = to_intel_encoder(connector_state->best_encoder);
 
-               if (!(encoder->compute_config(encoder, pipe_config))) {
+               if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
@@ -12563,12 +13116,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
        return false;
 }
 
-#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
-       list_for_each_entry((intel_crtc), \
-                           &(dev)->mode_config.crtc_list, \
-                           base.head) \
-               for_each_if (mask & (1 <<(intel_crtc)->pipe))
-
 static bool
 intel_compare_m_n(unsigned int m, unsigned int n,
                  unsigned int m2, unsigned int n2,
@@ -12816,8 +13363,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(double_wide);
 
-       PIPE_CONF_CHECK_X(ddi_pll_sel);
-
        PIPE_CONF_CHECK_P(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -12899,16 +13444,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
                          hw_entry->start, hw_entry->end);
        }
 
-       /* cursor */
-       hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-       sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
-       if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
-               DRM_ERROR("mismatch in DDB state pipe %c cursor "
-                         "(expected (%u,%u), found (%u,%u))\n",
-                         pipe_name(pipe),
-                         sw_entry->start, sw_entry->end,
-                         hw_entry->start, hw_entry->end);
+       /*
+        * cursor
+        * If the cursor plane isn't active, we may not have updated its ddb
+        * allocation. In that case, since the ddb allocation will be updated
+        * once the plane becomes visible, we can skip this check.
+        */
+       if (intel_crtc->cursor_addr) {
+               hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+               sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+               if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+                       DRM_ERROR("mismatch in DDB state pipe %c cursor "
+                                 "(expected (%u,%u), found (%u,%u))\n",
+                                 pipe_name(pipe),
+                                 sw_entry->start, sw_entry->end,
+                                 hw_entry->start, hw_entry->end);
+               }
        }
 }
 
@@ -13523,8 +14075,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                        if (!intel_plane_state->wait_req)
                                continue;
 
-                       ret = __i915_wait_request(intel_plane_state->wait_req,
-                                                 true, NULL, NULL);
+                       ret = i915_wait_request(intel_plane_state->wait_req,
+                                               I915_WAIT_INTERRUPTIBLE,
+                                               NULL, NULL);
                        if (ret) {
                                /* Any hang should be swallowed by the wait */
                                WARN_ON(ret == -EIO);
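
Note on this hunk: __i915_wait_request()'s bool "interruptible" parameter
became a flags word in i915_wait_request(), so I915_WAIT_INTERRUPTIBLE
replaces "true" here, while the bare 0 in the uninterruptible wait later in
intel_atomic_commit_tail() replaces "false". The trailing NULL, NULL
arguments (timeout and RPS client) keep their old meaning.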
@@ -13614,6 +14167,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
        return false;
 }
 
+static void intel_update_crtc(struct drm_crtc *crtc,
+                             struct drm_atomic_state *state,
+                             struct drm_crtc_state *old_crtc_state,
+                             unsigned int *crtc_vblank_mask)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+       bool modeset = needs_modeset(crtc->state);
+
+       if (modeset) {
+               update_scanline_offset(intel_crtc);
+               dev_priv->display.crtc_enable(pipe_config, state);
+       } else {
+               intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+       }
+
+       if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+               intel_fbc_enable(
+                   intel_crtc, pipe_config,
+                   to_intel_plane_state(crtc->primary->state));
+       }
+
+       drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+       if (needs_vblank_wait(pipe_config))
+               *crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+                              unsigned int *crtc_vblank_mask)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       int i;
+
+       for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+               if (!crtc->state->active)
+                       continue;
+
+               intel_update_crtc(crtc, state, old_crtc_state,
+                                 crtc_vblank_mask);
+       }
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+                            unsigned int *crtc_vblank_mask)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+       struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+       unsigned int updated = 0;
+       bool progress;
+       enum pipe pipe;
+
+       /*
+        * Whenever the number of active pipes changes, we need to make sure we
+        * update the pipes in the right order so that their ddb allocations
+        * never overlap with each other in between CRTC updates. Otherwise we'll
+        * cause pipe underruns and other bad stuff.
+        */
+       do {
+               int i;
+               progress = false;
+
+               for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+                       bool vbl_wait = false;
+                       unsigned int cmask = drm_crtc_mask(crtc);
+                       pipe = to_intel_crtc(crtc)->pipe;
+
+                       if (updated & cmask || !crtc->state->active)
+                               continue;
+                       if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+                                                       pipe))
+                               continue;
+
+                       updated |= cmask;
+
+                       /*
+                        * If this is an already active pipe, its DDB changed,
+                        * and this isn't the last pipe that needs updating,
+                        * then we need to wait for a vblank to pass for the
+                        * new ddb allocation to take effect.
+                        */
+                       if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+                           !crtc->state->active_changed &&
+                           intel_state->wm_results.dirty_pipes != updated)
+                               vbl_wait = true;
+
+                       intel_update_crtc(crtc, state, old_crtc_state,
+                                         crtc_vblank_mask);
+
+                       if (vbl_wait)
+                               intel_wait_for_vblank(dev, pipe);
+
+                       progress = true;
+               }
+       } while (progress);
+}
+
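
skl_ddb_allocation_overlaps() and skl_ddb_allocation_equals() are defined
elsewhere in the series. As a minimal sketch of the invariant the loop above
depends on (assuming the half-open [start, end) skl_ddb_entry layout that
verify_wm_state() uses earlier), the core overlap test amounts to:

	/* Sketch only: two half-open DDB ranges overlap iff each one starts
	 * before the other ends. The real helper compares this pipe's new
	 * allocation against every other pipe's current (old) allocation.
	 */
	static bool ddb_entries_overlap(const struct skl_ddb_entry *a,
					const struct skl_ddb_entry *b)
	{
		return a->start < b->end && b->start < a->end;
	}

Each do/while pass commits every pipe whose new allocation no longer collides
with a not-yet-updated pipe's old allocation, so the loop makes progress on
every iteration and terminates once all active pipes have been updated.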
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -13636,8 +14294,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                if (!intel_plane_state->wait_req)
                        continue;
 
-               ret = __i915_wait_request(intel_plane_state->wait_req,
-                                         true, NULL, NULL);
+               ret = i915_wait_request(intel_plane_state->wait_req,
+                                       0, NULL, NULL);
                /* EIO should be eaten; we can't get interrupted in the
                 * worker, and blocking commits have already waited. */
                WARN_ON(ret);
@@ -13673,7 +14331,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (old_crtc_state->active) {
                        intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
-                       dev_priv->display.crtc_disable(crtc);
+                       dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(intel_crtc);
@@ -13702,20 +14360,19 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
                        dev_priv->display.modeset_commit_cdclk(state);
 
+               /*
+                * SKL workaround: bspec recommends we disable the SAGV when we
+                * have more than one pipe enabled
+                */
+               if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+                       skl_disable_sagv(dev_priv);
+
                intel_modeset_verify_disabled(dev);
        }
 
-       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       /* Complete the events for pipes that have now been disabled */
        for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                bool modeset = needs_modeset(crtc->state);
-               struct intel_crtc_state *pipe_config =
-                       to_intel_crtc_state(crtc->state);
-
-               if (modeset && crtc->state->active) {
-                       update_scanline_offset(to_intel_crtc(crtc));
-                       dev_priv->display.crtc_enable(crtc);
-               }
 
                /* Complete events for now-disabled pipes here. */
                if (modeset && !crtc->state->active && crtc->state->event) {
@@ -13725,21 +14382,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                        crtc->state->event = NULL;
                }
-
-               if (!modeset)
-                       intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
-               if (crtc->state->active &&
-                   drm_atomic_get_existing_plane_state(state, crtc->primary))
-                       intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
-               if (crtc->state->active)
-                       drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
-               if (pipe_config->base.active && needs_vblank_wait(pipe_config))
-                       crtc_vblank_mask |= 1 << i;
        }
 
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
@@ -13775,6 +14422,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
        }
 
+       if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+           skl_can_enable_sagv(state))
+               skl_enable_sagv(dev_priv);
+
        drm_atomic_helper_commit_hw_done(state);
 
        if (intel_state->modeset)
@@ -13814,19 +14465,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
        struct drm_plane_state *old_plane_state;
        struct drm_plane *plane;
-       struct drm_i915_gem_object *obj, *old_obj;
-       struct intel_plane *intel_plane;
        int i;
 
-       mutex_lock(&state->dev->struct_mutex);
-       for_each_plane_in_state(state, plane, old_plane_state, i) {
-               obj = intel_fb_obj(plane->state->fb);
-               old_obj = intel_fb_obj(old_plane_state->fb);
-               intel_plane = to_intel_plane(plane);
-
-               i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-       }
-       mutex_unlock(&state->dev->struct_mutex);
+       for_each_plane_in_state(state, plane, old_plane_state, i)
+               i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+                                 intel_fb_obj(plane->state->fb),
+                                 to_intel_plane(plane)->frontbuffer_bit);
 }
 
 /**
@@ -13922,8 +14566,6 @@ out:
                drm_atomic_state_free(state);
 }
 
-#undef for_each_intel_crtc_masked
-
 /*
  * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  *        drm_atomic_helper_legacy_gamma_set() directly.
@@ -13992,7 +14634,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
  */
 int
 intel_prepare_plane_fb(struct drm_plane *plane,
-                      const struct drm_plane_state *new_state)
+                      struct drm_plane_state *new_state)
 {
        struct drm_device *dev = plane->dev;
        struct drm_framebuffer *fb = new_state->fb;
@@ -14051,15 +14693,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                if (ret)
                        DRM_DEBUG_KMS("failed to attach phys object\n");
        } else {
-               ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+               struct i915_vma *vma;
+
+               vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+               if (IS_ERR(vma))
+                       ret = PTR_ERR(vma);
        }
 
        if (ret == 0) {
-               struct intel_plane_state *plane_state =
-                       to_intel_plane_state(new_state);
-
-               i915_gem_request_assign(&plane_state->wait_req,
-                                       obj->last_write_req);
+               to_intel_plane_state(new_state)->wait_req =
+                       i915_gem_active_get(&obj->last_write,
+                                           &obj->base.dev->struct_mutex);
        }
 
        return ret;
@@ -14076,10 +14720,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  */
 void
 intel_cleanup_plane_fb(struct drm_plane *plane,
-                      const struct drm_plane_state *old_state)
+                      struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
        struct intel_plane_state *old_intel_state;
+       struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
        struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
 
@@ -14092,6 +14737,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
            !INTEL_INFO(dev)->cursor_needs_physical))
                intel_unpin_fb_obj(old_state->fb, old_state->rotation);
 
+       i915_gem_request_assign(&intel_state->wait_req, NULL);
        i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }
 
@@ -14126,13 +14772,14 @@ intel_check_primary_plane(struct drm_plane *plane,
                          struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *state)
 {
+       struct drm_i915_private *dev_priv = to_i915(plane->dev);
        struct drm_crtc *crtc = state->base.crtc;
-       struct drm_framebuffer *fb = state->base.fb;
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        bool can_position = false;
+       int ret;
 
-       if (INTEL_INFO(plane->dev)->gen >= 9) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                /* use scaler when colorkey is not required */
                if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
                        min_scale = 1;
@@ -14141,22 +14788,35 @@ intel_check_primary_plane(struct drm_plane *plane,
                can_position = true;
        }
 
-       return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
-                                            &state->dst, &state->clip,
-                                            state->base.rotation,
-                                            min_scale, max_scale,
-                                            can_position, true,
-                                            &state->visible);
+       ret = drm_plane_helper_check_state(&state->base,
+                                          &state->clip,
+                                          min_scale, max_scale,
+                                          can_position, true);
+       if (ret)
+               return ret;
+
+       if (!state->base.fb)
+               return 0;
+
+       if (INTEL_GEN(dev_priv) >= 9) {
+               ret = skl_check_plane_surface(state);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *old_intel_state =
                to_intel_crtc_state(old_crtc_state);
        bool modeset = needs_modeset(crtc->state);
+       enum pipe pipe = intel_crtc->pipe;
 
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_crtc);
@@ -14171,8 +14831,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 
        if (to_intel_crtc_state(crtc->state)->update_pipe)
                intel_update_pipe_config(intel_crtc, old_intel_state);
-       else if (INTEL_INFO(dev)->gen >= 9)
+       else if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(intel_crtc);
+
+               I915_WRITE(PIPE_WM_LINETIME(pipe),
+                          dev_priv->wm.skl_hw.wm_linetime[pipe]);
+       }
 }
 
 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14306,11 +14970,11 @@ fail:
 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
 {
        if (!dev->mode_config.rotation_property) {
-               unsigned long flags = BIT(DRM_ROTATE_0) |
-                       BIT(DRM_ROTATE_180);
+               unsigned long flags = DRM_ROTATE_0 |
+                       DRM_ROTATE_180;
 
                if (INTEL_INFO(dev)->gen >= 9)
-                       flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+                       flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
 
                dev->mode_config.rotation_property =
                        drm_mode_create_rotation_property(dev, flags);
@@ -14326,19 +14990,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
                         struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
 {
-       struct drm_crtc *crtc = crtc_state->base.crtc;
        struct drm_framebuffer *fb = state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        enum pipe pipe = to_intel_plane(plane)->pipe;
        unsigned stride;
        int ret;
 
-       ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
-                                           &state->dst, &state->clip,
-                                           state->base.rotation,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           true, true, &state->visible);
+       ret = drm_plane_helper_check_state(&state->base,
+                                          &state->clip,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          true, true);
        if (ret)
                return ret;
 
@@ -14375,7 +15037,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
         * Refuse to put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
-           state->visible && state->base.crtc_x < 0) {
+           state->base.visible && state->base.crtc_x < 0) {
                DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }
@@ -14407,7 +15069,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev)->cursor_needs_physical)
-               addr = i915_gem_obj_ggtt_offset(obj);
+               addr = i915_gem_object_ggtt_offset(obj, NULL);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -14453,8 +15115,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
                if (!dev->mode_config.rotation_property)
                        dev->mode_config.rotation_property =
                                drm_mode_create_rotation_property(dev,
-                                                       BIT(DRM_ROTATE_0) |
-                                                       BIT(DRM_ROTATE_180));
+                                                       DRM_ROTATE_0 |
+                                                       DRM_ROTATE_180);
                if (dev->mode_config.rotation_property)
                        drm_object_attach_property(&cursor->base.base,
                                dev->mode_config.rotation_property,
@@ -14660,12 +15322,50 @@ static bool intel_crt_present(struct drm_device *dev)
        return true;
 }
 
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+       int pps_num;
+       int pps_idx;
+
+       if (HAS_DDI(dev_priv))
+               return;
+       /*
+        * This w/a is needed at least on CPT/PPT, but to be sure apply it
+        * everywhere registers can be write protected.
+        */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               pps_num = 2;
+       else
+               pps_num = 1;
+
+       for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+               u32 val = I915_READ(PP_CONTROL(pps_idx));
+
+               val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+               I915_WRITE(PP_CONTROL(pps_idx), val);
+       }
+}
+
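
PANEL_UNLOCK_MASK and PANEL_UNLOCK_REGS come from i915_reg.h; for reference,
they are the magic write-protect key in the top half of PP_CONTROL (values
quoted from memory and best treated as illustrative):

	#define PANEL_UNLOCK_REGS	(0xabcd << 16)	/* key that unlocks the PP_* registers */
	#define PANEL_UNLOCK_MASK	(0xffff << 16)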
+static void intel_pps_init(struct drm_i915_private *dev_priv)
+{
+       if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
+               dev_priv->pps_mmio_base = PCH_PPS_BASE;
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->pps_mmio_base = VLV_PPS_BASE;
+       else
+               dev_priv->pps_mmio_base = PPS_BASE;
+
+       intel_pps_unlock_regs_wa(dev_priv);
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
 
+       intel_pps_init(dev_priv);
+
        /*
         * intel_edp_init_connector() depends on this completing first, to
         * prevent the registration of both eDP and LVDS and the incorrect
@@ -14853,7 +15553,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
        drm_framebuffer_cleanup(fb);
        mutex_lock(&dev->struct_mutex);
        WARN_ON(!intel_fb->obj->framebuffer_references--);
-       drm_gem_object_unreference(&intel_fb->obj->base);
+       i915_gem_object_put(intel_fb->obj);
        mutex_unlock(&dev->struct_mutex);
        kfree(intel_fb);
 }
@@ -14933,24 +15633,27 @@ static int intel_framebuffer_init(struct drm_device *dev,
                                  struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned int aligned_height;
+       unsigned int tiling = i915_gem_object_get_tiling(obj);
        int ret;
        u32 pitch_limit, stride_alignment;
+       char *format_name;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
-               /* Enforce that fb modifier and tiling mode match, but only for
-                * X-tiled. This is needed for FBC. */
-               if (!!(obj->tiling_mode == I915_TILING_X) !=
-                   !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+               /*
+                * If there's a fence, enforce that
+                * the fb modifier and tiling mode match.
+                */
+               if (tiling != I915_TILING_NONE &&
+                   tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
                        return -EINVAL;
                }
        } else {
-               if (obj->tiling_mode == I915_TILING_X)
+               if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
-               else if (obj->tiling_mode == I915_TILING_Y) {
+               } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG("No Y tiling for legacy addfb\n");
                        return -EINVAL;
                }
@@ -14974,6 +15677,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
                return -EINVAL;
        }
 
+       /*
+        * gen2/3 display engine uses the fence if present,
+        * so the tiling mode must match the fb modifier exactly.
+        */
+       if (INTEL_INFO(dev_priv)->gen < 4 &&
+           tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+               DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
+               return -EINVAL;
+       }
+
        stride_alignment = intel_fb_stride_alignment(dev_priv,
                                                     mode_cmd->modifier[0],
                                                     mode_cmd->pixel_format);
@@ -14993,10 +15706,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
                return -EINVAL;
        }
 
-       if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
-           mode_cmd->pitches[0] != obj->stride) {
+       /*
+        * If there's a fence, enforce that
+        * the fb pitch and fence stride match.
+        */
+       if (tiling != I915_TILING_NONE &&
+           mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
                DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-                         mode_cmd->pitches[0], obj->stride);
+                         mode_cmd->pitches[0],
+                         i915_gem_object_get_stride(obj));
                return -EINVAL;
        }
 
@@ -15009,16 +15727,18 @@ static int intel_framebuffer_init(struct drm_device *dev,
                break;
        case DRM_FORMAT_XRGB1555:
                if (INTEL_INFO(dev)->gen > 3) {
-                       DRM_DEBUG("unsupported pixel format: %s\n",
-                                 drm_get_format_name(mode_cmd->pixel_format));
+                       format_name = drm_get_format_name(mode_cmd->pixel_format);
+                       DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_ABGR8888:
                if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
                    INTEL_INFO(dev)->gen < 9) {
-                       DRM_DEBUG("unsupported pixel format: %s\n",
-                                 drm_get_format_name(mode_cmd->pixel_format));
+                       format_name = drm_get_format_name(mode_cmd->pixel_format);
+                       DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
                }
                break;
@@ -15026,15 +15746,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
                if (INTEL_INFO(dev)->gen < 4) {
-                       DRM_DEBUG("unsupported pixel format: %s\n",
-                                 drm_get_format_name(mode_cmd->pixel_format));
+                       format_name = drm_get_format_name(mode_cmd->pixel_format);
+                       DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_ABGR2101010:
                if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-                       DRM_DEBUG("unsupported pixel format: %s\n",
-                                 drm_get_format_name(mode_cmd->pixel_format));
+                       format_name = drm_get_format_name(mode_cmd->pixel_format);
+                       DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
                }
                break;
@@ -15043,14 +15765,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
                if (INTEL_INFO(dev)->gen < 5) {
-                       DRM_DEBUG("unsupported pixel format: %s\n",
-                                 drm_get_format_name(mode_cmd->pixel_format));
+                       format_name = drm_get_format_name(mode_cmd->pixel_format);
+                       DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+                       kfree(format_name);
                        return -EINVAL;
                }
                break;
        default:
-               DRM_DEBUG("unsupported pixel format: %s\n",
-                         drm_get_format_name(mode_cmd->pixel_format));
+               format_name = drm_get_format_name(mode_cmd->pixel_format);
+               DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
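
The lookup/print/kfree() triplet repeated throughout this switch follows from
drm_get_format_name() now returning a kmalloc'd string that the caller must
free. A hypothetical helper (not part of this patch) could collapse the
pattern into one place:

	/* Hypothetical convenience helper, for illustration only. */
	static int unsupported_pixel_format(u32 pixel_format)
	{
		char *name = drm_get_format_name(pixel_format);

		DRM_DEBUG("unsupported pixel format: %s\n", name);
		kfree(name);
		return -EINVAL;
	}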
 
@@ -15058,17 +15782,12 @@ static int intel_framebuffer_init(struct drm_device *dev,
        if (mode_cmd->offsets[0] != 0)
                return -EINVAL;
 
-       aligned_height = intel_fb_align_height(dev, mode_cmd->height,
-                                              mode_cmd->pixel_format,
-                                              mode_cmd->modifier[0]);
-       /* FIXME drm helper for size checks (especially planar formats)? */
-       if (obj->base.size < aligned_height * mode_cmd->pitches[0])
-               return -EINVAL;
-
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
 
-       intel_fill_fb_info(dev_priv, &intel_fb->base);
+       ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
+       if (ret)
+               return ret;
 
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
@@ -15090,13 +15809,13 @@ intel_user_framebuffer_create(struct drm_device *dev,
        struct drm_i915_gem_object *obj;
        struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
-       obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
-       if (&obj->base == NULL)
+       obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+       if (!obj)
                return ERR_PTR(-ENOENT);
 
        fb = intel_framebuffer_create(dev, &mode_cmd, obj);
        if (IS_ERR(fb))
-               drm_gem_object_unreference_unlocked(&obj->base);
+               i915_gem_object_put_unlocked(obj);
 
        return fb;
 }
@@ -15279,6 +15998,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                        skl_modeset_calc_cdclk;
        }
 
+       if (dev_priv->info.gen >= 9)
+               dev_priv->display.update_crtcs = skl_update_crtcs;
+       else
+               dev_priv->display.update_crtcs = intel_update_crtcs;
+
        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
                dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -15480,15 +16204,16 @@ static void intel_init_quirks(struct drm_device *dev)
 static void i915_disable_vga(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_put(pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);
 
        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
@@ -15504,7 +16229,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
        dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
        intel_init_clock_gating(dev);
-       intel_enable_gt_powersave(dev_priv);
 }
 
 /*
@@ -15771,15 +16495,22 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
        return false;
 }
 
-static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;
 
        for_each_connector_on_encoder(dev, &encoder->base, connector)
-               return true;
+               return connector;
 
-       return false;
+       return NULL;
+}
+
+static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
+                             enum transcoder pch_transcoder)
+{
+       return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+               (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
 }
 
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
@@ -15825,7 +16556,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * Temporarily change the plane mapping and disable everything
                 * ...  */
                plane = crtc->plane;
-               to_intel_plane_state(crtc->base.primary->state)->visible = true;
+               to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
                crtc->plane = !plane;
                intel_crtc_disable_noatomic(&crtc->base);
                crtc->plane = plane;
@@ -15860,14 +16591,23 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
-               crtc->pch_fifo_underrun_disabled = true;
+               /*
+                * We track the PCH transcoder underrun reporting state
+                * within the crtc: the crtc for pipe A houses the underrun
+                * reporting state for PCH transcoder A, the crtc for pipe B
+                * houses it for PCH transcoder B, and so on. LPT-H has only
+                * PCH transcoder A, and marking underrun reporting as
+                * disabled for the non-existent
+                * PCH transcoders B and C would prevent enabling the south
+                * error interrupt (see cpt_can_enable_serr_int()).
+                */
+               if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+                       crtc->pch_fifo_underrun_disabled = true;
        }
 }
 
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
        struct intel_connector *connector;
-       struct drm_device *dev = encoder->base.dev;
 
        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
@@ -15875,7 +16615,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
        bool has_active_crtc = encoder->base.crtc &&
                to_intel_crtc(encoder->base.crtc)->active;
 
-       if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
+       connector = intel_encoder_find_connector(encoder);
+       if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);
@@ -15884,12 +16625,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (encoder->base.crtc) {
+                       struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
-                       encoder->disable(encoder);
+                       encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                        if (encoder->post_disable)
-                               encoder->post_disable(encoder);
+                               encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                }
                encoder->base.crtc = NULL;
 
@@ -15897,12 +16640,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */
-               for_each_intel_connector(dev, connector) {
-                       if (connector->encoder != encoder)
-                               continue;
-                       connector->base.dpms = DRM_MODE_DPMS_OFF;
-                       connector->base.encoder = NULL;
-               }
+
+               connector->base.dpms = DRM_MODE_DPMS_OFF;
+               connector->base.encoder = NULL;
        }
        /* Enabled encoders without active connectors will be fixed in
         * the crtc fixup. */
@@ -15952,10 +16692,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
        struct intel_plane_state *plane_state =
                to_intel_plane_state(primary->state);
 
-       plane_state->visible = crtc->active &&
+       plane_state->base.visible = crtc->active &&
                primary_get_hw_state(to_intel_plane(primary));
 
-       if (plane_state->visible)
+       if (plane_state->base.visible)
                crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
 }
 
@@ -16174,9 +16914,10 @@ void intel_display_resume(struct drm_device *dev)
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
-       bool setup = false;
 
        dev_priv->modeset_restore_state = NULL;
+       if (state)
+               state->acquire_ctx = &ctx;
 
        /*
         * This is a kludge because with real atomic modeset mode_config.mutex
@@ -16187,43 +16928,17 @@ void intel_display_resume(struct drm_device *dev)
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);
 
-retry:
-       ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
-       if (ret == 0 && !setup) {
-               setup = true;
-
-               intel_modeset_setup_hw_state(dev);
-               i915_redisable_vga(dev);
-       }
-
-       if (ret == 0 && state) {
-               struct drm_crtc_state *crtc_state;
-               struct drm_crtc *crtc;
-               int i;
-
-               state->acquire_ctx = &ctx;
-
-               /* ignore any reset values/BIOS leftovers in the WM registers */
-               to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
-               for_each_crtc_in_state(state, crtc, crtc_state, i) {
-                       /*
-                        * Force recalculation even if we restore
-                        * current state. With fast modeset this may not result
-                        * in a modeset when the state is compatible.
-                        */
-                       crtc_state->mode_changed = true;
-               }
-
-               ret = drm_atomic_commit(state);
-       }
+       while (1) {
+               ret = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (ret != -EDEADLK)
+                       break;
 
-       if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
-               goto retry;
        }
 
+       if (!ret)
+               ret = __intel_display_resume(dev, state);
+
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);
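
__intel_display_resume() itself is outside this hunk; reconstructed from the
inline logic deleted above, a plausible sketch (not the verbatim helper) is:

	static int __intel_display_resume(struct drm_device *dev,
					  struct drm_atomic_state *state)
	{
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);

		if (!state)
			return 0;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore current
			 * state. With fast modeset this may not result in a
			 * modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		return drm_atomic_commit(state);
	}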
@@ -16239,7 +16954,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
-       int ret;
 
        intel_init_gt_powersave(dev_priv);
 
@@ -16253,15 +16967,17 @@ void intel_modeset_gem_init(struct drm_device *dev)
         * for this.
         */
        for_each_crtc(dev, c) {
+               struct i915_vma *vma;
+
                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;
 
                mutex_lock(&dev->struct_mutex);
-               ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
                                                 c->primary->state->rotation);
                mutex_unlock(&dev->struct_mutex);
-               if (ret) {
+               if (IS_ERR(vma)) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
index 21b04c3eda41bf6f7685b6080b9e4a5228f3a116..acd0c51f74d5e2621e62903c1329a44cfec8d648 100644 (file)
@@ -190,6 +190,29 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
        return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static int
+intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       int max_dotclk = dev_priv->max_dotclk_freq;
+       int ds_max_dotclk;
+
+       int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+
+       if (type != DP_DS_PORT_TYPE_VGA)
+               return max_dotclk;
+
+       ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
+                                                   intel_dp->downstream_ports);
+
+       if (ds_max_dotclk != 0)
+               max_dotclk = min(max_dotclk, ds_max_dotclk);
+
+       return max_dotclk;
+}
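
Worked example (figures illustrative, not from the patch): a DP-to-VGA dongle
advertising a 180 MHz maximum pixel clock clamps a platform max_dotclk_freq of
540000 kHz down to 180000 kHz, so intel_dp_mode_valid() below rejects modes the
dongle cannot convert; a dongle reporting 0 (no detailed cap info available)
leaves the platform limit untouched.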
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
@@ -199,7 +222,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
-       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+       int max_dotclk;
+
+       max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
@@ -256,6 +281,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp);
+static void
+intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
 static void pps_lock(struct intel_dp *intel_dp)
 {
@@ -463,13 +490,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
 {
-       return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
+       return I915_READ(PP_STATUS(pipe)) & PP_ON;
 }
 
 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
 {
-       return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+       return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 }
 
 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -486,7 +513,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
        enum pipe pipe;
 
        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
-               u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+               u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;
 
                if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -583,30 +610,21 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
                                    struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
 {
+       int pps_idx = 0;
+
        memset(regs, 0, sizeof(*regs));
 
-       if (IS_BROXTON(dev_priv)) {
-               int idx = bxt_power_sequencer_idx(intel_dp);
-
-               regs->pp_ctrl = BXT_PP_CONTROL(idx);
-               regs->pp_stat = BXT_PP_STATUS(idx);
-               regs->pp_on = BXT_PP_ON_DELAYS(idx);
-               regs->pp_off = BXT_PP_OFF_DELAYS(idx);
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               regs->pp_ctrl = PCH_PP_CONTROL;
-               regs->pp_stat = PCH_PP_STATUS;
-               regs->pp_on = PCH_PP_ON_DELAYS;
-               regs->pp_off = PCH_PP_OFF_DELAYS;
-               regs->pp_div = PCH_PP_DIVISOR;
-       } else {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+       if (IS_BROXTON(dev_priv))
+               pps_idx = bxt_power_sequencer_idx(intel_dp);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               pps_idx = vlv_power_sequencer_pipe(intel_dp);
 
-               regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
-               regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
-               regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
-               regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
-               regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
-       }
+       regs->pp_ctrl = PP_CONTROL(pps_idx);
+       regs->pp_stat = PP_STATUS(pps_idx);
+       regs->pp_on = PP_ON_DELAYS(pps_idx);
+       regs->pp_off = PP_OFF_DELAYS(pps_idx);
+       if (!IS_BROXTON(dev_priv))
+               regs->pp_div = PP_DIVISOR(pps_idx);
 }
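
The unified PP_*(pps_idx) lookups above replace the per-platform register
names. A sketch of how such indexed macros can be built on top of the
pps_mmio_base that intel_pps_init() selects in intel_display.c, assuming the
usual 0x100 stride between power-sequencer instances (the arithmetic is an
assumption; the names mirror the diff):

	/* Sketch: offset each PPS register from the per-platform MMIO base. */
	#define _MMIO_PPS(pps_idx, base)	\
		_MMIO(dev_priv->pps_mmio_base - PPS_BASE + (base) + (pps_idx) * 0x100)

	#define PP_STATUS(pps_idx)	_MMIO_PPS(pps_idx, _PP_STATUS)
	#define PP_CONTROL(pps_idx)	_MMIO_PPS(pps_idx, _PP_CONTROL)
	#define PP_ON_DELAYS(pps_idx)	_MMIO_PPS(pps_idx, _PP_ON_DELAYS)
	#define PP_OFF_DELAYS(pps_idx)	_MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
	#define PP_DIVISOR(pps_idx)	_MMIO_PPS(pps_idx, _PP_DIVISOR)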
 
 static i915_reg_t
@@ -651,8 +669,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                i915_reg_t pp_ctrl_reg, pp_div_reg;
                u32 pp_div;
 
-               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
-               pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
+               pp_ctrl_reg = PP_CONTROL(pipe);
+               pp_div_reg  = PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;
 
@@ -1041,10 +1059,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                if (WARN_ON(txsize > 20))
                        return -E2BIG;
 
+               WARN_ON(!msg->buffer != !msg->size);
+
                if (msg->buffer)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-               else
-                       WARN_ON(msg->size);
 
                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
@@ -1250,7 +1268,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+intel_dp_aux_init(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
@@ -1426,6 +1444,44 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
+static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+{
+       uint8_t rev;
+       int len;
+
+       if ((drm_debug & DRM_UT_KMS) == 0)
+               return;
+
+       if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+             DP_DWN_STRM_PORT_PRESENT))
+               return;
+
+       len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
+       if (len < 0)
+               return;
+
+       DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+}
+
+static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+{
+       uint8_t rev[2];
+       int len;
+
+       if ((drm_debug & DRM_UT_KMS) == 0)
+               return;
+
+       if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+             DP_DWN_STRM_PORT_PRESENT))
+               return;
+
+       len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
+       if (len < 0)
+               return;
+
+       DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+}
+
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1447,7 +1503,7 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
        if (WARN_ON(len <= 0))
                return 162000;
 
-       return rates[rate_to_index(0, rates) - 1];
+       return rates[len - 1];
 }
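
The old expression appears to have recovered the array length by scanning for
a zero entry via rate_to_index(0, rates); rates[len - 1] simply reuses the
common-rates length (len) computed just above. For example, with common rates
{162000, 270000, 540000} and len == 3, the function returns 540000.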
 
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@@ -1468,9 +1524,24 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
        }
 }
 
+static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
+                               struct intel_crtc_state *pipe_config)
+{
+       int bpp, bpc;
+
+       bpp = pipe_config->pipe_bpp;
+       bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+
+       if (bpc > 0)
+               bpp = min(bpp, 3*bpc);
+
+       return bpp;
+}
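
For instance, a downstream port reporting at most 8 bpc clamps a 30 bpp
(10 bpc) pipe to 3 * 8 = 24 bpp, while bpc <= 0 (no detailed cap information)
leaves pipe_bpp untouched.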
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
-                       struct intel_crtc_state *pipe_config)
+                       struct intel_crtc_state *pipe_config,
+                       struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1533,7 +1604,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
-       bpp = pipe_config->pipe_bpp;
+       bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
        if (is_edp(intel_dp)) {
 
                /* Get bpp from vbt only for panels that dont have bpp in edid */
@@ -1647,22 +1718,28 @@ found:
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *pipe_config)
+                             int link_rate, uint8_t lane_count,
+                             bool link_mst)
 {
-       intel_dp->link_rate = pipe_config->port_clock;
-       intel_dp->lane_count = pipe_config->lane_count;
+       intel_dp->link_rate = link_rate;
+       intel_dp->lane_count = lane_count;
+       intel_dp->link_mst = link_mst;
 }
 
-static void intel_dp_prepare(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
-       intel_dp_set_link_params(intel_dp, crtc->config);
+       intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
+                                pipe_config->lane_count,
+                                intel_crtc_has_type(pipe_config,
+                                                    INTEL_OUTPUT_DP_MST));
 
        /*
         * There are four kinds of DP registers:
@@ -1688,7 +1765,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 
        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-       intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+       intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
 
        /* Split out the IBX/CPU vs CPT settings */
 
@@ -1716,7 +1793,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-                   !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
+                   !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1835,7 +1912,8 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
        lockdep_assert_held(&dev_priv->pps_mutex);
 
        control = I915_READ(_pp_ctrl_reg(intel_dp));
-       if (!IS_BROXTON(dev)) {
+       if (WARN_ON(!HAS_DDI(dev_priv) &&
+                   (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
                control &= ~PANEL_UNLOCK_MASK;
                control |= PANEL_UNLOCK_REGS;
        }
@@ -1956,7 +2034,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
-       if ((pp & POWER_TARGET_ON) == 0)
+       if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();
 
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
@@ -2043,7 +2121,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
                POSTING_READ(pp_ctrl_reg);
        }
 
-       pp |= POWER_TARGET_ON;
+       pp |= PANEL_POWER_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;
 
@@ -2095,7 +2173,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
-       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+       pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);
 
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2254,10 +2332,10 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
 
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
+                               struct intel_crtc_state *pipe_config)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2265,11 +2343,11 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
        assert_edp_pll_disabled(dev_priv);
 
        DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-                     crtc->config->port_clock);
+                     pipe_config->port_clock);
 
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
-       if (crtc->config->port_clock == 162000)
+       if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
@@ -2478,16 +2556,17 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static void intel_disable_dp(struct intel_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (crtc->config->has_audio)
+       if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder);
 
-       if (HAS_PSR(dev) && !HAS_DDI(dev))
+       if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
                intel_psr_disable(intel_dp);
 
        /* Make sure the panel is off before trying to change the mode. But also
@@ -2498,11 +2577,13 @@ static void intel_disable_dp(struct intel_encoder *encoder)
        intel_edp_panel_off(intel_dp);
 
        /* disable the port before the pipe on g4x */
-       if (INTEL_INFO(dev)->gen < 5)
+       if (INTEL_GEN(dev_priv) < 5)
                intel_dp_link_down(intel_dp);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder)
+static void ilk_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2514,14 +2595,18 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
                ironlake_edp_pll_off(intel_dp);
 }
 
-static void vlv_post_disable_dp(struct intel_encoder *encoder)
+static void vlv_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
        intel_dp_link_down(intel_dp);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -2547,6 +2632,10 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
+       if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+               DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
+                             dp_train_pat & DP_TRAINING_PATTERN_MASK);
+
        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));
 
@@ -2588,7 +2677,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
-                       DRM_ERROR("DP training pattern 3 not supported\n");
+                       DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }
@@ -2613,7 +2702,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
-                               DRM_ERROR("DP training pattern 3 not supported\n");
+                               DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
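
The two hunks above downgrade the "training pattern 3 not supported" message
from DRM_ERROR to DRM_DEBUG_KMS: on CPT and on pre-CHV platforms the TPS3 ->
TPS2 fallback is expected behaviour, not a failure. A minimal standalone
sketch of the fallback rule (the helper name and flag are illustrative, not
part of the patch):

    #include <stdbool.h>

    enum train_pat { TPS1 = 1, TPS2 = 2, TPS3 = 3 };

    /* TPS3 quietly degrades to TPS2 on sources without TPS3 support. */
    static enum train_pat pick_pattern(enum train_pat requested, bool has_tps3)
    {
            if (requested == TPS3 && !has_tps3)
                    return TPS2;
            return requested;
    }
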
@@ -2621,19 +2710,15 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        }
 }
 
-static void intel_dp_enable_port(struct intel_dp *intel_dp)
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+                                struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc =
-               to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
        /* enable with pattern 1 (as per spec) */
-       _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-                                DP_TRAINING_PATTERN_1);
 
-       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-       POSTING_READ(intel_dp->output_reg);
+       intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
 
        /*
         * Magic for VLV/CHV. We _must_ first set up the register
@@ -2642,14 +2727,15 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
-       if (crtc->config->has_audio)
+       if (old_crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
 }
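
The "Magic for VLV/CHV" comment above encodes a two-step write discipline:
the DP register is first fully programmed with the port still disabled, and
only a second write, flushed by a posting read, sets DP_PORT_EN (and the
audio bit); otherwise link training may fail when the power sequencer was
freshly assigned to this port. A self-contained sketch of that discipline
(bit positions per my reading of i915_reg.h; MMIO replaced by a variable):

    #include <stdint.h>

    static volatile uint32_t dp_reg;    /* stand-in for the DP MMIO register */

    static void write_flush(uint32_t v)
    {
            dp_reg = v;                 /* I915_WRITE ... */
            (void)dp_reg;               /* ... then POSTING_READ to flush */
    }

    static void enable_port(uint32_t val, int has_audio)
    {
            write_flush(val);           /* 1st write: setup, port disabled */

            val |= 1u << 31;            /* DP_PORT_EN */
            if (has_audio)
                    val |= 1u << 6;     /* DP_AUDIO_OUTPUT_ENABLE */
            write_flush(val);           /* 2nd write: actually enable */
    }
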
 
-static void intel_enable_dp(struct intel_encoder *encoder)
+static void intel_enable_dp(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -2666,7 +2752,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);
 
-       intel_dp_enable_port(intel_dp);
+       intel_dp_enable_port(intel_dp, pipe_config);
 
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
@@ -2678,7 +2764,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
                unsigned int lane_mask = 0x0;
 
                if (IS_CHERRYVIEW(dev))
-                       lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+                       lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
 
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
@@ -2688,22 +2774,26 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 
-       if (crtc->config->has_audio) {
+       if (pipe_config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder);
        }
 }
 
-static void g4x_enable_dp(struct intel_encoder *encoder)
+static void g4x_enable_dp(struct intel_encoder *encoder,
+                         struct intel_crtc_state *pipe_config,
+                         struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
        intel_edp_backlight_on(intel_dp);
 }
 
-static void vlv_enable_dp(struct intel_encoder *encoder)
+static void vlv_enable_dp(struct intel_encoder *encoder,
+                         struct intel_crtc_state *pipe_config,
+                         struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
@@ -2711,16 +2801,18 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
        intel_psr_enable(intel_dp);
 }
 
-static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        /* Only ilk+ has port A */
        if (port == PORT_A)
-               ironlake_edp_pll_on(intel_dp);
+               ironlake_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2728,7 +2820,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
-       i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+       i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 
        edp_panel_vdd_off_sync(intel_dp);
 
@@ -2826,38 +2918,48 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        vlv_phy_pre_encoder_enable(encoder);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
 }
 
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_pre_enable_dp(struct intel_encoder *encoder)
+static void chv_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        chv_phy_pre_encoder_enable(encoder);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
 
        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
 }
 
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        chv_phy_post_pll_disable(encoder);
 }
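
A pattern worth calling out: every enable/disable hook in this series gains
explicit crtc_state and conn_state arguments. The motivation is atomic
modesetting: the old code reached through encoder->base.crtc into
crtc->config, which is only valid for the currently committed state, whereas
the hooks may need the state being committed (or, on the disable paths, the
old state). The shape of the change, with simplified stand-in types:

    struct crtc_state;          /* stands in for intel_crtc_state */
    struct conn_state;          /* stands in for drm_connector_state */
    struct encoder;

    /* old: void (*enable)(struct encoder *); state dug out of the crtc.
     * new: the intended state is handed to the hook directly. */
    typedef void (*enable_hook)(struct encoder *enc,
                                struct crtc_state *crtc_state,
                                struct conn_state *conn_state);
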
@@ -3395,84 +3497,67 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_get_dpcd(struct intel_dp *intel_dp)
+intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
        if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
                             sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */
 
        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
-       if (intel_dp->dpcd[DP_DPCD_REV] == 0)
-               return false; /* DPCD not present */
+       return intel_dp->dpcd[DP_DPCD_REV] != 0;
+}
 
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
-                            &intel_dp->sink_count, 1) < 0)
-               return false;
+static bool
+intel_edp_init_dpcd(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 
-       /*
-        * Sink count can change between short pulse hpd hence
-        * a member variable in intel_dp will track any changes
-        * between short pulse interrupts.
-        */
-       intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+       /* this function is meant to be called only once */
+       WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
 
-       /*
-        * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
-        * a dongle is present but no display. Unless we require to know
-        * if a dongle is present or not, we don't need to update
-        * downstream port information. So, an early return here saves
-        * time from performing other operations which are not required.
-        */
-       if (!is_edp(intel_dp) && !intel_dp->sink_count)
+       if (!intel_dp_read_dpcd(intel_dp))
                return false;
 
-       /* Check if the panel supports PSR */
-       memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-       if (is_edp(intel_dp)) {
-               drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
-                                intel_dp->psr_dpcd,
-                                sizeof(intel_dp->psr_dpcd));
-               if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
-                       dev_priv->psr.sink_support = true;
-                       DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
-               }
-
-               if (INTEL_INFO(dev)->gen >= 9 &&
-                       (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-                       uint8_t frame_sync_cap;
-
-                       dev_priv->psr.sink_support = true;
-                       drm_dp_dpcd_read(&intel_dp->aux,
-                                        DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-                                        &frame_sync_cap, 1);
-                       dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
-                       /* PSR2 needs frame sync as well */
-                       dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
-                       DRM_DEBUG_KMS("PSR2 %s on sink",
-                               dev_priv->psr.psr2_support ? "supported" : "not supported");
-               }
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+               dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+                       DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 
-               /* Read the eDP Display control capabilities registers */
-               memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
-               if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-                               (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
-                                               intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                                                               sizeof(intel_dp->edp_dpcd)))
-                       DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
-                                       intel_dp->edp_dpcd);
-       }
-
-       DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
-                     yesno(intel_dp_source_supports_hbr2(intel_dp)),
-                     yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+       /* Check if the panel supports PSR */
+       drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+                        intel_dp->psr_dpcd,
+                        sizeof(intel_dp->psr_dpcd));
+       if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+               dev_priv->psr.sink_support = true;
+               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+       }
+
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+               uint8_t frame_sync_cap;
+
+               dev_priv->psr.sink_support = true;
+               drm_dp_dpcd_read(&intel_dp->aux,
+                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+                                &frame_sync_cap, 1);
+               dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
+               /* PSR2 needs frame sync as well */
+               dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+               DRM_DEBUG_KMS("PSR2 %s on sink",
+                             dev_priv->psr.psr2_support ? "supported" : "not supported");
+       }
+
+       /* Read the eDP Display control capabilities registers */
+       if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+           drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+                            intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+                            sizeof(intel_dp->edp_dpcd))
+               DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+                             intel_dp->edp_dpcd);
 
        /* Intermediate frequency support */
-       if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
+       if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDP v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;
 
@@ -3491,7 +3576,36 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                intel_dp->num_sink_rates = i;
        }
 
-       intel_dp_print_rates(intel_dp);
+       return true;
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+       if (!intel_dp_read_dpcd(intel_dp))
+               return false;
+
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+                            &intel_dp->sink_count, 1) < 0)
+               return false;
+
+       /*
+        * Sink count can change between short pulse hpd interrupts,
+        * hence a member variable in intel_dp tracks it across them.
+        */
+       intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+       /*
+        * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+        * a dongle is present but no display. Unless we need to know
+        * whether a dongle is present, there is no point in updating the
+        * downstream port information, so an early return here skips
+        * work that is not required.
+        */
+       if (!is_edp(intel_dp) && !intel_dp->sink_count)
+               return false;
 
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
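
The sink-count logic moved into intel_dp_get_dpcd() keeps the
DP_GET_SINK_COUNT decode, which has a quirk worth spelling out: in the DPCD
SINK_COUNT register bit 6 is CP_READY, so the count is bits 5:0 with bit 7
as the most significant bit. A self-contained sketch of the same decode (per
my reading of the macro in drm_dp_helper.h), with one worked value:

    #include <assert.h>

    static unsigned int get_sink_count(unsigned char reg)
    {
            return ((reg & 0x80) >> 1) | (reg & 0x3f);  /* skip CP_READY */
    }

    int main(void)
    {
            assert(get_sink_count(0x81) == 65);  /* MSB set, low bits = 1 */
            return 0;
    }
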
@@ -3526,7 +3640,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_probe_mst(struct intel_dp *intel_dp)
+intel_dp_can_mst(struct intel_dp *intel_dp)
 {
        u8 buf[1];
 
@@ -3539,18 +3653,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
                return false;
 
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
-               if (buf[0] & DP_MST_CAP) {
-                       DRM_DEBUG_KMS("Sink is MST capable\n");
-                       intel_dp->is_mst = true;
-               } else {
-                       DRM_DEBUG_KMS("Sink is not MST capable\n");
-                       intel_dp->is_mst = false;
-               }
-       }
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
+               return false;
+
+       return buf[0] & DP_MST_CAP;
+}
+
+static void
+intel_dp_configure_mst(struct intel_dp *intel_dp)
+{
+       if (!i915.enable_dp_mst)
+               return;
 
-       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
-       return intel_dp->is_mst;
+       if (!intel_dp->can_mst)
+               return;
+
+       intel_dp->is_mst = intel_dp_can_mst(intel_dp);
+
+       if (intel_dp->is_mst)
+               DRM_DEBUG_KMS("Sink is MST capable\n");
+       else
+               DRM_DEBUG_KMS("Sink is not MST capable\n");
+
+       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                       intel_dp->is_mst);
 }
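
The MST rework separates capability from policy: intel_dp_can_mst() is a
pure sink query (DPCD rev >= 1.2 and the DP_MST_CAP bit), while
intel_dp_configure_mst() layers the i915.enable_dp_mst module parameter and
the source's own capability on top before touching the topology manager.
The decision order, as a standalone sketch (names illustrative):

    #include <stdbool.h>

    static bool mst_should_enable(bool module_param,
                                  bool source_can_mst,
                                  bool sink_can_mst)
    {
            if (!module_param || !source_can_mst)
                    return false;       /* policy gates first */
            return sink_can_mst;        /* then the sink capability */
    }
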
 
 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
@@ -3909,7 +4035,7 @@ static bool
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       u8 sink_irq_vector;
+       u8 sink_irq_vector = 0;
        u8 old_sink_count = intel_dp->sink_count;
        bool ret;
 
@@ -3936,7 +4062,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 
        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+           sink_irq_vector != 0) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
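
Both IRQ-vector readers in this patch now initialise sink_irq_vector to 0
and additionally insist on a nonzero value, so a failed or empty
DEVICE_SERVICE_IRQ_VECTOR read can no longer dispatch interrupt handling
based on an uninitialised stack byte. The hardened pattern as a
self-contained sketch (the reader is a stub for the DPCD access):

    #include <stdbool.h>

    static bool read_irq_vector(unsigned char *v)
    {
            *v = 0;                     /* stand-in for the AUX read */
            return true;
    }

    static void short_pulse(void)
    {
            unsigned char vec = 0;      /* never left uninitialised */

            if (read_irq_vector(&vec) && vec != 0) {
                    /* clear the interrupt source, then service it */
            }
    }
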
@@ -3980,6 +4107,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
                connector_status_connected : connector_status_disconnected;
        }
 
+       if (intel_dp_can_mst(intel_dp))
+               return connector_status_connected;
+
        /* If no HPD, poke DDC gently */
        if (drm_probe_ddc(&intel_dp->aux.ddc))
                return connector_status_connected;
@@ -4148,7 +4278,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
  *
  * Return %true if @port is connected, %false otherwise.
  */
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
                                         struct intel_digital_port *port)
 {
        if (HAS_PCH_IBX(dev_priv))
@@ -4217,8 +4347,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
-       bool ret;
-       u8 sink_irq_vector;
+       u8 sink_irq_vector = 0;
 
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(to_i915(dev), power_domain);
@@ -4252,10 +4381,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DP;
 
+       DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
+                     yesno(intel_dp_source_supports_hbr2(intel_dp)),
+                     yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+
+       intel_dp_print_rates(intel_dp);
+
        intel_dp_probe_oui(intel_dp);
 
-       ret = intel_dp_probe_mst(intel_dp);
-       if (ret) {
+       intel_dp_print_hw_revision(intel_dp);
+       intel_dp_print_sw_revision(intel_dp);
+
+       intel_dp_configure_mst(intel_dp);
+
+       if (intel_dp->is_mst) {
                /*
                 * If we are in MST mode then this connector
                 * won't appear connected or have anything
@@ -4290,7 +4429,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+           intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+           sink_irq_vector != 0) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -4630,13 +4770,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
 
        pps_lock(intel_dp);
 
-       /*
-        * Read out the current power sequencer assignment,
-        * in case the BIOS did something with it.
-        */
-       if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
-               vlv_initial_power_sequencer_setup(intel_dp);
-
+       /* Reinit the power sequencer, in case BIOS did something with it. */
+       intel_dp_pps_init(encoder->dev, intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
 
        pps_unlock(intel_dp);
@@ -4984,9 +5119,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                      I915_READ(regs.pp_div));
 }
 
+static void intel_dp_pps_init(struct drm_device *dev,
+                             struct intel_dp *intel_dp)
+{
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+               vlv_initial_power_sequencer_setup(intel_dp);
+       } else {
+               intel_dp_init_panel_power_sequencer(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       }
+}
+
 /**
  * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev: DRM device
+ * @dev_priv: i915 device
+ * @crtc_state: a pointer to the active intel_crtc_state
  * @refresh_rate: RR to be programmed
  *
  * This function gets called when refresh rate (RR) has to be changed from
@@ -4996,14 +5143,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  *
  * The caller of this function needs to take a lock on dev_priv->drrs.
  */
-static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+                                   struct intel_crtc_state *crtc_state,
+                                   int refresh_rate)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
-       struct intel_crtc_state *config = NULL;
-       struct intel_crtc *intel_crtc = NULL;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
        if (refresh_rate <= 0) {
@@ -5030,8 +5177,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                return;
        }
 
-       config = intel_crtc->config;
-
        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
                return;
@@ -5047,12 +5192,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                return;
        }
 
-       if (!intel_crtc->active) {
+       if (!crtc_state->base.active) {
                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
                return;
        }
 
-       if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+       if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
                switch (index) {
                case DRRS_HIGH_RR:
                        intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5064,18 +5209,18 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                default:
                        DRM_ERROR("Unsupported refreshrate type\n");
                }
-       } else if (INTEL_INFO(dev)->gen > 6) {
-               i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+       } else if (INTEL_GEN(dev_priv) > 6) {
+               i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
                u32 val;
 
                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
-                       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+                       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
                } else {
-                       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+                       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -5091,18 +5236,17 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 /**
  * intel_edp_drrs_enable - init drrs struct if supported
  * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
  *
  * Initializes frontbuffer_bits and drrs.dp
  */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       if (!intel_crtc->config->has_drrs) {
+       if (!crtc_state->has_drrs) {
                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
                return;
        }
@@ -5124,17 +5268,16 @@ unlock:
 /**
  * intel_edp_drrs_disable - Disable DRRS
  * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
  *
  */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+                           struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       if (!intel_crtc->config->has_drrs)
+       if (!old_crtc_state->has_drrs)
                return;
 
        mutex_lock(&dev_priv->drrs.mutex);
@@ -5144,9 +5287,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
        }
 
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       intel_dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+                       intel_dp->attached_connector->panel.fixed_mode->vrefresh);
 
        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);
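
The DRRS helpers now receive the crtc state to act on, and the kernel-doc
for intel_dp_set_drrs_state() keeps the locking rule: every refresh-rate
switch runs under dev_priv->drrs.mutex. The policy driven from the
invalidate/flush/worker paths is simple enough to state as a standalone
sketch: any busy frontbuffer forces the high refresh rate, and only a fully
idle screen may downclock.

    enum rr { HIGH_RR, LOW_RR };

    /* Sketch of the DRRS rate policy (names illustrative). */
    static enum rr drrs_target_rate(unsigned int busy_frontbuffer_bits)
    {
            if (busy_frontbuffer_bits)
                    return HIGH_RR;     /* invalidate/flush: screen busy */
            return LOW_RR;              /* idle: the worker may downclock */
    }
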
@@ -5175,10 +5317,12 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
        if (dev_priv->drrs.busy_frontbuffer_bits)
                goto unlock;
 
-       if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       intel_dp->attached_connector->panel.
-                                       downclock_mode->vrefresh);
+       if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+               struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                       intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+       }
 
 unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5186,7 +5330,7 @@ unlock:
 
 /**
  * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes starts.
@@ -5194,10 +5338,9 @@ unlock:
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-               unsigned frontbuffer_bits)
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+                              unsigned int frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5220,16 +5363,15 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       dev_priv->drrs.dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                       dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
        mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
  * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes has
@@ -5239,10 +5381,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_flush(struct drm_device *dev,
-               unsigned frontbuffer_bits)
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+                         unsigned int frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5265,9 +5406,8 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 
        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       dev_priv->drrs.dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                               dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
        /*
         * flush also means no more activity hence schedule downclock, if all
@@ -5400,27 +5540,15 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        pps_lock(intel_dp);
 
        intel_dp_init_panel_power_timestamps(intel_dp);
-
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               vlv_initial_power_sequencer_setup(intel_dp);
-       } else {
-               intel_dp_init_panel_power_sequencer(dev, intel_dp);
-               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
-       }
-
+       intel_dp_pps_init(dev, intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
 
        pps_unlock(intel_dp);
 
        /* Cache DPCD and EDID for edp. */
-       has_dpcd = intel_dp_get_dpcd(intel_dp);
+       has_dpcd = intel_edp_init_dpcd(intel_dp);
 
-       if (has_dpcd) {
-               if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
-                       dev_priv->no_aux_handshake =
-                               intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-                               DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-       } else {
+       if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
@@ -5576,7 +5704,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
-       intel_dp_aux_init(intel_dp, intel_connector);
+       intel_dp_aux_init(intel_dp);
 
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);
index 60fb39cd220b4a01f5b9469a5de29ee496abe61f..c438b02184cb06a0902ad3dd9ba4ae43622f0e2d 100644 (file)
 
 #include "intel_drv.h"
 
+static void
+intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+       DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+                     link_status[0], link_status[1], link_status[2],
+                     link_status[3], link_status[4], link_status[5]);
+}
+
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
                       const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -103,13 +112,24 @@ intel_dp_update_link_train(struct intel_dp *intel_dp)
        return ret == intel_dp->lane_count;
 }
 
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
+{
+       int lane;
+
+       for (lane = 0; lane < intel_dp->lane_count; lane++)
+               if ((intel_dp->train_set[lane] &
+                    DP_TRAIN_MAX_SWING_REACHED) == 0)
+                       return false;
+
+       return true;
+}
+
 /* Enable corresponding port and start training pattern 1 */
-static void
+static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
-       int i;
        uint8_t voltage;
-       int voltage_tries, loop_tries;
+       int voltage_tries, max_vswing_tries;
        uint8_t link_config[2];
        uint8_t link_bw, rate_select;
 
@@ -125,6 +145,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
        if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
        if (intel_dp->num_sink_rates)
                drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
                                  &rate_select, 1);
@@ -140,60 +161,54 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                                       DP_TRAINING_PATTERN_1 |
                                       DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to enable link training\n");
-               return;
+               return false;
        }
 
-       voltage = 0xff;
-       voltage_tries = 0;
-       loop_tries = 0;
+       voltage_tries = 1;
+       max_vswing_tries = 0;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];
 
                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
-                       break;
+                       return false;
                }
 
                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
-                       break;
+                       return true;
                }
 
-               /* Check to see if we've tried the max voltage */
-               for (i = 0; i < intel_dp->lane_count; i++)
-                       if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-                               break;
-               if (i == intel_dp->lane_count) {
-                       ++loop_tries;
-                       if (loop_tries == 5) {
-                               DRM_ERROR("too many full retries, give up\n");
-                               break;
-                       }
-                       intel_dp_reset_link_train(intel_dp,
-                                                 DP_TRAINING_PATTERN_1 |
-                                                 DP_LINK_SCRAMBLING_DISABLE);
-                       voltage_tries = 0;
-                       continue;
+               if (voltage_tries == 5) {
+                       DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+                       return false;
+               }
+
+               if (max_vswing_tries == 1) {
+                       DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+                       return false;
                }
 
-               /* Check to see if we've tried the same voltage 5 times */
-               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-                       ++voltage_tries;
-                       if (voltage_tries == 5) {
-                               DRM_ERROR("too many voltage retries, give up\n");
-                               break;
-                       }
-               } else
-                       voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
                /* Update training set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        DRM_ERROR("failed to update link training\n");
-                       break;
+                       return false;
                }
+
+               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+                   voltage)
+                       ++voltage_tries;
+               else
+                       voltage_tries = 1;
+
+               if (intel_dp_link_max_vswing_reached(intel_dp))
+                       ++max_vswing_tries;
        }
 }
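
The rewritten clock-recovery loop drops the old nested retry counters for
two exit conditions that track the DP spec's wording: give up once the same
voltage level has been requested five times in a row, or one attempt after
all lanes report maximum voltage swing. The counter bookkeeping, modelled as
a standalone sketch:

    /* voltage_tries counts consecutive attempts at one voltage level and
     * resets to 1 whenever the sink requests a different level;
     * max_vswing_tries is bumped once every lane reports max swing, and
     * the loop aborts on the following iteration if CR still failed. */
    static int update_voltage_tries(int tries, int prev_level, int new_level)
    {
            return (new_level == prev_level) ? tries + 1 : 1;
    }
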
 
@@ -229,12 +244,12 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
        return training_pattern;
 }
 
-static void
+static bool
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
-       bool channel_eq = false;
-       int tries, cr_tries;
+       int tries;
        u32 training_pattern;
+       uint8_t link_status[DP_LINK_STATUS_SIZE];
 
        training_pattern = intel_dp_training_pattern(intel_dp);
 
@@ -243,19 +258,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                                     training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
-               return;
+               return false;
        }
 
-       tries = 0;
-       cr_tries = 0;
-       channel_eq = false;
-       for (;;) {
-               uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-               if (cr_tries > 5) {
-                       DRM_ERROR("failed to train DP, aborting\n");
-                       break;
-               }
+       intel_dp->channel_eq_status = false;
+       for (tries = 0; tries < 5; tries++) {
 
                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -266,44 +273,38 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status,
                                              intel_dp->lane_count)) {
-                       intel_dp_link_training_clock_recovery(intel_dp);
-                       intel_dp_set_link_train(intel_dp,
-                                               training_pattern |
-                                               DP_LINK_SCRAMBLING_DISABLE);
-                       cr_tries++;
-                       continue;
+                       intel_dp_dump_link_status(link_status);
+                       DRM_DEBUG_KMS("Clock recovery check failed, cannot "
+                                     "continue channel equalization\n");
+                       break;
                }
 
                if (drm_dp_channel_eq_ok(link_status,
                                         intel_dp->lane_count)) {
-                       channel_eq = true;
+                       intel_dp->channel_eq_status = true;
+                       DRM_DEBUG_KMS("Channel EQ done. DP Training "
+                                     "successful\n");
                        break;
                }
 
-               /* Try 5 times, then try clock recovery if that fails */
-               if (tries > 5) {
-                       intel_dp_link_training_clock_recovery(intel_dp);
-                       intel_dp_set_link_train(intel_dp,
-                                               training_pattern |
-                                               DP_LINK_SCRAMBLING_DISABLE);
-                       tries = 0;
-                       cr_tries++;
-                       continue;
-               }
-
                /* Update training set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
-               ++tries;
+       }
+
+       /* Try 5 times, else fail and try at lower BW */
+       if (tries == 5) {
+               intel_dp_dump_link_status(link_status);
+               DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
        }
 
        intel_dp_set_idle_link_train(intel_dp);
 
-       if (channel_eq)
-               DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+       return intel_dp->channel_eq_status;
 }
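
Channel equalization becomes a plainly bounded loop: at most five tries, an
early abort when clock recovery regresses mid-way (the old code silently
re-ran clock recovery from here), a result recorded in
intel_dp->channel_eq_status, and a raw link-status dump on failure so the
log shows what the sink reported. The control flow as a self-contained
sketch with stubbed-out AUX reads:

    #include <stdbool.h>

    static bool cr_still_ok(int t) { return true; }     /* stub */
    static bool eq_ok(int t)       { return t == 2; }   /* stub */

    static bool channel_eq(void)
    {
            int tries;

            for (tries = 0; tries < 5; tries++) {
                    if (!cr_still_ok(tries))
                            return false;   /* abort, retrain at lower BW */
                    if (eq_ok(tries))
                            return true;
                    /* otherwise adjust drive settings and retry */
            }
            return false;                   /* five failed attempts */
    }
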
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
index 68a005d729e90b218cc3ac4eaefcb094388370b5..54a9d7610d8ff184bbe9800b223fa43c9e4a0d99 100644 (file)
 #include <drm/drm_edid.h>
 
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
-                                       struct intel_crtc_state *pipe_config)
+                                       struct intel_crtc_state *pipe_config,
+                                       struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_atomic_state *state;
-       int bpp, i;
+       int bpp;
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       struct drm_connector *drm_connector;
-       struct intel_connector *connector, *found = NULL;
-       struct drm_connector_state *connector_state;
        int mst_pbn;
 
        pipe_config->dp_encoder_is_mst = true;
@@ -54,7 +52,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
         */
        lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 
-
        pipe_config->lane_count = lane_count;
 
        pipe_config->pipe_bpp = 24;
@@ -62,20 +59,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        state = pipe_config->base.state;
 
-       for_each_connector_in_state(state, drm_connector, connector_state, i) {
-               connector = to_intel_connector(drm_connector);
-
-               if (connector_state->best_encoder == &encoder->base) {
-                       found = connector;
-                       break;
-               }
-       }
-
-       if (!found) {
-               DRM_ERROR("can't find connector\n");
-               return false;
-       }
-
        mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
        pipe_config->pbn = mst_pbn;
@@ -92,16 +75,20 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 }
 
-static void intel_mst_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_disable_dp(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_connector *connector =
+               to_intel_connector(old_conn_state->connector);
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
+       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
        if (ret) {
@@ -109,11 +96,15 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        }
 }
 
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+                                     struct intel_crtc_state *old_crtc_state,
+                                     struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_connector *connector =
+               to_intel_connector(old_conn_state->connector);
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
@@ -122,59 +113,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
        /* and this can also fail */
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
+       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
        intel_dp->active_mst_links--;
 
        intel_mst->connector = NULL;
        if (intel_dp->active_mst_links == 0) {
-               intel_dig_port->base.post_disable(&intel_dig_port->base);
+               intel_dig_port->base.post_disable(&intel_dig_port->base,
+                                                 NULL, NULL);
+
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        }
 }
 
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_dig_port->port;
+       struct intel_connector *connector =
+               to_intel_connector(conn_state->connector);
        int ret;
        uint32_t temp;
-       struct intel_connector *found = NULL, *connector;
        int slots;
-       struct drm_crtc *crtc = encoder->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       for_each_intel_connector(dev, connector) {
-               if (connector->base.state->best_encoder == &encoder->base) {
-                       found = connector;
-                       break;
-               }
-       }
-
-       if (!found) {
-               DRM_ERROR("can't find connector\n");
-               return;
-       }
 
        /* MST encoders are bound to a crtc, not to a connector,
         * force the mapping here for get_hw_state.
         */
-       found->encoder = encoder;
+       connector->encoder = encoder;
+       intel_mst->connector = connector;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       intel_mst->connector = found;
-
        if (intel_dp->active_mst_links == 0) {
-               intel_prepare_ddi_buffer(&intel_dig_port->base);
-
-               intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+               intel_ddi_clk_select(&intel_dig_port->base,
+                                    pipe_config->shared_dpll);
 
-               intel_dp_set_link_params(intel_dp, intel_crtc->config);
+               intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
+               intel_dp_set_link_params(intel_dp,
+                                        pipe_config->port_clock,
+                                        pipe_config->lane_count,
+                                        true);
 
                intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
@@ -185,8 +168,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        }
 
        ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-                                      intel_mst->connector->port,
-                                      intel_crtc->config->pbn, &slots);
+                                      connector->port,
+                                      pipe_config->pbn, &slots);
        if (ret == false) {
                DRM_ERROR("failed to allocate vcpi\n");
                return;
@@ -200,13 +183,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 }
 
-static void intel_mst_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_enable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_dig_port->port;
        int ret;
 
@@ -239,9 +223,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 temp, flags = 0;
 
index 5c1f2d235ffa5a66baf029f116d8ba8b65a5d8ac..c26d18a574b6c54b7cfdfbd4d24b7293d5d87ed0 100644 (file)
 
 #include "intel_drv.h"
 
+struct intel_shared_dpll *
+skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
+{
+       struct intel_shared_dpll *pll = NULL;
+       struct intel_dpll_hw_state dpll_hw_state;
+       enum intel_dpll_id i;
+       bool found = false;
+
+       if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+               return pll;
+
+       for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               /* Only want to check enabled timings first */
+               if (pll->config.crtc_mask == 0)
+                       continue;
+
+               if (memcmp(&dpll_hw_state, &pll->config.hw_state,
+                          sizeof(pll->config.hw_state)) == 0) {
+                       found = true;
+                       break;
+               }
+       }
+
+       /* Ok no matching timings, maybe there's a free one? */
+       for (i = DPLL_ID_SKL_DPLL1;
+            !found && i <= DPLL_ID_SKL_DPLL3; i++) {
+               pll = &dev_priv->shared_dplls[i];
+               if (pll->config.crtc_mask == 0) {
+                       pll->config.hw_state = dpll_hw_state;
+                       break;
+               }
+       }
+
+       return pll;
+}
+
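
skl_find_link_pll() is a two-pass search: first try to share an
already-programmed DPLL whose hardware state matches the requested link
clock, then fall back to claiming a free one (crtc_mask == 0) and stamping
the computed state into it. The same idea as a simplified self-contained
sketch (the real code memcmp()s full hw-state structs, not an int, and
returns its last candidate rather than NULL):

    struct pll { unsigned int crtc_mask; int hw_state; };

    static struct pll *find_link_pll(struct pll *plls, int n, int want)
    {
            int i;

            for (i = 0; i < n; i++)         /* pass 1: in-use and matching */
                    if (plls[i].crtc_mask && plls[i].hw_state == want)
                            return &plls[i];

            for (i = 0; i < n; i++)         /* pass 2: first free slot */
                    if (plls[i].crtc_mask == 0) {
                            plls[i].hw_state = want;
                            return &plls[i];
                    }
            return 0;
    }
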
 struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
                            enum intel_dpll_id id)
@@ -452,26 +490,6 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
        return val & SPLL_PLL_ENABLE;
 }
 
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
-{
-       switch (pll->id) {
-       case DPLL_ID_WRPLL1:
-               return PORT_CLK_SEL_WRPLL1;
-       case DPLL_ID_WRPLL2:
-               return PORT_CLK_SEL_WRPLL2;
-       case DPLL_ID_SPLL:
-               return PORT_CLK_SEL_SPLL;
-       case DPLL_ID_LCPLL_810:
-               return PORT_CLK_SEL_LCPLL_810;
-       case DPLL_ID_LCPLL_1350:
-               return PORT_CLK_SEL_LCPLL_1350;
-       case DPLL_ID_LCPLL_2700:
-               return PORT_CLK_SEL_LCPLL_2700;
-       default:
-               return PORT_CLK_SEL_NONE;
-       }
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -687,11 +705,65 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
        *r2_out = best.r2;
 }
 
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
+                                                      struct intel_crtc *crtc,
+                                                      struct intel_crtc_state *crtc_state)
+{
+       struct intel_shared_dpll *pll;
+       uint32_t val;
+       unsigned int p, n2, r2;
+
+       hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+       val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+             WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+             WRPLL_DIVIDER_POST(p);
+
+       crtc_state->dpll_hw_state.wrpll = val;
+
+       pll = intel_find_shared_dpll(crtc, crtc_state,
+                                    DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+       return pll;
+}
+
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+                                             int clock)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id pll_id;
+
+       switch (clock / 2) {
+       case 81000:
+               pll_id = DPLL_ID_LCPLL_810;
+               break;
+       case 135000:
+               pll_id = DPLL_ID_LCPLL_1350;
+               break;
+       case 270000:
+               pll_id = DPLL_ID_LCPLL_2700;
+               break;
+       default:
+               DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+               return NULL;
+       }
+
+       pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+       return pll;
+}
+
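
On Haswell the DP link clock is not computed at all: it is tapped off the
LCPLL at fixed ratios, so hsw_ddi_dp_get_dpll() reduces to a lookup on half
the port clock. Worked through with the standard DP rates (port_clock in
kHz): RBR 162000 -> 81000 -> DPLL_ID_LCPLL_810, HBR 270000 -> 135000 ->
DPLL_ID_LCPLL_1350, HBR2 540000 -> 270000 -> DPLL_ID_LCPLL_2700; anything
else is rejected. As a tiny sketch:

    /* Returns the LCPLL tap (named after its MHz rate), or -1 if the
     * clock is not a valid DP link rate. */
    static int lcpll_for_dp(int port_clock_khz)
    {
            switch (port_clock_khz / 2) {
            case 81000:  return 810;    /* RBR  */
            case 135000: return 1350;   /* HBR  */
            case 270000: return 2700;   /* HBR2 */
            default:     return -1;
            }
    }
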
 static struct intel_shared_dpll *
 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
             struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        int clock = crtc_state->port_clock;
 
@@ -699,41 +771,12 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
               sizeof(crtc_state->dpll_hw_state));
 
        if (encoder->type == INTEL_OUTPUT_HDMI) {
-               uint32_t val;
-               unsigned p, n2, r2;
-
-               hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-               val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
-                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-                     WRPLL_DIVIDER_POST(p);
-
-               crtc_state->dpll_hw_state.wrpll = val;
-
-               pll = intel_find_shared_dpll(crtc, crtc_state,
-                                            DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+               pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
 
        } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
-               enum intel_dpll_id pll_id;
-
-               switch (clock / 2) {
-               case 81000:
-                       pll_id = DPLL_ID_LCPLL_810;
-                       break;
-               case 135000:
-                       pll_id = DPLL_ID_LCPLL_1350;
-                       break;
-               case 270000:
-                       pll_id = DPLL_ID_LCPLL_2700;
-                       break;
-               default:
-                       DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
-                       return NULL;
-               }
-
-               pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+               pll = hsw_ddi_dp_get_dpll(encoder, clock);
 
        } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
                if (WARN_ON(crtc_state->port_clock / 2 != 135000))
@@ -751,14 +794,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        if (!pll)
                return NULL;
 
-       crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
-
        intel_reference_shared_dpll(pll, crtc_state);
 
        return pll;
 }
 
-
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
        .enable = hsw_ddi_wrpll_enable,
        .disable = hsw_ddi_wrpll_disable,
@@ -1194,75 +1234,110 @@ skip_remaining_dividers:
        return true;
 }
 
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state,
+                                     int clock)
 {
-       struct intel_shared_dpll *pll;
        uint32_t ctrl1, cfgcr1, cfgcr2;
-       int clock = crtc_state->port_clock;
+       struct skl_wrpll_params wrpll_params = { 0, };
 
        /*
         * See comment in intel_dpll_hw_state to understand why we always use 0
         * as the DPLL id in this function.
         */
-
        ctrl1 = DPLL_CTRL1_OVERRIDE(0);
 
-       if (encoder->type == INTEL_OUTPUT_HDMI) {
-               struct skl_wrpll_params wrpll_params = { 0, };
+       ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-               ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+       if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+               return false;
 
-               if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
-                       return NULL;
+       cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+               DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+               wrpll_params.dco_integer;
+
+       cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+               DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+               DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+               DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+               wrpll_params.central_freq;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+       return true;
+}
 
-               cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-                        DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-                        wrpll_params.dco_integer;
 
-               cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-                        DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-                        DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-                        DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-                        wrpll_params.central_freq;
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state)
+{
+       uint32_t ctrl1;
+
+       /*
+        * See comment in intel_dpll_hw_state to understand why we always use 0
+        * as the DPLL id in this function.
+        */
+       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+       switch (clock / 2) {
+       case 81000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+               break;
+       case 135000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+               break;
+       case 270000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+               break;
+               /* eDP 1.4 rates */
+       case 162000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+               break;
+       case 108000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+               break;
+       case 216000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+               break;
+       }
+
+       dpll_hw_state->ctrl1 = ctrl1;
+       return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_shared_dpll *pll;
+       int clock = crtc_state->port_clock;
+       bool bret;
+       struct intel_dpll_hw_state dpll_hw_state;
+
+       memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+       if (encoder->type == INTEL_OUTPUT_HDMI) {
+               bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+                       return NULL;
+               }
        } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
-               switch (crtc_state->port_clock / 2) {
-               case 81000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-                       break;
-               case 135000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-                       break;
-               case 270000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-                       break;
-               /* eDP 1.4 rates */
-               case 162000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
-                       break;
-               case 108000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
-                       break;
-               case 216000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
-                       break;
+               bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+                       return NULL;
                }
-
-               cfgcr1 = cfgcr2 = 0;
+               crtc_state->dpll_hw_state = dpll_hw_state;
        } else {
                return NULL;
        }
 
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
        if (encoder->type == INTEL_OUTPUT_EDP)
                pll = intel_find_shared_dpll(crtc, crtc_state,
                                             DPLL_ID_SKL_DPLL0,
@@ -1274,8 +1349,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        if (!pll)
                return NULL;
 
-       crtc_state->ddi_pll_sel = pll->id;
-
        intel_reference_shared_dpll(pll, crtc_state);
 
        return pll;
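
skl_get_dpll() now computes the hardware state into a local struct and copies it into the crtc state only once every step has succeeded, so a failed computation cannot leave a half-written state behind. A minimal sketch of that compute-then-commit pattern, under the assumption that illustrative types (hw_state, crtc_state) stand in for the i915 structs:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct hw_state { unsigned int ctrl1, cfgcr1, cfgcr2; };
struct crtc_state { struct hw_state dpll_hw_state; };

/* Stand-in for skl_ddi_dp_set_dpll_hw_state(): fills a local struct,
 * reporting failure before anything reaches the caller's state. */
static bool compute_dp_state(int clock, struct hw_state *out)
{
        if (clock <= 0)
                return false;
        out->ctrl1 = 0x1;               /* placeholder field value */
        return true;
}

static bool set_dpll(struct crtc_state *cs, int clock)
{
        struct hw_state tmp;

        memset(&tmp, 0, sizeof(tmp));
        if (!compute_dp_state(clock, &tmp))
                return false;           /* cs->dpll_hw_state untouched */
        cs->dpll_hw_state = tmp;        /* single commit point */
        return true;
}

int main(void)
{
        struct crtc_state cs = { { 0 } };

        printf("%d\n", set_dpll(&cs, 270000));  /* 1 */
        return 0;
}
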
@@ -1484,6 +1557,8 @@ struct bxt_clk_div {
        uint32_t m2_frac;
        bool m2_frac_en;
        uint32_t n;
+
+       int vco;
 };
 
 /* pre-calculated values for DP linkrates */
@@ -1497,57 +1572,60 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
        {432000, 3, 1, 32, 1677722, 1, 1}
 };
 
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
+                         struct intel_crtc_state *crtc_state, int clock,
+                         struct bxt_clk_div *clk_div)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
-       struct intel_digital_port *intel_dig_port;
-       struct bxt_clk_div clk_div = {0};
-       int vco = 0;
-       uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-       uint32_t lanestagger;
-       int clock = crtc_state->port_clock;
+       struct dpll best_clock;
 
-       if (encoder->type == INTEL_OUTPUT_HDMI) {
-               struct dpll best_clock;
+       /* Calculate HDMI div */
+       /*
+        * FIXME: tie the following calculation into
+        * i9xx_crtc_compute_clock
+        */
+       if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+               DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+                                clock, pipe_name(intel_crtc->pipe));
+               return false;
+       }
 
-               /* Calculate HDMI div */
-               /*
-                * FIXME: tie the following calculation into
-                * i9xx_crtc_compute_clock
-                */
-               if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
-                       DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-                                        clock, pipe_name(crtc->pipe));
-                       return NULL;
-               }
+       clk_div->p1 = best_clock.p1;
+       clk_div->p2 = best_clock.p2;
+       WARN_ON(best_clock.m1 != 2);
+       clk_div->n = best_clock.n;
+       clk_div->m2_int = best_clock.m2 >> 22;
+       clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+       clk_div->m2_frac_en = clk_div->m2_frac != 0;
 
-               clk_div.p1 = best_clock.p1;
-               clk_div.p2 = best_clock.p2;
-               WARN_ON(best_clock.m1 != 2);
-               clk_div.n = best_clock.n;
-               clk_div.m2_int = best_clock.m2 >> 22;
-               clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
-               clk_div.m2_frac_en = clk_div.m2_frac != 0;
+       clk_div->vco = best_clock.vco;
 
-               vco = best_clock.vco;
-       } else if (encoder->type == INTEL_OUTPUT_DP ||
-                  encoder->type == INTEL_OUTPUT_EDP) {
-               int i;
+       return true;
+}
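
The m2 divider returned by bxt_find_best_dpll() is a fixed-point value with 22 fractional bits, which the helper above splits into m2_int/m2_frac. A standalone illustration of that split, reconstructing a combined m2 from the pre-split values in the 432000 kHz bxt_dp_clk_val entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 32 + 1677722/2^22, i.e. roughly 32.4 */
        uint32_t m2 = (32u << 22) | 1677722u;
        uint32_t m2_int = m2 >> 22;
        uint32_t m2_frac = m2 & ((1u << 22) - 1);
        int m2_frac_en = m2_frac != 0;

        printf("m2_int=%u m2_frac=%u m2_frac_en=%d\n",
               m2_int, m2_frac, m2_frac_en);    /* 32 1677722 1 */
        return 0;
}
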
 
-               clk_div = bxt_dp_clk_val[0];
-               for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-                       if (bxt_dp_clk_val[i].clock == clock) {
-                               clk_div = bxt_dp_clk_val[i];
-                               break;
-                       }
+static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+{
+       int i;
+
+       *clk_div = bxt_dp_clk_val[0];
+       for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+               if (bxt_dp_clk_val[i].clock == clock) {
+                       *clk_div = bxt_dp_clk_val[i];
+                       break;
                }
-               vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
        }
 
+       clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
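
For DP the VCO is derived arithmetically from the link clock and the post dividers: vco = clock * 10 / 2 * p1 * p2, all in kHz. A quick standalone check using the 432000 kHz table entry (p1 = 3, p2 = 1), which lands in the 6.2-6.7 GHz band handled first by bxt_ddi_set_dpll_hw_state():

#include <stdio.h>

int main(void)
{
        int clock = 432000, p1 = 3, p2 = 1;     /* bxt_dp_clk_val entry */
        long vco = (long)clock * 10 / 2 * p1 * p2;

        printf("vco = %ld kHz\n", vco);         /* 6480000 */
        return 0;
}
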
+
+static bool bxt_ddi_set_dpll_hw_state(int clock,
+                         struct bxt_clk_div *clk_div,
+                         struct intel_dpll_hw_state *dpll_hw_state)
+{
+       int vco = clk_div->vco;
+       uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+       uint32_t lanestagger;
+
        if (vco >= 6200000 && vco <= 6700000) {
                prop_coef = 4;
                int_coef = 9;
@@ -1566,12 +1644,9 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                targ_cnt = 9;
        } else {
                DRM_ERROR("Invalid VCO\n");
-               return NULL;
+               return false;
        }
 
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
        if (clock > 270000)
                lanestagger = 0x18;
        else if (clock > 135000)
@@ -1583,35 +1658,75 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        else
                lanestagger = 0x02;
 
-       crtc_state->dpll_hw_state.ebb0 =
-               PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
-       crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
-       crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
-       crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+       dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+       dpll_hw_state->pll0 = clk_div->m2_int;
+       dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+       dpll_hw_state->pll2 = clk_div->m2_frac;
 
-       if (clk_div.m2_frac_en)
-               crtc_state->dpll_hw_state.pll3 =
-                       PORT_PLL_M2_FRAC_ENABLE;
+       if (clk_div->m2_frac_en)
+               dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
 
-       crtc_state->dpll_hw_state.pll6 =
-               prop_coef | PORT_PLL_INT_COEFF(int_coef);
-       crtc_state->dpll_hw_state.pll6 |=
-               PORT_PLL_GAIN_CTL(gain_ctl);
+       dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+       dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
 
-       crtc_state->dpll_hw_state.pll8 = targ_cnt;
+       dpll_hw_state->pll8 = targ_cnt;
 
-       crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+       dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-       crtc_state->dpll_hw_state.pll10 =
+       dpll_hw_state->pll10 =
                PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
                | PORT_PLL_DCO_AMP_OVR_EN_H;
 
-       crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+       dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
-       crtc_state->dpll_hw_state.pcsdw12 =
-               LANESTAGGER_STRAP_OVRD | lanestagger;
+       dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
 
-       intel_dig_port = enc_to_dig_port(&encoder->base);
+       return true;
+}
+
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+                         struct intel_dpll_hw_state *dpll_hw_state)
+{
+       struct bxt_clk_div clk_div = {0};
+
+       bxt_ddi_dp_pll_dividers(clock, &clk_div);
+
+       return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc,
+               struct intel_crtc_state *crtc_state,
+               struct intel_encoder *encoder)
+{
+       struct bxt_clk_div clk_div = {0};
+       struct intel_dpll_hw_state dpll_hw_state = {0};
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_digital_port *intel_dig_port;
+       struct intel_shared_dpll *pll;
+       int i, clock = crtc_state->port_clock;
+
+       if (encoder->type == INTEL_OUTPUT_HDMI
+           && !bxt_ddi_hdmi_pll_dividers(crtc, crtc_state,
+                                         clock, &clk_div))
+               return NULL;
+
+       if ((encoder->type == INTEL_OUTPUT_DP ||
+            encoder->type == INTEL_OUTPUT_EDP) &&
+           !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+               return NULL;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state = dpll_hw_state;
+
+       if (encoder->type == INTEL_OUTPUT_DP_MST) {
+               struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+
+               intel_dig_port = intel_mst->primary;
+       } else
+               intel_dig_port = enc_to_dig_port(&encoder->base);
 
        /* 1:1 mapping between ports and PLLs */
        i = (enum intel_dpll_id) intel_dig_port->port;
@@ -1622,9 +1737,6 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
        intel_reference_shared_dpll(pll, crtc_state);
 
-       /* shared DPLL id 0 is DPLL A */
-       crtc_state->ddi_pll_sel = pll->id;
-
        return pll;
 }
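
On BXT each DDI port drives its own PLL, so bxt_get_dpll() derives the PLL id by reinterpreting the port number, as in the cast above. A trivial sketch of that 1:1 mapping, with illustrative enum names standing in for the i915 ones:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C };           /* illustrative subset */
enum dpll_id { DPLL_A, DPLL_B, DPLL_C };

int main(void)
{
        enum port p = PORT_B;
        enum dpll_id id = (enum dpll_id)p;      /* 1:1 port -> PLL */

        printf("port %d -> pll %d\n", (int)p, (int)id);
        return 0;
}
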
 
index 89c5ada1a3157003c165dae6f9af67908e3152cf..f4385353bc11a60370dab58f44a8eb47bbed3aa2 100644 (file)
@@ -160,5 +160,20 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc);
 void intel_shared_dpll_commit(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
+/* BXT dpll related functions */
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+                         struct intel_dpll_hw_state *dpll_hw_state);
+
+
+/* SKL dpll related functions */
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state);
+struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
+                                           int clock);
+
+
+/* HSW dpll related functions */
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+                                             int clock);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index cc937a19b1ba09486bf9ce04688ecba5a5c123ab..8fd16adf069b2f894d0eb1d65d662e39754fbaf4 100644 (file)
  */
 #define _wait_for(COND, US, W) ({ \
        unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;   \
-       int ret__ = 0;                                                  \
-       while (!(COND)) {                                               \
-               if (time_after(jiffies, timeout__)) {                   \
-                       if (!(COND))                                    \
-                               ret__ = -ETIMEDOUT;                     \
+       int ret__;                                                      \
+       for (;;) {                                                      \
+               bool expired__ = time_after(jiffies, timeout__);        \
+               if (COND) {                                             \
+                       ret__ = 0;                                      \
+                       break;                                          \
+               }                                                       \
+               if (expired__) {                                        \
+                       ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
                if ((W) && drm_can_sleep()) {                           \
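
The rewritten _wait_for() samples the expiry flag before testing COND, so a long preemption between the two checks can no longer turn a condition that became true in the meantime into a spurious -ETIMEDOUT. A userspace sketch of the same ordering, where cond_met() and the timing plumbing are stand-ins for COND and jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool cond_met(void)      /* stand-in for COND */
{
        return true;
}

static int wait_for_cond(double timeout_s)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                bool expired;

                clock_gettime(CLOCK_MONOTONIC, &now);
                expired = (now.tv_sec - start.tv_sec) +
                          (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_s;
                if (cond_met())
                        return 0;       /* a met condition wins over expiry */
                if (expired)
                        return -1;      /* -ETIMEDOUT in the kernel macro */
        }
}

int main(void)
{
        printf("%d\n", wait_for_cond(0.01));    /* 0 */
        return 0;
}
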
@@ -178,11 +182,22 @@ struct intel_framebuffer {
        struct drm_framebuffer base;
        struct drm_i915_gem_object *obj;
        struct intel_rotation_info rot_info;
+
+       /* for each plane in the normal GTT view */
+       struct {
+               unsigned int x, y;
+       } normal[2];
+       /* for each plane in the rotated GTT view */
+       struct {
+               unsigned int x, y;
+               unsigned int pitch; /* pixels */
+       } rotated[2];
 };
 
 struct intel_fbdev {
        struct drm_fb_helper helper;
        struct intel_framebuffer *fb;
+       struct i915_vma *vma;
        async_cookie_t cookie;
        int preferred_bpp;
 };
@@ -194,14 +209,26 @@ struct intel_encoder {
        unsigned int cloneable;
        void (*hot_plug)(struct intel_encoder *);
        bool (*compute_config)(struct intel_encoder *,
-                              struct intel_crtc_state *);
-       void (*pre_pll_enable)(struct intel_encoder *);
-       void (*pre_enable)(struct intel_encoder *);
-       void (*enable)(struct intel_encoder *);
-       void (*mode_set)(struct intel_encoder *intel_encoder);
-       void (*disable)(struct intel_encoder *);
-       void (*post_disable)(struct intel_encoder *);
-       void (*post_pll_disable)(struct intel_encoder *);
+                              struct intel_crtc_state *,
+                              struct drm_connector_state *);
+       void (*pre_pll_enable)(struct intel_encoder *,
+                              struct intel_crtc_state *,
+                              struct drm_connector_state *);
+       void (*pre_enable)(struct intel_encoder *,
+                          struct intel_crtc_state *,
+                          struct drm_connector_state *);
+       void (*enable)(struct intel_encoder *,
+                      struct intel_crtc_state *,
+                      struct drm_connector_state *);
+       void (*disable)(struct intel_encoder *,
+                       struct intel_crtc_state *,
+                       struct drm_connector_state *);
+       void (*post_disable)(struct intel_encoder *,
+                            struct intel_crtc_state *,
+                            struct drm_connector_state *);
+       void (*post_pll_disable)(struct intel_encoder *,
+                                struct intel_crtc_state *,
+                                struct drm_connector_state *);
        /* Read out the current hw state of this connector, returning true if
         * the encoder is active. If the encoder is enabled it also set the pipe
         * it is connected to in the pipe parameter. */
@@ -338,10 +365,16 @@ struct intel_atomic_state {
 
 struct intel_plane_state {
        struct drm_plane_state base;
-       struct drm_rect src;
-       struct drm_rect dst;
        struct drm_rect clip;
-       bool visible;
+
+       struct {
+               u32 offset;
+               int x, y;
+       } main;
+       struct {
+               u32 offset;
+               int x, y;
+       } aux;
 
        /*
         * scaler_id
@@ -561,12 +594,6 @@ struct intel_crtc_state {
        /* Selected dpll when shared or NULL. */
        struct intel_shared_dpll *shared_dpll;
 
-       /*
-        * - PORT_CLK_SEL for DDI ports on HSW/BDW.
-        * - enum skl_dpll on SKL
-        */
-       uint32_t ddi_pll_sel;
-
        /* Actual register state of the dpll, for shared dpll cross-checking. */
        struct intel_dpll_hw_state dpll_hw_state;
 
@@ -683,8 +710,8 @@ struct intel_crtc {
 
        struct intel_crtc_state *config;
 
-       /* reset counter value when the last flip was submitted */
-       unsigned int reset_counter;
+       /* global reset count when the last flip was submitted */
+       unsigned int reset_count;
 
        /* Access to these should be protected by dev_priv->irq_lock. */
        bool cpu_fifo_underrun_disabled;
@@ -852,8 +879,10 @@ struct intel_dp {
        int link_rate;
        uint8_t lane_count;
        uint8_t sink_count;
+       bool link_mst;
        bool has_audio;
        bool detect_done;
+       bool channel_eq_status;
        enum hdmi_force_audio force_audio;
        bool limited_color_range;
        bool color_range_auto;
@@ -1106,8 +1135,11 @@ void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
+                         struct intel_shared_dpll *pll);
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state);
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1122,7 +1154,6 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1133,22 +1164,12 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-
-/* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            enum fb_op_origin origin);
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-                                   unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-                                    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
-                           unsigned frontbuffer_bits);
+struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
+                                                 int clock);
 unsigned int intel_fb_align_height(struct drm_device *dev,
                                   unsigned int height,
                                   uint32_t pixel_format,
                                   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-                       enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
                              uint64_t fb_modifier, uint32_t pixel_format);
 
@@ -1164,14 +1185,22 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
 void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+                                  const struct intel_plane_state *state,
+                                  int plane);
+void intel_add_fb_offsets(int *x, int *y,
+                         const struct intel_plane_state *state, int plane);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 void intel_mark_busy(struct drm_i915_private *dev_priv);
 void intel_mark_idle(struct drm_i915_private *dev_priv);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
@@ -1227,8 +1256,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-                              unsigned int rotation);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
@@ -1238,9 +1267,9 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
-                          const struct drm_plane_state *new_state);
+                          struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
-                           const struct drm_plane_state *old_state);
+                           struct drm_plane_state *old_state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
                                    const struct drm_plane_state *state,
                                    struct drm_property *property,
@@ -1258,7 +1287,7 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
 static inline bool
 intel_rotation_90_or_270(unsigned int rotation)
 {
-       return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+       return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
 }
 
 void intel_create_rotation_property(struct drm_device *dev,
@@ -1290,9 +1319,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 u32 intel_compute_tile_offset(int *x, int *y,
-                             const struct drm_framebuffer *fb, int plane,
-                             unsigned int pitch,
-                             unsigned int rotation);
+                             const struct intel_plane_state *state, int plane);
 void intel_prepare_reset(struct drm_i915_private *dev_priv);
 void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1335,13 +1362,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
-                          struct drm_i915_gem_object *obj,
-                          unsigned int plane);
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
 u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+                    unsigned int rotation);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
 
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1355,7 +1383,8 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *pipe_config);
+                             int link_rate, uint8_t lane_count,
+                             bool link_mst);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1364,7 +1393,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
-                            struct intel_crtc_state *pipe_config);
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
                                  bool long_hpd);
@@ -1382,13 +1412,14 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp);
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-               unsigned frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-                                        struct intel_digital_port *port);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+                              unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+                         unsigned int frontbuffer_bits);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1488,7 +1519,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                               struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config);
+                              struct intel_crtc_state *pipe_config,
+                              struct drm_connector_state *conn_state);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
@@ -1561,13 +1593,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
 /* intel_psr.c */
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits);
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits,
                     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
                                   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
@@ -1667,13 +1699,6 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
        atomic_dec(&dev_priv->pm.wakeref_count);
 }
 
-/* TODO: convert users of these to rely instead on proper RPM refcounting */
-#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv)  \
-       disable_rpm_wakeref_asserts(dev_priv)
-
-#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv)   \
-       enable_rpm_wakeref_asserts(dev_priv)
-
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
@@ -1699,11 +1724,11 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
@@ -1716,6 +1741,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+                              const struct skl_ddb_allocation *new,
+                              enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+                                const struct skl_ddb_allocation *old,
+                                const struct skl_ddb_allocation *new,
+                                enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+                        const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+                       const struct skl_wm_values *wm,
+                       int plane);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
index de8e9fb51595b984780984ba29408c8ed0efee6b..b2e3d3a334f7bf2a20047f2d144e4bb54afe1856 100644 (file)
@@ -312,7 +312,8 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -533,14 +534,15 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
        intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *pipe_config);
 
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port;
 
        DRM_DEBUG_KMS("\n");
@@ -550,9 +552,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
         * lock. It needs to be fully powered down to fix it.
         */
        intel_disable_dsi_pll(encoder);
-       intel_enable_dsi_pll(encoder, crtc->config);
+       intel_enable_dsi_pll(encoder, pipe_config);
 
-       intel_dsi_prepare(encoder);
+       intel_dsi_prepare(encoder, pipe_config);
 
        /* Panel Enable over CRC PMIC */
        if (intel_dsi->gpio_panel)
@@ -582,7 +584,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
        intel_dsi_enable(encoder);
 }
 
-static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+static void intel_dsi_enable_nop(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
        DRM_DEBUG_KMS("\n");
 
@@ -592,7 +596,9 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
         */
 }
 
-static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+static void intel_dsi_pre_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
@@ -694,7 +700,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
        intel_disable_dsi_pll(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+                                  struct intel_crtc_state *pipe_config,
+                                  struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -819,6 +827,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
                                crtc_hblank_start_sw, crtc_hblank_end_sw;
 
+       /* FIXME: hw readout should not depend on SW state */
        intel_crtc = to_intel_crtc(encoder->base.crtc);
        adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
 
@@ -1104,14 +1113,15 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
        }
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *pipe_config)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-       const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        enum port port;
        unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
        u32 val, tmp;
@@ -1348,7 +1358,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
                intel_connector->panel.fitting_mode = val;
        }
 
-       crtc = intel_attached_encoder(connector)->base.crtc;
+       crtc = connector->state->crtc;
        if (crtc && crtc->state->enable) {
                /*
                 * If the CRTC is enabled, the display will be changed
index 47bdf9dad0d37c093a1b22099c4db0a5099be552..2e452c505e7eefd95f56ed4e9009d5d5e1f39c2e 100644 (file)
@@ -174,7 +174,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
        pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_disable_dvo(struct intel_encoder *encoder)
+static void intel_disable_dvo(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -186,17 +188,18 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
        I915_READ(dvo_reg);
 }
 
-static void intel_enable_dvo(struct intel_encoder *encoder)
+static void intel_enable_dvo(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
 
        intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-                                        &crtc->config->base.mode,
-                                        &crtc->config->base.adjusted_mode);
+                                        &pipe_config->base.mode,
+                                        &pipe_config->base.adjusted_mode);
 
        I915_WRITE(dvo_reg, temp | DVO_ENABLE);
        I915_READ(dvo_reg);
@@ -235,7 +238,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_dvo_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        const struct drm_display_mode *fixed_mode =
@@ -253,12 +257,13 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        int pipe = crtc->pipe;
        u32 dvo_val;
@@ -554,7 +559,6 @@ void intel_dvo_init(struct drm_device *dev)
                return;
        }
 
-       drm_encoder_cleanup(&intel_encoder->base);
        kfree(intel_dvo);
        kfree(intel_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
new file mode 100644 (file)
index 0000000..e405f10
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_lrc.h"
+
+static const struct engine_info {
+       const char *name;
+       unsigned exec_id;
+       enum intel_engine_hw_id hw_id;
+       u32 mmio_base;
+       unsigned irq_shift;
+       int (*init_legacy)(struct intel_engine_cs *engine);
+       int (*init_execlists)(struct intel_engine_cs *engine);
+} intel_engines[] = {
+       [RCS] = {
+               .name = "render ring",
+               .exec_id = I915_EXEC_RENDER,
+               .hw_id = RCS_HW,
+               .mmio_base = RENDER_RING_BASE,
+               .irq_shift = GEN8_RCS_IRQ_SHIFT,
+               .init_execlists = logical_render_ring_init,
+               .init_legacy = intel_init_render_ring_buffer,
+       },
+       [BCS] = {
+               .name = "blitter ring",
+               .exec_id = I915_EXEC_BLT,
+               .hw_id = BCS_HW,
+               .mmio_base = BLT_RING_BASE,
+               .irq_shift = GEN8_BCS_IRQ_SHIFT,
+               .init_execlists = logical_xcs_ring_init,
+               .init_legacy = intel_init_blt_ring_buffer,
+       },
+       [VCS] = {
+               .name = "bsd ring",
+               .exec_id = I915_EXEC_BSD,
+               .hw_id = VCS_HW,
+               .mmio_base = GEN6_BSD_RING_BASE,
+               .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+               .init_execlists = logical_xcs_ring_init,
+               .init_legacy = intel_init_bsd_ring_buffer,
+       },
+       [VCS2] = {
+               .name = "bsd2 ring",
+               .exec_id = I915_EXEC_BSD,
+               .hw_id = VCS2_HW,
+               .mmio_base = GEN8_BSD2_RING_BASE,
+               .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+               .init_execlists = logical_xcs_ring_init,
+               .init_legacy = intel_init_bsd2_ring_buffer,
+       },
+       [VECS] = {
+               .name = "video enhancement ring",
+               .exec_id = I915_EXEC_VEBOX,
+               .hw_id = VECS_HW,
+               .mmio_base = VEBOX_RING_BASE,
+               .irq_shift = GEN8_VECS_IRQ_SHIFT,
+               .init_execlists = logical_xcs_ring_init,
+               .init_legacy = intel_init_vebox_ring_buffer,
+       },
+};
+
+static struct intel_engine_cs *
+intel_engine_setup(struct drm_i915_private *dev_priv,
+                  enum intel_engine_id id)
+{
+       const struct engine_info *info = &intel_engines[id];
+       struct intel_engine_cs *engine = &dev_priv->engine[id];
+
+       engine->id = id;
+       engine->i915 = dev_priv;
+       engine->name = info->name;
+       engine->exec_id = info->exec_id;
+       engine->hw_id = engine->guc_id = info->hw_id;
+       engine->mmio_base = info->mmio_base;
+       engine->irq_shift = info->irq_shift;
+
+       return engine;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev: DRM device.
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+       unsigned int mask = 0;
+       int (*init)(struct intel_engine_cs *engine);
+       unsigned int i;
+       int ret;
+
+       WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
+       WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+               GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+
+       for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+               if (!HAS_ENGINE(dev_priv, i))
+                       continue;
+
+               if (i915.enable_execlists)
+                       init = intel_engines[i].init_execlists;
+               else
+                       init = intel_engines[i].init_legacy;
+
+               if (!init)
+                       continue;
+
+               ret = init(intel_engine_setup(dev_priv, i));
+               if (ret)
+                       goto cleanup;
+
+               mask |= ENGINE_MASK(i);
+       }
+
+       /*
+        * To catch failures to update the intel_engines table when new
+        * engines are added to the driver, warn about and disable any
+        * forgotten engines.
+        */
+       if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+               device_info->ring_mask = mask;
+
+       device_info->num_rings = hweight32(mask);
+
+       return 0;
+
+cleanup:
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
+               if (i915.enable_execlists)
+                       intel_logical_ring_cleanup(&dev_priv->engine[i]);
+               else
+                       intel_engine_cleanup(&dev_priv->engine[i]);
+       }
+
+       return ret;
+}
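
intel_engines_init() accumulates one bit per successfully initialized engine and then derives both the corrected ring_mask and num_rings from that mask. A standalone sketch of the bookkeeping, where __builtin_popcount stands in for the kernel's hweight32():

#include <stdio.h>

#define ENGINE_MASK(id) (1u << (id))

int main(void)
{
        unsigned int mask = 0;

        mask |= ENGINE_MASK(0);         /* e.g. render engine came up */
        mask |= ENGINE_MASK(2);         /* e.g. bsd engine came up */

        /* ring_mask is overridden with the engines that actually
         * initialized; num_rings is its population count. */
        printf("mask=%#x num_rings=%d\n", mask, __builtin_popcount(mask));
        return 0;
}
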
+
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+        * so long as the semaphore value in the register/page is greater
+        * than the sync value), so whenever we reset the seqno we must also
+        * reset the tracking semaphore value to 0 to ensure it is always
+        * before the next request's seqno. If we don't reset the semaphore
+        * value, then when the seqno moves backwards all future waits will
+        * complete instantly (causing rendering corruption).
+        */
+       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+               if (HAS_VEBOX(dev_priv))
+                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+       }
+       if (dev_priv->semaphore) {
+               struct page *page = i915_vma_first_page(dev_priv->semaphore);
+               void *semaphores;
+
+               /* Semaphores are in noncoherent memory, flush to be safe */
+               semaphores = kmap(page);
+               memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+                      0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+               drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+                                      I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+               kunmap(page);
+       }
+       memset(engine->semaphore.sync_seqno, 0,
+              sizeof(engine->semaphore.sync_seqno));
+
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+       engine->last_submitted_seqno = seqno;
+
+       engine->hangcheck.seqno = seqno;
+
+       /* After manually advancing the seqno, fake the interrupt in case
+        * there are any waiters for that seqno.
+        */
+       intel_engine_wakeup(engine);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+       memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+       clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+       if (intel_engine_has_waiter(engine))
+               i915_queue_hangcheck(engine->i915);
+}
+
+static void intel_engine_init_requests(struct intel_engine_cs *engine)
+{
+       init_request_active(&engine->last_request, NULL);
+       INIT_LIST_HEAD(&engine->request_list);
+}
+
+/**
+ * intel_engine_setup_common - set up engine state not requiring hw access
+ * @engine: Engine to setup.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission mode specific engine setup stage.
+ */
+void intel_engine_setup_common(struct intel_engine_cs *engine)
+{
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       spin_lock_init(&engine->execlist_lock);
+
+       engine->fence_context = fence_context_alloc(1);
+
+       intel_engine_init_requests(engine);
+       intel_engine_init_hangcheck(engine);
+       i915_gem_batch_pool_init(engine, &engine->batch_pool);
+
+       intel_engine_init_cmd_parser(engine);
+}
+
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
+
+       WARN_ON(engine->scratch);
+
+       obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+       if (!obj)
+               obj = i915_gem_object_create(&engine->i915->drm, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               return PTR_ERR(obj);
+       }
+
+       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unref;
+       }
+
+       ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+       if (ret)
+               goto err_unref;
+
+       engine->scratch = vma;
+       DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+                        engine->name, i915_ggtt_offset(vma));
+       return 0;
+
+err_unref:
+       i915_gem_object_put(obj);
+       return ret;
+}
+
+static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+{
+       i915_vma_unpin_and_release(&engine->scratch);
+}
+
+/**
+ * intel_engine_init_common - initialize engine state which might require hw access
+ * @engine: Engine to initialize.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do require hardware access.
+ *
+ * Typically done at later stages of the submission-mode-specific engine setup.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_init_common(struct intel_engine_cs *engine)
+{
+       int ret;
+
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+void intel_engine_reset_irq(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (intel_engine_has_waiter(engine))
+               engine->irq_enable(engine);
+       else
+               engine->irq_disable(engine);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+/**
+ * intel_engine_cleanup_common - cleans up the engine state created by
+ *                               the common initializers.
+ * @engine: Engine to cleanup.
+ *
+ * This cleans up everything created by the common helpers.
+ */
+void intel_engine_cleanup_common(struct intel_engine_cs *engine)
+{
+       intel_engine_cleanup_scratch(engine);
+
+       intel_engine_fini_breadcrumbs(engine);
+       intel_engine_cleanup_cmd_parser(engine);
+       i915_gem_batch_pool_fini(&engine->batch_pool);
+}
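
The seqno-reset path above depends on semaphore waits being strictly monotonic: a waiter proceeds once the signalled value has caught up with its target, so zeroing the tracking value on reset can never satisfy a stale wait. A minimal userspace sketch of that invariant (schematic only; wait_passed() and the values are illustrative, not driver API):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t sync_value;                 /* last value signalled */

    static int wait_passed(uint32_t target)
    {
            return sync_value >= target;        /* waiter may proceed */
    }

    int main(void)
    {
            sync_value = 1000;                  /* end of the old seqno era */
            /* seqno space is reset; new requests start again near 1 */
            assert(wait_passed(5));             /* stale value: wait "passes" instantly */
            sync_value = 0;                     /* the fix: zero the semaphore on reset */
            assert(!wait_passed(5));            /* now the wait blocks as intended */
            return 0;
    }
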
index 6a7ad3ed1463206c7b1b88e658e3103e7c58526e..faa67624e1ed734b80b0c0b9734b63c173d9389c 100644 (file)
@@ -190,9 +190,13 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-       dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
-       I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+               I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+       } else {
+               I915_WRITE(DPFC_FENCE_YOFF, 0);
+       }
 
        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -244,21 +248,29 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }
-       dpfc_ctl |= DPFC_CTL_FENCE_EN;
-       if (IS_GEN5(dev_priv))
-               dpfc_ctl |= params->fb.fence_reg;
+
+       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN;
+               if (IS_GEN5(dev_priv))
+                       dpfc_ctl |= params->fb.fence_reg;
+               if (IS_GEN6(dev_priv)) {
+                       I915_WRITE(SNB_DPFC_CTL_SA,
+                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                       I915_WRITE(DPFC_CPU_FENCE_OFFSET,
+                                  params->crtc.fence_y_offset);
+               }
+       } else {
+               if (IS_GEN6(dev_priv)) {
+                       I915_WRITE(SNB_DPFC_CTL_SA, 0);
+                       I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+               }
+       }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-       if (IS_GEN6(dev_priv)) {
-               I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-       }
-
        intel_fbc_recompress(dev_priv);
 }
 
@@ -305,7 +317,15 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+               dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+               I915_WRITE(SNB_DPFC_CTL_SA,
+                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
+       } else {
+               I915_WRITE(SNB_DPFC_CTL_SA, 0);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+       }
 
        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;
@@ -324,10 +344,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-       I915_WRITE(SNB_DPFC_CTL_SA,
-                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
-       I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-
        intel_fbc_recompress(dev_priv);
 }
 
@@ -494,7 +510,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc,
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return true;
 
-       if (plane_state->visible)
+       if (plane_state->base.visible)
                fbc->visible_pipes_mask |= (1 << pipe);
        else
                fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -709,6 +725,14 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
+/* XXX replace me when we have VMA tracking for intel_plane_state */
+static int get_fence_id(struct drm_framebuffer *fb)
+{
+       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
+
+       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
+}
+
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -725,9 +749,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                        ilk_pipe_pixel_rate(crtc_state);
 
        cache->plane.rotation = plane_state->base.rotation;
-       cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
-       cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
-       cache->plane.visible = plane_state->visible;
+       cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       cache->plane.visible = plane_state->base.visible;
 
        if (!cache->plane.visible)
                return;
@@ -737,11 +761,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        /* FIXME: We lack the proper locking here, so only run this on the
         * platforms that need it. */
        if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = obj->fence_reg;
-       cache->fb.tiling_mode = obj->tiling_mode;
+       cache->fb.fence_reg = get_fence_id(fb);
+       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -768,6 +792,10 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 
        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
+        *
+        * Note that it is possible for a tiled surface to be unmappable (and
+        * so have no fence associated with it) due to aperture constraints
+        * at the time of pinning.
         */
        if (cache->fb.tiling_mode != I915_TILING_X ||
            cache->fb.fence_reg == I915_FENCE_REG_NONE) {
@@ -775,7 +803,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-           cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+           cache->plane.rotation != DRM_ROTATE_0) {
                fbc->no_fbc_reason = "rotation unsupported";
                return false;
        }
@@ -1050,7 +1078,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
                struct intel_plane_state *intel_plane_state =
                        to_intel_plane_state(plane_state);
 
-               if (!intel_plane_state->visible)
+               if (!intel_plane_state->base.visible)
                        continue;
 
                for_each_crtc_in_state(state, crtc, crtc_state, j) {
@@ -1075,6 +1103,8 @@ out:
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
+ * @crtc_state: corresponding &drm_crtc_state for @crtc
+ * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
  *
  * This function checks if the given CRTC was chosen for FBC, then enables it if
  * possible. Notice that it doesn't activate FBC. It is valid to call
@@ -1163,11 +1193,8 @@ void intel_fbc_disable(struct intel_crtc *crtc)
                return;
 
        mutex_lock(&fbc->lock);
-       if (fbc->crtc == crtc) {
-               WARN_ON(!fbc->enabled);
-               WARN_ON(fbc->active);
+       if (fbc->crtc == crtc)
                __intel_fbc_disable(dev_priv);
-       }
        mutex_unlock(&fbc->lock);
 
        cancel_work_sync(&fbc->work.work);
@@ -1212,7 +1239,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
 
        for_each_intel_crtc(&dev_priv->drm, crtc)
                if (intel_crtc_active(&crtc->base) &&
-                   to_intel_plane_state(crtc->base.primary->state)->visible)
+                   to_intel_plane_state(crtc->base.primary->state)->base.visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
@@ -1230,12 +1257,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
        if (i915.enable_fbc >= 0)
                return !!i915.enable_fbc;
 
+       if (!HAS_FBC(dev_priv))
+               return 0;
+
        if (IS_BROADWELL(dev_priv))
                return 1;
 
        return 0;
 }
 
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+       if (intel_iommu_gfx_mapped &&
+           (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+               DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+               return true;
+       }
+#endif
+
+       return false;
+}
+
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
@@ -1253,6 +1297,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
        fbc->active = false;
        fbc->work.scheduled = false;
 
+       if (need_fbc_vtd_wa(dev_priv))
+               mkwrite_device_info(dev_priv)->has_fbc = false;
+
        i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
        DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
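
The common shape of the three *_fbc_activate() hunks above is "program fence state only when a fence exists, and explicitly zero the fence registers otherwise", since a tiled surface can legitimately lack a fence when the mappable aperture was exhausted at pin time. A condensed sketch of that shape (the register id, the FENCE_EN bit position and write_reg() are invented placeholders, not the real MMIO layout):

    #include <stdio.h>

    enum { FENCE_NONE = -1, FENCE_YOFF = 0x3200 };  /* illustrative ids */

    static void write_reg(int reg, unsigned int val)
    {
            printf("reg %#x <- %#x\n", reg, val);   /* MMIO stand-in */
    }

    static void fbc_activate(int fence_id, unsigned int y_offset,
                             unsigned int *dpfc_ctl)
    {
            if (fence_id != FENCE_NONE) {
                    *dpfc_ctl |= 1u << 29;          /* FENCE_EN (invented bit) */
                    *dpfc_ctl |= (unsigned int)fence_id;
                    write_reg(FENCE_YOFF, y_offset);
            } else {
                    write_reg(FENCE_YOFF, 0);       /* clear any stale offset */
            }
    }
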
 
index 86b00c6db1a6d694ad1beb97fcb37fc2cb899873..b7098f98bb671d2c7e06c296466a482c91159ddd 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/vga_switcheroo.h>
 
@@ -42,6 +41,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -159,7 +159,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 
        fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
        if (IS_ERR(fb)) {
-               drm_gem_object_unreference(&obj->base);
+               i915_gem_object_put(obj);
                ret = PTR_ERR(fb);
                goto out;
        }
@@ -183,13 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct i915_vma *vma;
-       struct drm_i915_gem_object *obj;
        bool prealloc = false;
-       void *vaddr;
+       void __iomem *vaddr;
        int ret;
 
        if (intel_fb &&
@@ -215,17 +215,17 @@ static int intelfb_create(struct drm_fb_helper *helper,
                sizes->fb_height = intel_fb->base.height;
        }
 
-       obj = intel_fb->obj;
-
        mutex_lock(&dev->struct_mutex);
 
        /* Pin the GGTT vma for our access via info->screen_base.
         * This also validates that any existing fb inherited from the
         * BIOS is suitable for own access.
         */
-       ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
-       if (ret)
+       vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
                goto out_unlock;
+       }
 
        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
@@ -245,13 +245,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &intelfb_ops;
 
-       vma = i915_gem_obj_to_ggtt(obj);
-
        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = ggtt->mappable_end;
 
-       info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+       info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
        info->fix.smem_len = vma->node.size;
 
        vaddr = i915_vma_pin_iomap(vma);
@@ -273,23 +271,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
-       if (ifbdev->fb->obj->stolen && !prealloc)
+       if (intel_fb->obj->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
-                     fb->width, fb->height,
-                     i915_gem_obj_ggtt_offset(obj), obj);
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+                     fb->width, fb->height, i915_ggtt_offset(vma));
+       ifbdev->vma = vma;
 
        mutex_unlock(&dev->struct_mutex);
-       vga_switcheroo_client_fb_set(dev->pdev, info);
+       vga_switcheroo_client_fb_set(pdev, info);
        return 0;
 
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -554,7 +552,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
@@ -768,7 +766,7 @@ void intel_fbdev_fini(struct drm_device *dev)
        if (!ifbdev)
                return;
 
-       flush_work(&dev_priv->fbdev_suspend_work);
+       cancel_work_sync(&dev_priv->fbdev_suspend_work);
        if (!current_is_async())
                intel_fbdev_sync(ifbdev);
 
@@ -782,7 +780,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct fb_info *info;
 
-       if (!ifbdev)
+       if (!ifbdev || !ifbdev->fb)
                return;
 
        info = ifbdev->helper.fbdev;
@@ -827,31 +825,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       if (dev_priv->fbdev)
-               drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       if (ifbdev && ifbdev->fb)
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
 void intel_fbdev_restore_mode(struct drm_device *dev)
 {
-       int ret;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
-       struct drm_fb_helper *fb_helper;
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
        if (!ifbdev)
                return;
 
        intel_fbdev_sync(ifbdev);
+       if (!ifbdev->fb)
+               return;
 
-       fb_helper = &ifbdev->helper;
-
-       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
-       if (ret) {
+       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
                DRM_DEBUG("failed to restore crtc mode\n");
        } else {
-               mutex_lock(&fb_helper->dev->struct_mutex);
+               mutex_lock(&dev->struct_mutex);
                intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
+               mutex_unlock(&dev->struct_mutex);
        }
 }
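
The fbdev hunks switch intel_pin_and_fence_fb_obj() from returning an int to returning the pinned VMA itself, with errors encoded in the pointer. A standalone model of that ERR_PTR idiom (err_ptr()/is_err()/ptr_err() re-implement the kernel helpers here purely so the sketch compiles on its own):

    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline long  ptr_err(const void *p) { return (long)p; }
    static inline int   is_err(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    struct vma { unsigned long ggtt_offset; };

    static struct vma *pin_fb(int want_fail)
    {
            static struct vma v = { .ggtt_offset = 0x100000 };
            return want_fail ? err_ptr(-12 /* ENOMEM */) : &v;
    }

    int use_fb(void)
    {
            struct vma *vma = pin_fb(0);

            if (is_err(vma))
                    return (int)ptr_err(vma);   /* propagate the errno */
            return 0;                           /* vma->ggtt_offset is usable */
    }
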
index ac85357010b4b652ec15811d9c7d4a3b52ad8ebd..966de4c7c7a2e7a4a68a282558e8d2f438535f0f 100644 (file)
 #include <drm/drmP.h>
 
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin,
+                              unsigned int frontbuffer_bits)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        if (origin == ORIGIN_CS) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits
-                       |= obj->frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits
-                       &= ~obj->frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
+               spin_lock(&dev_priv->fb_tracking.lock);
+               dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+               spin_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_psr_invalidate(dev, obj->frontbuffer_bits);
-       intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
-       intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+       intel_psr_invalidate(dev_priv, frontbuffer_bits);
+       intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
+       intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -113,64 +96,45 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_device *dev,
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
                                    unsigned frontbuffer_bits,
                                    enum fb_op_origin origin)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
        /* Delay flushing when rings are still busy. */
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
        if (!frontbuffer_bits)
                return;
 
-       intel_edp_drrs_flush(dev, frontbuffer_bits);
-       intel_psr_flush(dev, frontbuffer_bits, origin);
+       intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+       intel_psr_flush(dev_priv, frontbuffer_bits, origin);
        intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         bool retire,
+                         enum fb_op_origin origin,
+                         unsigned int frontbuffer_bits)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned frontbuffer_bits;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       frontbuffer_bits = obj->frontbuffer_bits;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        if (retire) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
+               spin_lock(&dev_priv->fb_tracking.lock);
                /* Filter out new bits since rendering started. */
                frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
                dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
+               spin_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
@@ -180,23 +144,21 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
                                    unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_psr_single_frame_update(dev, frontbuffer_bits);
+       intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
@@ -204,23 +166,23 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
                                     unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        /* Mask any cancelled flips. */
        frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
        dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev_priv,
+                                       frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
@@ -229,15 +191,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
                            unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+       intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
 }
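
The mutex-to-spinlock conversion above is safe because every critical section in this file now reduces to a few bit operations on busy_bits/flip_bits with nothing that can sleep. Stripped of locking and hardware calls, the tracking protocol is just this bit arithmetic (a model of the flow, not the driver's structures):

    /* invalidate (ORIGIN_CS) marks planes busy; flip_prepare moves them
     * from busy to pending-flip; flip_complete flushes whatever flips
     * were not cancelled in the meantime. */
    struct fb_tracking { unsigned busy_bits, flip_bits; };

    static void invalidate_cs(struct fb_tracking *t, unsigned bits)
    {
            t->busy_bits |= bits;
            t->flip_bits &= ~bits;
    }

    static void flip_prepare(struct fb_tracking *t, unsigned bits)
    {
            t->flip_bits |= bits;
            t->busy_bits &= ~bits;      /* old buffer is no longer busy */
    }

    static unsigned flip_complete(struct fb_tracking *t, unsigned bits)
    {
            bits &= t->flip_bits;       /* mask any cancelled flips */
            t->flip_bits &= ~bits;
            return bits;                /* bits that still need a flush */
    }
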
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
new file mode 100644 (file)
index 0000000..76ceb53
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+                                   unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+                                    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+                           unsigned frontbuffer_bits);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin,
+                              unsigned int frontbuffer_bits);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         bool retire,
+                         enum fb_op_origin origin,
+                         unsigned int frontbuffer_bits);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                                          enum fb_op_origin origin)
+{
+       unsigned int frontbuffer_bits;
+
+       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!frontbuffer_bits)
+               return;
+
+       __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                                     bool retire,
+                                     enum fb_op_origin origin)
+{
+       unsigned int frontbuffer_bits;
+
+       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!frontbuffer_bits)
+               return;
+
+       __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
+}
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
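
The point of the new header is the split visible in intel_fb_obj_invalidate()/intel_fb_obj_flush(): an inline fast path that costs a single atomic load when the object is on no frontbuffer, calling out of line only on a hit. The same layering in a self-contained form (C11 atomics stand in for the kernel's atomic_read()):

    #include <stdatomic.h>

    struct obj { atomic_uint frontbuffer_bits; };

    void slow_path(struct obj *o, unsigned bits);   /* out-of-line worker */

    static inline void fast_path(struct obj *o)
    {
            unsigned bits = atomic_load(&o->frontbuffer_bits);

            if (!bits)                  /* common case: one load and done */
                    return;
            slow_path(o, bits);         /* rare case pays the full cost */
    }
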
index 3e3e743740c01a2651850836f4fee970ea294b42..b1ba869588114fb58729c218b91322e5a07624ea 100644 (file)
@@ -63,26 +63,27 @@ struct drm_i915_gem_request;
  *   retcode: errno from last guc_submit()
  */
 struct i915_guc_client {
-       struct drm_i915_gem_object *client_obj;
+       struct i915_vma *vma;
        void *client_base;              /* first page (only) of above   */
        struct i915_gem_context *owner;
        struct intel_guc *guc;
+
+       uint32_t engines;               /* bitmap of (host) engine ids  */
        uint32_t priority;
        uint32_t ctx_index;
-
        uint32_t proc_desc_offset;
+
        uint32_t doorbell_offset;
        uint32_t cookie;
        uint16_t doorbell_id;
-       uint16_t padding;               /* Maintain alignment           */
+       uint16_t padding[3];            /* Maintain alignment           */
 
+       spinlock_t wq_lock;
        uint32_t wq_offset;
        uint32_t wq_size;
        uint32_t wq_tail;
-       uint32_t unused;                /* Was 'wq_head'                */
-
+       uint32_t wq_rsvd;
        uint32_t no_wq_space;
-       uint32_t q_fail;                /* No longer used               */
        uint32_t b_fail;
        int retcode;
 
@@ -125,11 +126,10 @@ struct intel_guc_fw {
 struct intel_guc {
        struct intel_guc_fw guc_fw;
        uint32_t log_flags;
-       struct drm_i915_gem_object *log_obj;
-
-       struct drm_i915_gem_object *ads_obj;
+       struct i915_vma *log_vma;
 
-       struct drm_i915_gem_object *ctx_pool_obj;
+       struct i915_vma *ads_vma;
+       struct i915_vma *ctx_pool_vma;
        struct ida ctx_ids;
 
        struct i915_guc_client *execbuf_client;
@@ -159,8 +159,7 @@ extern int intel_guc_resume(struct drm_device *dev);
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
+int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
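
The new wq_rsvd field and the i915_guc_wq_reserve() declaration replace the old i915_guc_wq_check_space() scheme: space is claimed ahead of time and consumed later, so submission itself should not fail for lack of work-queue room. A hypothetical model of that accounting (field and function names are illustrative; the actual call sites are not shown in this hunk):

    struct wq { unsigned size, tail, rsvd; };

    static int wq_reserve(struct wq *q, unsigned bytes, unsigned in_use)
    {
            if (q->size - in_use - q->rsvd < bytes)
                    return -11;         /* -EAGAIN: no room yet */
            q->rsvd += bytes;           /* claim it for this request */
            return 0;
    }

    static void wq_consume(struct wq *q, unsigned bytes)
    {
            q->tail = (q->tail + bytes) % q->size;
            q->rsvd -= bytes;           /* reservation now materialized */
    }
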
 
index 944786d7075b7c73631093c3240f091dda065c1d..e40db2d2ae994b81fe44041d13b63b9e5713acc8 100644 (file)
  *
  *     +-------------------------------+
  *     |        guc_css_header         |
+ *     |                               |
  *     | contains major/minor version  |
  *     +-------------------------------+
  *     |             uCode             |
  *
  * 1. Header, uCode and RSA are must-have components.
 * 2. All firmware components, if present, are in the sequence illustrated
- * in the layout table above.
+ *    in the layout table above.
  * 3. Length info of each component can be found in header, in dwords.
  * 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
+ *    in fw. So driver will load a truncated firmware in this case.
  */
 
 struct guc_css_header {
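
Since component lengths live in the CSS header in dwords and the components follow each other in a fixed order, offsets fall out by accumulation. A simplified sketch (the struct below is a stand-in with invented field names, not the real guc_css_header):

    #include <stdint.h>
    #include <stddef.h>

    struct css { uint32_t header_size_dw, ucode_size_dw, key_size_dw; };

    static size_t ucode_offset(const struct css *c)
    {
            return (size_t)c->header_size_dw * 4;   /* header comes first */
    }

    static size_t rsa_offset(const struct css *c)
    {
            return ucode_offset(c) + (size_t)c->ucode_size_dw * 4;
    }
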
index 605c69658d2c1c55b2408b0bc36dd6c41d26d667..6fd39efb789475e1ec7cb50e594d9e6ddcc2e409 100644 (file)
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
+#define SKL_FW_MAJOR 6
+#define SKL_FW_MINOR 1
+
+#define BXT_FW_MAJOR 8
+#define BXT_FW_MINOR 7
+
+#define KBL_FW_MAJOR 9
+#define KBL_FW_MINOR 14
+
+#define GUC_FW_PATH(platform, major, minor) \
+       "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
+
+#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
-#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
 MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 
-#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
 /* User-friendly representation of an enum */
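
GUC_FW_PATH assembles the firmware filename entirely in the preprocessor: __stringify() turns each argument into a string literal and adjacent literals concatenate, which is why the result matches the old hand-written paths exactly. The standard two-level stringify it relies on:

    #define STR_(x) #x
    #define STR(x)  STR_(x)             /* expand x before stringifying */
    #define FW_PATH(plat, maj, min) \
            "i915/" STR(plat) "_guc_ver" STR(maj) "_" STR(min) ".bin"

    /* FW_PATH(skl, 6, 1)
     *   -> "i915/" "skl" "_guc_ver" "6" "_" "1" ".bin"
     *   -> "i915/skl_guc_ver6_1.bin"                    */
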
@@ -85,7 +97,7 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
        }
 };
 
-static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        int irqs;
@@ -102,7 +114,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_WD_VECS_IER, 0);
 }
 
-static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        int irqs;
@@ -122,13 +134,28 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_WD_VECS_IER, ~irqs);
 
        /*
-        * If GuC has routed PM interrupts to itself, don't keep it.
-        * and keep other interrupts those are unmasked by GuC.
-       */
+        * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+        * (unmasked) PM interrupts to the GuC. All other bits of this
+        * register *disable* generation of a specific interrupt.
+        *
+        * 'pm_intr_keep' indicates bits that are NOT to be set when
+        * writing to the PM interrupt mask register, i.e. interrupts
+        * that must not be disabled.
+        *
+        * If the GuC is handling these interrupts, then we must not let
+        * the PM code disable ANY interrupt that the GuC is expecting.
+        * So for each ENABLED (0) bit in this register, we must SET the
+        * bit in pm_intr_keep so that it's left enabled for the GuC.
+        *
+        * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
+        * (so interrupts go to the DISPLAY unit at first); but here we
+        * need to CLEAR that bit, which will result in the register bit
+        * being left SET!
+        */
        tmp = I915_READ(GEN6_PMINTRMSK);
-       if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
-               dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+       if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
+               dev_priv->rps.pm_intr_keep |= ~tmp;
+               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
        }
 }
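
The pm_intr_keep update is easier to see with concrete bits: every 0 (enabled) bit of PMINTRMSK must end up as a 1 in pm_intr_keep, except REDIRECT_TO_GUC itself, which is forced clear so that a later masked write leaves the hardware bit set. A toy 8-bit worked example (values invented; bit 7 plays the role of REDIRECT_TO_GUC):

    #include <assert.h>

    #define REDIRECT_TO_GUC 0x80u

    int main(void)
    {
            unsigned tmp  = 0xA5;       /* current PMINTRMSK contents */
            unsigned keep = 0;

            if (tmp & REDIRECT_TO_GUC) {
                    keep |= ~tmp & 0xFFu;        /* keep every enabled bit */
                    keep &= ~REDIRECT_TO_GUC;    /* but let GuC routing stay */
            }
            assert(keep == 0x5A);       /* enabled bits of 0xA5, minus bit 7 */
            return 0;
    }
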
 
@@ -140,17 +167,24 @@ static u32 get_gttype(struct drm_i915_private *dev_priv)
 
 static u32 get_core_family(struct drm_i915_private *dev_priv)
 {
-       switch (INTEL_INFO(dev_priv)->gen) {
+       u32 gen = INTEL_GEN(dev_priv);
+
+       switch (gen) {
        case 9:
                return GFXCORE_FAMILY_GEN9;
 
        default:
-               DRM_ERROR("GUC: unsupported core family\n");
+               WARN(1, "GEN%d does not support GuC operation!\n", gen);
                return GFXCORE_FAMILY_UNKNOWN;
        }
 }
 
-static void set_guc_init_params(struct drm_i915_private *dev_priv)
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_params_init(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
        u32 params[GUC_CTL_MAX_DWORDS];
@@ -181,16 +215,15 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
                        i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
        }
 
-       if (guc->ads_obj) {
-               u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
-                               >> PAGE_SHIFT;
+       if (guc->ads_vma) {
+               u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
                params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
                params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
        }
 
        /* If GuC submission is enabled, set up additional parameters here */
        if (i915.enable_guc_submission) {
-               u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+               u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
                u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
                pgs >>= PAGE_SHIFT;
@@ -238,12 +271,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
  * Note that GuC needs the CSS header plus uKernel code to be copied by the
  * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
  */
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
+                             struct i915_vma *vma)
 {
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
        unsigned long offset;
-       struct sg_table *sg = fw_obj->pages;
+       struct sg_table *sg = vma->pages;
        u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
        int i, ret = 0;
 
@@ -260,7 +293,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
        I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
        /* Set the source address for the new blob */
-       offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
+       offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
        I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
        I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
@@ -315,6 +348,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        struct drm_device *dev = &dev_priv->drm;
+       struct i915_vma *vma;
        int ret;
 
        ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -323,10 +357,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
                return ret;
        }
 
-       ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
-       if (ret) {
-               DRM_DEBUG_DRIVER("pin failed %d\n", ret);
-               return ret;
+       vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+       if (IS_ERR(vma)) {
+               DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+               return PTR_ERR(vma);
        }
 
        /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
@@ -349,7 +383,9 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        }
 
        /* WaC6DisallowByGfxPause*/
-       I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+       if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
+           IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+               I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
 
        if (IS_BROXTON(dev))
                I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -361,13 +397,13 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
                I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
                                            I915_READ(GEN7_MISCCPCTL)));
 
-               /* allows for 5us before GT can go to RC6 */
+               /* allows for 5us (in 10ns units) before GT can go to RC6 */
                I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
        }
 
-       set_guc_init_params(dev_priv);
+       guc_params_init(dev_priv);
 
-       ret = guc_ucode_xfer_dma(dev_priv);
+       ret = guc_ucode_xfer_dma(dev_priv, vma);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -375,12 +411,12 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
         * We keep the object pages for reuse during resume. But we can unpin it
         * now that DMA has completed, so it doesn't continue to take up space.
         */
-       i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+       i915_vma_unpin(vma);
 
        return ret;
 }
 
-static int i915_reset_guc(struct drm_i915_private *dev_priv)
+static int guc_hw_reset(struct drm_i915_private *dev_priv)
 {
        int ret;
        u32 guc_status;
@@ -433,7 +469,7 @@ int intel_guc_setup(struct drm_device *dev)
                goto fail;
        } else if (*fw_path == '\0') {
                /* Device has a GuC but we don't know what f/w to load? */
-               DRM_INFO("No GuC firmware known for this platform\n");
+               WARN(1, "No GuC firmware known for this platform!\n");
                err = -ENODEV;
                goto fail;
        }
@@ -447,7 +483,7 @@ int intel_guc_setup(struct drm_device *dev)
                goto fail;
        }
 
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
@@ -470,11 +506,9 @@ int intel_guc_setup(struct drm_device *dev)
                 * Always reset the GuC just before (re)loading, so
                 * that the state and timing are fairly predictable
                 */
-               err = i915_reset_guc(dev_priv);
-               if (err) {
-                       DRM_ERROR("GuC reset failed: %d\n", err);
+               err = guc_hw_reset(dev_priv);
+               if (err)
                        goto fail;
-               }
 
                err = guc_ucode_xfer(dev_priv);
                if (!err)
@@ -497,7 +531,7 @@ int intel_guc_setup(struct drm_device *dev)
                err = i915_guc_submission_enable(dev_priv);
                if (err)
                        goto fail;
-               direct_interrupts_to_guc(dev_priv);
+               guc_interrupts_capture(dev_priv);
        }
 
        return 0;
@@ -506,7 +540,7 @@ fail:
        if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
                guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
 
@@ -532,15 +566,15 @@ fail:
        else if (err == 0)
                DRM_INFO("GuC firmware load skipped\n");
        else if (ret != -EIO)
-               DRM_INFO("GuC firmware load failed: %d\n", err);
+               DRM_NOTE("GuC firmware load failed: %d\n", err);
        else
-               DRM_ERROR("GuC firmware load failed: %d\n", err);
+               DRM_WARN("GuC firmware load failed: %d\n", err);
 
        if (i915.enable_guc_submission) {
                if (fw_path == NULL)
                        DRM_INFO("GuC submission without firmware not supported\n");
                if (ret == 0)
-                       DRM_INFO("Falling back from GuC submission to execlist mode\n");
+                       DRM_NOTE("Falling back from GuC submission to execlist mode\n");
                else
                        DRM_ERROR("GuC init failed: %d\n", ret);
        }
@@ -551,6 +585,7 @@ fail:
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 {
+       struct pci_dev *pdev = dev->pdev;
        struct drm_i915_gem_object *obj;
        const struct firmware *fw;
        struct guc_css_header *css;
@@ -560,7 +595,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
                intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
-       err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+       err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
        if (err)
                goto fail;
        if (!fw)
@@ -571,7 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* Check the size of the blob before examining buffer contents */
        if (fw->size < sizeof(struct guc_css_header)) {
-               DRM_ERROR("Firmware header is missing\n");
+               DRM_NOTE("Firmware header is missing\n");
                goto fail;
        }
 
@@ -583,7 +618,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
                css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
        if (guc_fw->header_size != sizeof(struct guc_css_header)) {
-               DRM_ERROR("CSS header definition mismatch\n");
+               DRM_NOTE("CSS header definition mismatch\n");
                goto fail;
        }
 
@@ -593,7 +628,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* now RSA */
        if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-               DRM_ERROR("RSA key size is bad\n");
+               DRM_NOTE("RSA key size is bad\n");
                goto fail;
        }
        guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
@@ -602,14 +637,14 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        /* At least, it should have header, uCode and RSA. Size of all three. */
        size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
        if (fw->size < size) {
-               DRM_ERROR("Missing firmware components\n");
+               DRM_NOTE("Missing firmware components\n");
                goto fail;
        }
 
        /* Header and uCode will be loaded to WOPCM. Size of the two. */
        size = guc_fw->header_size + guc_fw->ucode_size;
        if (size > guc_wopcm_size(to_i915(dev))) {
-               DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+               DRM_NOTE("Firmware is too large to fit in WOPCM\n");
                goto fail;
        }
 
@@ -624,7 +659,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
            guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
-               DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+               DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
                        guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
                        guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
                err = -ENOEXEC;
@@ -654,15 +689,15 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        return;
 
 fail:
+       DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
+                guc_fw->guc_fw_path, err);
        DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
                err, fw, guc_fw->guc_fw_obj);
-       DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
-                 guc_fw->guc_fw_path, err);
 
        mutex_lock(&dev->struct_mutex);
        obj = guc_fw->guc_fw_obj;
        if (obj)
-               drm_gem_object_unreference(&obj->base);
+               i915_gem_object_put(obj);
        guc_fw->guc_fw_obj = NULL;
        mutex_unlock(&dev->struct_mutex);
 
@@ -695,16 +730,16 @@ void intel_guc_init(struct drm_device *dev)
                fw_path = NULL;
        } else if (IS_SKYLAKE(dev)) {
                fw_path = I915_SKL_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = 6;
-               guc_fw->guc_fw_minor_wanted = 1;
+               guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
+               guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
        } else if (IS_BROXTON(dev)) {
                fw_path = I915_BXT_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = 8;
-               guc_fw->guc_fw_minor_wanted = 7;
+               guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
+               guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
        } else if (IS_KABYLAKE(dev)) {
                fw_path = I915_KBL_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = 9;
-               guc_fw->guc_fw_minor_wanted = 14;
+               guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
+               guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
        } else {
                fw_path = "";   /* unknown device */
        }
@@ -738,12 +773,12 @@ void intel_guc_fini(struct drm_device *dev)
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
        mutex_lock(&dev->struct_mutex);
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
 
        if (guc_fw->guc_fw_obj)
-               drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+               i915_gem_object_put(guc_fw->guc_fw_obj);
        guc_fw->guc_fw_obj = NULL;
        mutex_unlock(&dev->struct_mutex);
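
The fetch path accepts a blob only when the major version matches exactly and the minor is at least the wanted one, as the guc_fw_major_found/minor_found comparison above encodes. As a predicate:

    /* Sketch of the GuC firmware version gate: a major bump is an ABI
     * break, a newer minor is backwards compatible. */
    static int fw_version_ok(int maj_found, int min_found,
                             int maj_wanted, int min_wanted)
    {
            if (maj_found != maj_wanted)
                    return 0;                   /* incompatible interface */
            return min_found >= min_wanted;     /* newer minor is fine */
    }
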
 
index 4df9f384910ccd8050119873a2ae9418db57d15e..c51073f78730c4367459cffbb620bdeeeea08a2e 100644 (file)
@@ -985,7 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
        intel_audio_codec_enable(encoder);
 }
 
-static void g4x_enable_hdmi(struct intel_encoder *encoder)
+static void g4x_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1006,7 +1008,9 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void ibx_enable_hdmi(struct intel_encoder *encoder)
+static void ibx_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1055,7 +1059,9 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void cpt_enable_hdmi(struct intel_encoder *encoder)
+static void cpt_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1108,11 +1114,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void vlv_enable_hdmi(struct intel_encoder *encoder)
+static void vlv_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
 }
 
-static void intel_disable_hdmi(struct intel_encoder *encoder)
+static void intel_disable_hdmi(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1164,17 +1174,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void g4x_disable_hdmi(struct intel_encoder *encoder)
+static void g4x_disable_hdmi(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);
 
-       intel_disable_hdmi(encoder);
+       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_hdmi(struct intel_encoder *encoder)
+static void pch_disable_hdmi(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -1182,9 +1196,11 @@ static void pch_disable_hdmi(struct intel_encoder *encoder)
                intel_audio_codec_disable(encoder);
 }
 
-static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_hdmi(encoder);
+       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
 static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
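
All of the HDMI hunks so far are one mechanical conversion: the atomic encoder hooks now receive the CRTC and connector state they act on instead of chasing pointers through encoder->base.crtc. The shape of the change, schematically (abbreviated types; not the full drm structs):

    struct encoder; struct crtc_state; struct conn_state;

    /* old hook */ typedef void (*enable_v1)(struct encoder *);
    /* new hook */ typedef void (*enable_v2)(struct encoder *,
                                             struct crtc_state *,
                                             struct conn_state *);

    /* Wrappers simply forward the extra arguments, as
     * pch_post_disable_hdmi() does with intel_disable_hdmi() above. */
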
@@ -1285,7 +1301,8 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config)
+                              struct intel_crtc_state *pipe_config,
+                              struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -1422,24 +1439,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 }
 
 static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-       struct edid *edid = NULL;
+       struct edid *edid;
        bool connected = false;
 
-       if (force) {
-               intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-               edid = drm_get_edid(connector,
-                                   intel_gmbus_get_adapter(dev_priv,
-                                   intel_hdmi->ddc_bus));
+       edid = drm_get_edid(connector,
+                           intel_gmbus_get_adapter(dev_priv,
+                           intel_hdmi->ddc_bus));
 
-               intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+       intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
-               intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
-       }
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
        to_intel_connector(connector)->detect_edid = edid;
        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1480,16 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        enum drm_connector_status status;
-       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       bool live_status = false;
-       unsigned int try;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
        intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-       for (try = 0; !live_status && try < 9; try++) {
-               if (try)
-                       msleep(10);
-               live_status = intel_digital_port_connected(dev_priv,
-                               hdmi_to_dig_port(intel_hdmi));
-       }
-
-       if (!live_status) {
-               DRM_DEBUG_KMS("HDMI live status down\n");
-               /*
-                * Live status register is not reliable on all intel platforms.
-                * So consider live_status only for certain platforms, for
-                * others, read EDID to determine presence of sink.
-                */
-               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
-                       live_status = true;
-       }
-
        intel_hdmi_unset_edid(connector);
 
-       if (intel_hdmi_set_edid(connector, live_status)) {
+       if (intel_hdmi_set_edid(connector)) {
                struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
                hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1515,7 @@ intel_hdmi_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       intel_hdmi_set_edid(connector, true);
+       intel_hdmi_set_edid(connector);
        hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
 }
 
@@ -1638,7 +1632,9 @@ done:
        return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1651,7 +1647,9 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
                                   adjusted_mode);
 }
 
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1671,37 +1669,47 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       g4x_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
 
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        intel_hdmi_prepare(encoder);
 
        vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        intel_hdmi_prepare(encoder);
 
        chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+                                     struct intel_crtc_state *old_crtc_state,
+                                     struct drm_connector_state *old_conn_state)
 {
        chv_phy_post_pll_disable(encoder);
 }
 
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        /* Reset lanes to avoid HDMI flicker (VLV w/a) */
        vlv_phy_reset_lanes(encoder);
 }
 
-static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1714,7 +1722,9 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1734,7 +1744,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       g4x_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 
index f48957ea100d9283b53d02f7aef82ae5cacbb2e9..334d47b5811a750b356f78ad43beffbf285a0f82 100644 (file)
@@ -477,7 +477,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void i915_hpd_poll_init_work(struct work_struct *work) {
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
@@ -525,7 +526,6 @@ void i915_hpd_poll_init_work(struct work_struct *work) {
 /**
  * intel_hpd_poll_init - enables/disables polling for connectors with hpd
  * @dev_priv: i915 device instance
- * @enabled: Whether to enable or disable polling
  *
  * This function enables polling for all connectors, regardless of whether or
  * not they support hotplug detection. Under certain conditions HPD may not be
index 1f266d7df2ec076afdffefef40ec870f9115291e..79aab9ad6faa826416abd8004f35ce78d9f62c2c 100644 (file)
@@ -255,67 +255,59 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
        algo->data = bus;
 }
 
-static int
-gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
-                    u32 gmbus2_status,
-                    u32 gmbus4_irq_en)
+static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
 {
-       int i;
-       u32 gmbus2 = 0;
        DEFINE_WAIT(wait);
-
-       if (!HAS_GMBUS_IRQ(dev_priv))
-               gmbus4_irq_en = 0;
+       u32 gmbus2;
+       int ret;
 
        /* Important: The hw handles only the first bit, so set only one! Since
         * we also need to check for NAKs besides the hw ready/idle signal, we
-        * need to wake up periodically and check that ourselves. */
-       I915_WRITE(GMBUS4, gmbus4_irq_en);
-
-       for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
-               prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
-                               TASK_UNINTERRUPTIBLE);
+        * need to wake up periodically and check that ourselves.
+        */
+       if (!HAS_GMBUS_IRQ(dev_priv))
+               irq_en = 0;
 
-               gmbus2 = I915_READ_NOTRACE(GMBUS2);
-               if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
-                       break;
+       add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+       I915_WRITE_FW(GMBUS4, irq_en);
 
-               schedule_timeout(1);
-       }
-       finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+       status |= GMBUS_SATOER;
+       ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+       if (ret)
+               ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
 
-       I915_WRITE(GMBUS4, 0);
+       I915_WRITE_FW(GMBUS4, 0);
+       remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
        if (gmbus2 & GMBUS_SATOER)
                return -ENXIO;
-       if (gmbus2 & gmbus2_status)
-               return 0;
-       return -ETIMEDOUT;
+
+       return ret;
 }
 
 static int
 gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
+       DEFINE_WAIT(wait);
+       u32 irq_enable;
        int ret;
 
-       if (!HAS_GMBUS_IRQ(dev_priv))
-               return intel_wait_for_register(dev_priv,
-                                              GMBUS2, GMBUS_ACTIVE, 0,
-                                              10);
-
        /* Important: The hw handles only the first bit, so set only one! */
-       I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+       irq_enable = 0;
+       if (HAS_GMBUS_IRQ(dev_priv))
+               irq_enable = GMBUS_IDLE_EN;
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
-                                (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
-                                msecs_to_jiffies_timeout(10));
+       add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+       I915_WRITE_FW(GMBUS4, irq_enable);
 
-       I915_WRITE(GMBUS4, 0);
+       ret = intel_wait_for_register_fw(dev_priv,
+                                        GMBUS2, GMBUS_ACTIVE, 0,
+                                        10);
 
-       if (ret)
-               return 0;
-       else
-               return -ETIMEDOUT;
+       I915_WRITE_FW(GMBUS4, 0);
+       remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+
+       return ret;
 }
 
 static int
@@ -323,22 +315,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
                      unsigned short addr, u8 *buf, unsigned int len,
                      u32 gmbus1_index)
 {
-       I915_WRITE(GMBUS1,
-                  gmbus1_index |
-                  GMBUS_CYCLE_WAIT |
-                  (len << GMBUS_BYTE_COUNT_SHIFT) |
-                  (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-                  GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS1,
+                     gmbus1_index |
+                     GMBUS_CYCLE_WAIT |
+                     (len << GMBUS_BYTE_COUNT_SHIFT) |
+                     (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                     GMBUS_SLAVE_READ | GMBUS_SW_RDY);
        while (len) {
                int ret;
                u32 val, loop = 0;
 
-               ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-                                          GMBUS_HW_RDY_EN);
+               ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
                if (ret)
                        return ret;
 
-               val = I915_READ(GMBUS3);
+               val = I915_READ_FW(GMBUS3);
                do {
                        *buf++ = val & 0xff;
                        val >>= 8;
@@ -385,12 +376,12 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
                len -= 1;
        }
 
-       I915_WRITE(GMBUS3, val);
-       I915_WRITE(GMBUS1,
-                  GMBUS_CYCLE_WAIT |
-                  (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-                  (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-                  GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS3, val);
+       I915_WRITE_FW(GMBUS1,
+                     GMBUS_CYCLE_WAIT |
+                     (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+                     (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                     GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
        while (len) {
                int ret;
 
@@ -399,10 +390,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
                        val |= *buf++ << (8 * loop);
                } while (--len && ++loop < 4);
 
-               I915_WRITE(GMBUS3, val);
+               I915_WRITE_FW(GMBUS3, val);
 
-               ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-                                          GMBUS_HW_RDY_EN);
+               ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
                if (ret)
                        return ret;
        }
@@ -460,13 +450,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 
        /* GMBUS5 holds 16-bit index */
        if (gmbus5)
-               I915_WRITE(GMBUS5, gmbus5);
+               I915_WRITE_FW(GMBUS5, gmbus5);
 
        ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
        /* Clear GMBUS5 after each index transfer */
        if (gmbus5)
-               I915_WRITE(GMBUS5, 0);
+               I915_WRITE_FW(GMBUS5, 0);
 
        return ret;
 }
@@ -478,11 +468,15 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
                                               struct intel_gmbus,
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
+       const unsigned int fw =
+               intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
+                                              FW_REG_READ | FW_REG_WRITE);
        int i = 0, inc, try = 0;
        int ret = 0;
 
+       intel_uncore_forcewake_get(dev_priv, fw);
 retry:
-       I915_WRITE(GMBUS0, bus->reg0);
+       I915_WRITE_FW(GMBUS0, bus->reg0);
 
        for (; i < num; i += inc) {
                inc = 1;
@@ -496,8 +490,8 @@ retry:
                }
 
                if (!ret)
-                       ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-                                                  GMBUS_HW_WAIT_EN);
+                       ret = gmbus_wait(dev_priv,
+                                        GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
                if (ret == -ETIMEDOUT)
                        goto timeout;
                else if (ret)
@@ -508,7 +502,7 @@ retry:
         * a STOP on the very first cycle. To simplify the code we
         * unconditionally generate the STOP condition with an additional gmbus
         * cycle. */
-       I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
        /* Mark the GMBUS interface as disabled after waiting for idle.
         * We will re-enable it at the start of the next xfer,
@@ -519,7 +513,7 @@ retry:
                         adapter->name);
                ret = -ETIMEDOUT;
        }
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS0, 0);
        ret = ret ?: i;
        goto out;
 
@@ -548,9 +542,9 @@ clear_err:
         * of resetting the GMBUS controller and so clearing the
         * BUS_ERROR raised by the slave's NAK.
         */
-       I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
-       I915_WRITE(GMBUS1, 0);
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
+       I915_WRITE_FW(GMBUS1, 0);
+       I915_WRITE_FW(GMBUS0, 0);
 
        DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
                         adapter->name, msgs[i].addr,
@@ -573,7 +567,7 @@ clear_err:
 timeout:
        DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
                      bus->adapter.name, bus->reg0 & 0xff);
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS0, 0);
 
        /*
         * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -582,6 +576,7 @@ timeout:
        ret = -EAGAIN;
 
 out:
+       intel_uncore_forcewake_put(dev_priv, fw);
        return ret;
 }
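With forcewake taken once for the whole transfer, every register access in between can use the raw I915_WRITE_FW/I915_READ_FW forms. The bracketing pattern, reduced to a self-contained sketch (the forcewake_* helpers here are stand-ins, not the driver's API):

    #include <stdio.h>

    /* Hypothetical stand-ins for forcewake and the raw accesses it guards. */
    static void forcewake_get(unsigned int fw) { printf("wake %#x\n", fw); }
    static void forcewake_put(unsigned int fw) { printf("sleep %#x\n", fw); }
    static int  do_raw_accesses(void)          { return 0; }

    /* Acquire once, do many _FW-style accesses, release on the way out. */
    static int xfer_many(void)
    {
            const unsigned int fw = 0x1;    /* illustrative domain mask */
            int ret;

            forcewake_get(fw);
            ret = do_raw_accesses();        /* no per-register wake cost */
            forcewake_put(fw);              /* paired on every exit path */
            return ret;
    }

    int main(void) { return xfer_many(); }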
 
@@ -633,6 +628,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
 int intel_setup_gmbus(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct intel_gmbus *bus;
        unsigned int pin;
        int ret;
@@ -663,7 +659,7 @@ int intel_setup_gmbus(struct drm_device *dev)
                         "i915 gmbus %s",
                         get_gmbus_pin(dev_priv, pin)->name);
 
-               bus->adapter.dev.parent = &dev->pdev->dev;
+               bus->adapter.dev.parent = &pdev->dev;
                bus->dev_priv = dev_priv;
 
                bus->adapter.algo = &gmbus_algorithm;
index 414ddda4392255b1d5479c66b4d75f86235ea2e8..251143361f31e5bf83a3d4df810d222d657f8731 100644 (file)
 #define GEN8_CTX_STATUS_COMPLETE       (1 << 4)
 #define GEN8_CTX_STATUS_LITE_RESTORE   (1 << 15)
 
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+        (GEN8_CTX_STATUS_ACTIVE_IDLE | \
+         GEN8_CTX_STATUS_PREEMPTED | \
+         GEN8_CTX_STATUS_ELEMENT_SWITCH)
+
 #define CTX_LRI_HEADER_0               0x01
 #define CTX_CONTEXT_CONTROL            0x02
 #define CTX_RING_HEAD                  0x04
@@ -263,12 +268,10 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
-               engine->idle_lite_restore_wa = ~0;
-
-       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-                                       IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
-                                       (engine->id == VCS || engine->id == VCS2);
+       engine->disable_lite_restore_wa =
+               (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+                IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+               (engine->id == VCS || engine->id == VCS2);
 
        engine->ctx_desc_template = GEN8_CTX_VALID;
        if (IS_GEN8(dev_priv))
@@ -288,7 +291,6 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 /**
  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
  *                                        for a pinned context
- *
  * @ctx: Context to work on
  * @engine: Engine the descriptor will be used with
  *
@@ -297,12 +299,13 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
  * expensive to calculate, we'll just do it once and cache the result,
  * which remains valid until the context is unpinned.
  *
- * This is what a descriptor looks like, from LSB to MSB:
- *    bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
- *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *    bits 32-52:    ctx ID, a globally unique tag
- *    bits 53-54:    mbz, reserved for use by hardware
- *    bits 55-63:    group ID, currently unused and set to 0
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
+ *      bits 32-52:    ctx ID, a globally unique tag
+ *      bits 53-54:    mbz, reserved for use by hardware
+ *      bits 55-63:    group ID, currently unused and set to 0
  */
 static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
@@ -315,7 +318,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
        desc = ctx->desc_template;                              /* bits  3-4  */
        desc |= engine->ctx_desc_template;                      /* bits  0-11 */
-       desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+       desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
                                                                /* bits 12-31 */
        desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
 
@@ -328,34 +331,18 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
        return ctx->engine[engine->id].lrc_desc;
 }
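Given the bit layout in the comment above, building and decoding a descriptor is shift-and-mask work. A standalone sketch; the masks and shifts below mirror the comment, not the driver's actual defines:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAGS_MASK   0xfffull              /* bits  0-11 */
    #define LRCA_MASK    0xfffff000ull         /* bits 12-31, page aligned */
    #define CTX_ID_SHIFT 32                    /* bits 32-52 (21 bits) */
    #define CTX_ID_MASK  0x1fffffull

    static uint64_t pack_desc(uint64_t flags, uint64_t lrca, uint64_t ctx_id)
    {
            return (flags & FLAGS_MASK) |
                   (lrca & LRCA_MASK) |
                   ((ctx_id & CTX_ID_MASK) << CTX_ID_SHIFT);
    }

    int main(void)
    {
            uint64_t d = pack_desc(0x023, 0x0012f000, 42);

            printf("flags=%#llx lrca=%#llx id=%llu\n",
                   (unsigned long long)(d & FLAGS_MASK),
                   (unsigned long long)(d & LRCA_MASK),
                   (unsigned long long)((d >> CTX_ID_SHIFT) & CTX_ID_MASK));
            return 0;
    }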
 
-static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
-                                struct drm_i915_gem_request *rq1)
+static inline void
+execlists_context_status_change(struct drm_i915_gem_request *rq,
+                               unsigned long status)
 {
+       /*
+        * Only used when GVT-g is enabled now. When GVT-g is disabled,
+        * the compiler should eliminate this function as dead code.
+        */
+       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+               return;
 
-       struct intel_engine_cs *engine = rq0->engine;
-       struct drm_i915_private *dev_priv = rq0->i915;
-       uint64_t desc[2];
-
-       if (rq1) {
-               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
-               rq1->elsp_submitted++;
-       } else {
-               desc[1] = 0;
-       }
-
-       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
-       rq0->elsp_submitted++;
-
-       /* You must always write both descriptors in the order below. */
-       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
-       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
-
-       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
-       /* The context is automatically loaded after the following */
-       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
-
-       /* ELSP is a wo register, use another nearby reg for posting */
-       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
+       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
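The early return on !IS_ENABLED(CONFIG_DRM_I915_GVT) works because the config test is a compile-time constant: when GVT-g is compiled out, the optimizer drops the notifier call entirely, yet the call is still parsed and type-checked. The same trick in miniature (CONFIG_FOO is a stand-in for a real Kconfig symbol):

    #include <stdio.h>

    /* Stand-in for the kernel's IS_ENABLED(): 1 when the option is set. */
    #ifndef CONFIG_FOO
    #define CONFIG_FOO 0
    #endif

    static void notify(int status)
    {
            /* Constant condition: dead code when CONFIG_FOO is 0, but the
             * printf below is still compiled and type-checked either way. */
            if (!CONFIG_FOO)
                    return;

            printf("notifying: %d\n", status);
    }

    int main(void) { notify(1); return 0; }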
 
 static void
@@ -367,13 +354,13 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
        ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 }
 
-static void execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 {
-       struct intel_engine_cs *engine = rq->engine;
+       struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+       u32 *reg_state = ce->lrc_reg_state;
 
-       reg_state[CTX_RING_TAIL+1] = rq->tail;
+       reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
 
        /* True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
@@ -382,321 +369,236 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
         */
        if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
                execlists_update_context_pdps(ppgtt, reg_state);
+
+       return ce->lrc_desc;
 }
 
-static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
-                                     struct drm_i915_gem_request *rq1)
+static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = rq0->i915;
-       unsigned int fw_domains = rq0->engine->fw_domains;
-
-       execlists_update_context(rq0);
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct execlist_port *port = engine->execlist_port;
+       u32 __iomem *elsp =
+               dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+       u64 desc[2];
 
-       if (rq1)
-               execlists_update_context(rq1);
+       if (!port[0].count)
+               execlists_context_status_change(port[0].request,
+                                               INTEL_CONTEXT_SCHEDULE_IN);
+       desc[0] = execlists_update_context(port[0].request);
+       engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
 
-       spin_lock_irq(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+       if (port[1].request) {
+               GEM_BUG_ON(port[1].count);
+               execlists_context_status_change(port[1].request,
+                                               INTEL_CONTEXT_SCHEDULE_IN);
+               desc[1] = execlists_update_context(port[1].request);
+               port[1].count = 1;
+       } else {
+               desc[1] = 0;
+       }
+       GEM_BUG_ON(desc[0] == desc[1]);
 
-       execlists_elsp_write(rq0, rq1);
+       /* You must always write both descriptors in the order below. */
+       writel(upper_32_bits(desc[1]), elsp);
+       writel(lower_32_bits(desc[1]), elsp);
 
-       intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
-       spin_unlock_irq(&dev_priv->uncore.lock);
+       writel(upper_32_bits(desc[0]), elsp);
+       /* The context is automatically loaded after the following */
+       writel(lower_32_bits(desc[0]), elsp);
 }
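The ELSP takes both descriptors as four 32-bit writes in a fixed order: port 1 high then low, port 0 high then low, with execution kicking off on the final write. A toy model of just that ordering (an array stands in for the write-only register):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the write-only ELSP register: record write order. */
    static uint32_t writes[4];
    static int nwrites;

    static void elsp_write(uint32_t val) { writes[nwrites++] = val; }

    static void submit_ports(uint64_t desc0, uint64_t desc1)
    {
            /* Port 1 first; a zero descriptor leaves that port empty. */
            elsp_write(desc1 >> 32);
            elsp_write((uint32_t)desc1);

            elsp_write(desc0 >> 32);
            /* The hardware starts executing after this final write. */
            elsp_write((uint32_t)desc0);
    }

    int main(void)
    {
            int i;

            submit_ports(0x100001000ull, 0);
            for (i = 0; i < nwrites; i++)
                    printf("write[%d] = %#x\n", i, writes[i]);
            return 0;
    }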
 
-static inline void execlists_context_status_change(
-               struct drm_i915_gem_request *rq,
-               unsigned long status)
+static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
 {
-       /*
-        * Only used when GVT-g is enabled now. When GVT-g is disabled,
-        * The compiler should eliminate this function as dead-code.
-        */
-       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
-               return;
-
-       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+       return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+               ctx->execlists_force_single_submission);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
+static bool can_merge_ctx(const struct i915_gem_context *prev,
+                         const struct i915_gem_context *next)
 {
-       struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-       struct drm_i915_gem_request *cursor, *tmp;
+       if (prev != next)
+               return false;
 
-       assert_spin_locked(&engine->execlist_lock);
+       if (ctx_single_port_submission(prev))
+               return false;
 
-       /*
-        * If irqs are not active generate a warning as batches that finish
-        * without the irqs may get lost and a GPU Hang may occur.
-        */
-       WARN_ON(!intel_irqs_enabled(engine->i915));
-
-       /* Try to read in pairs */
-       list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
-                                execlist_link) {
-               if (!req0) {
-                       req0 = cursor;
-               } else if (req0->ctx == cursor->ctx) {
-                       /* Same ctx: ignore first request, as second request
-                        * will update tail past first request's workload */
-                       cursor->elsp_submitted = req0->elsp_submitted;
-                       list_del(&req0->execlist_link);
-                       i915_gem_request_unreference(req0);
-                       req0 = cursor;
-               } else {
-                       if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
-                               /*
-                                * req0 (after merged) ctx requires single
-                                * submission, stop picking
-                                */
-                               if (req0->ctx->execlists_force_single_submission)
-                                       break;
-                               /*
-                                * req0 ctx doesn't require single submission,
-                                * but next req ctx requires, stop picking
-                                */
-                               if (cursor->ctx->execlists_force_single_submission)
-                                       break;
-                       }
-                       req1 = cursor;
-                       WARN_ON(req1->elsp_submitted);
-                       break;
-               }
-       }
+       return true;
+}
 
-       if (unlikely(!req0))
-               return;
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_request *cursor, *last;
+       struct execlist_port *port = engine->execlist_port;
+       bool submit = false;
+
+       last = port->request;
+       if (last)
+               /* WaIdleLiteRestore:bdw,skl
+                * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
+                * as we resubmit the request. See gen8_emit_request()
+                * for where we prepare the padding after the end of the
+                * request.
+                */
+               last->tail = last->wa_tail;
 
-       execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+       GEM_BUG_ON(port[1].request);
 
-       if (req1)
-               execlists_context_status_change(req1,
-                                               INTEL_CONTEXT_SCHEDULE_IN);
+       /* Hardware submission is through 2 ports. Conceptually each port
+        * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+        * static for a context, and unique to each, so we only execute
+        * requests belonging to a single context from each ring. RING_HEAD
+        * is maintained by the CS in the context image, it marks the place
+        * where it got up to last time, and through RING_TAIL we tell the CS
+        * where we want to execute up to this time.
+        *
+        * In this list the requests are in order of execution. Consecutive
+        * requests from the same context are adjacent in the ringbuffer. We
+        * can combine these requests into a single RING_TAIL update:
+        *
+        *              RING_HEAD...req1...req2
+        *                                    ^- RING_TAIL
+        * since to execute req2 the CS must first execute req1.
+        *
+        * Our goal then is to point each port at the end of a consecutive
+        * sequence of requests, as that gives the most efficient (fewest
+        * wake-ups and context switches) submission.
+        */
 
-       if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
-               /*
-                * WaIdleLiteRestore: make sure we never cause a lite restore
-                * with HEAD==TAIL.
+       spin_lock(&engine->execlist_lock);
+       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+               /* Can we combine this request with the current port? It has to
+                * be the same context/ringbuffer and not have any exceptions
+                * (e.g. GVT saying never to combine contexts).
                 *
-                * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
-                * resubmit the request. See gen8_emit_request() for where we
-                * prepare the padding after the end of the request.
+                * If we can combine the requests, we can execute both by
+                * updating the RING_TAIL to point to the end of the second
+                * request, and so we never need to tell the hardware about
+                * the first.
                 */
-               struct intel_ringbuffer *ringbuf;
+               if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+                       /* If we are on the second port and cannot combine
+                        * this request with the last, then we are done.
+                        */
+                       if (port != engine->execlist_port)
+                               break;
+
+                       /* If GVT overrides us we only ever submit port[0],
+                        * leaving port[1] empty. Note that we also have
+                        * to be careful that we don't queue the same
+                        * context (even though a different request) to
+                        * the second port.
+                        */
+                       if (ctx_single_port_submission(cursor->ctx))
+                               break;
+
+                       GEM_BUG_ON(last->ctx == cursor->ctx);
+
+                       i915_gem_request_assign(&port->request, last);
+                       port++;
+               }
+               last = cursor;
+               submit = true;
+       }
+       if (submit) {
+               /* Decouple all the requests submitted from the queue */
+               engine->execlist_queue.next = &cursor->execlist_link;
+               cursor->execlist_link.prev = &engine->execlist_queue;
 
-               ringbuf = req0->ctx->engine[engine->id].ringbuf;
-               req0->tail += 8;
-               req0->tail &= ringbuf->size - 1;
+               i915_gem_request_assign(&port->request, last);
        }
+       spin_unlock(&engine->execlist_lock);
 
-       execlists_submit_requests(req0, req1);
+       if (submit)
+               execlists_submit_ports(engine);
 }
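The dequeue loop walks the queue in execution order and coalesces consecutive requests from the same context, so each port ends up holding the last request of a run. The grouping logic, reduced to plain C over an array (integer ctx ids stand in for context pointers; requests past the second run stay queued):

    #include <stdio.h>

    struct req { int ctx; int seqno; };

    int main(void)
    {
            struct req queue[] = { {1, 10}, {1, 11}, {1, 12}, {2, 20}, {2, 21} };
            struct req *port[2] = { 0 };
            int nports = 0, last = -1, p;
            unsigned int i;

            for (i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
                    if (last >= 0 && queue[i].ctx != queue[last].ctx) {
                            if (nports == 1)        /* both ports spoken for */
                                    break;
                            port[nports++] = &queue[last];  /* end of run 1 */
                    }
                    last = i;
            }
            if (last >= 0)
                    port[nports++] = &queue[last];  /* end of current run */

            for (p = 0; p < nports; p++)
                    printf("port[%d] -> ctx %d seqno %d\n",
                           p, port[p]->ctx, port[p]->seqno);
            return 0;
    }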
 
-static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
+static bool execlists_elsp_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *head_req;
-
-       assert_spin_locked(&engine->execlist_lock);
-
-       head_req = list_first_entry_or_null(&engine->execlist_queue,
-                                           struct drm_i915_gem_request,
-                                           execlist_link);
-
-       if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
-               return 0;
-
-       WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
-
-       if (--head_req->elsp_submitted > 0)
-               return 0;
-
-       execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
-
-       list_del(&head_req->execlist_link);
-       i915_gem_request_unreference(head_req);
-
-       return 1;
+       return !engine->execlist_port[0].request;
 }
 
-static u32
-get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
-                  u32 *context_id)
+static bool execlists_elsp_ready(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       u32 status;
+       int port;
 
-       read_pointer %= GEN8_CSB_ENTRIES;
-
-       status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
-
-       if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-               return 0;
+       port = 1; /* wait for a free slot */
+       if (engine->disable_lite_restore_wa || engine->preempt_wa)
+               port = 0; /* wait for GPU to be idle before continuing */
 
-       *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
-                                                             read_pointer));
-
-       return status;
+       return !engine->execlist_port[port].request;
 }
 
-/**
- * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @data: tasklet handler passed in unsigned long
- *
+/*
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
 static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+       struct execlist_port *port = engine->execlist_port;
        struct drm_i915_private *dev_priv = engine->i915;
-       u32 status_pointer;
-       unsigned int read_pointer, write_pointer;
-       u32 csb[GEN8_CSB_ENTRIES][2];
-       unsigned int csb_read = 0, i;
-       unsigned int submit_contexts = 0;
 
        intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-       status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
-
-       read_pointer = engine->next_context_status_buffer;
-       write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-       if (read_pointer > write_pointer)
-               write_pointer += GEN8_CSB_ENTRIES;
-
-       while (read_pointer < write_pointer) {
-               if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
-                       break;
-               csb[csb_read][0] = get_context_status(engine, ++read_pointer,
-                                                     &csb[csb_read][1]);
-               csb_read++;
-       }
-
-       engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-       /* Update the read pointer to the old write pointer. Manual ringbuffer
-        * management ftw </sarcasm> */
-       I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
-                     _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-                                   engine->next_context_status_buffer << 8));
-
-       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
-
-       spin_lock(&engine->execlist_lock);
+       if (!execlists_elsp_idle(engine)) {
+               u32 __iomem *csb_mmio =
+                       dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+               u32 __iomem *buf =
+                       dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+               unsigned int csb, head, tail;
+
+               csb = readl(csb_mmio);
+               head = GEN8_CSB_READ_PTR(csb);
+               tail = GEN8_CSB_WRITE_PTR(csb);
+               if (tail < head)
+                       tail += GEN8_CSB_ENTRIES;
+               while (head < tail) {
+                       unsigned int idx = ++head % GEN8_CSB_ENTRIES;
+                       unsigned int status = readl(buf + 2 * idx);
+
+                       if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+                               continue;
+
+                       GEM_BUG_ON(port[0].count == 0);
+                       if (--port[0].count == 0) {
+                               GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+                               execlists_context_status_change(port[0].request,
+                                                               INTEL_CONTEXT_SCHEDULE_OUT);
+
+                               i915_gem_request_put(port[0].request);
+                               port[0] = port[1];
+                               memset(&port[1], 0, sizeof(port[1]));
+
+                               engine->preempt_wa = false;
+                       }
 
-       for (i = 0; i < csb_read; i++) {
-               if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
-                       if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
-                               if (execlists_check_remove_request(engine, csb[i][1]))
-                                       WARN(1, "Lite Restored request removed from queue\n");
-                       } else
-                               WARN(1, "Preemption without Lite Restore\n");
+                       GEM_BUG_ON(port[0].count == 0 &&
+                                  !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
                }
 
-               if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
-                   GEN8_CTX_STATUS_ELEMENT_SWITCH))
-                       submit_contexts +=
-                               execlists_check_remove_request(engine, csb[i][1]);
-       }
-
-       if (submit_contexts) {
-               if (!engine->disable_lite_restore_wa ||
-                   (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-                       execlists_context_unqueue(engine);
+               writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+                                    GEN8_CSB_WRITE_PTR(csb) << 8),
+                      csb_mmio);
        }
 
-       spin_unlock(&engine->execlist_lock);
+       if (execlists_elsp_ready(engine))
+               execlists_dequeue(engine);
 
-       if (unlikely(submit_contexts > 2))
-               DRM_ERROR("More than two context complete events?\n");
+       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
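Consuming the context-status buffer is standard ring-buffer arithmetic: read head and tail from the pointer register, unwrap tail when it has wrapped behind head, then step through entries modulo the ring size. The index walk in isolation (6 entries, as on gen8):

    #include <stdio.h>

    #define CSB_ENTRIES 6   /* mirrors GEN8_CSB_ENTRIES */

    int main(void)
    {
            unsigned int head = 4, tail = 1;        /* tail has wrapped */

            if (tail < head)
                    tail += CSB_ENTRIES;            /* unwrap: head < tail */

            while (head < tail) {
                    unsigned int idx = ++head % CSB_ENTRIES;

                    printf("process CSB entry %u\n", idx);  /* 5, then 0, 1 */
            }
            return 0;
    }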
 
-static void execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct drm_i915_gem_request *cursor;
-       int num_elements = 0;
+       unsigned long flags;
 
-       spin_lock_bh(&engine->execlist_lock);
-
-       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
-               if (++num_elements > 2)
-                       break;
-
-       if (num_elements > 2) {
-               struct drm_i915_gem_request *tail_req;
-
-               tail_req = list_last_entry(&engine->execlist_queue,
-                                          struct drm_i915_gem_request,
-                                          execlist_link);
-
-               if (request->ctx == tail_req->ctx) {
-                       WARN(tail_req->elsp_submitted != 0,
-                               "More than 2 already-submitted reqs queued\n");
-                       list_del(&tail_req->execlist_link);
-                       i915_gem_request_unreference(tail_req);
-               }
-       }
+       spin_lock_irqsave(&engine->execlist_lock, flags);
 
-       i915_gem_request_reference(request);
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
-       request->ctx_hw_id = request->ctx->hw_id;
-       if (num_elements == 0)
-               execlists_context_unqueue(engine);
+       if (execlists_elsp_idle(engine))
+               tasklet_hi_schedule(&engine->irq_tasklet);
 
-       spin_unlock_bh(&engine->execlist_lock);
-}
-
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       uint32_t flush_domains;
-       int ret;
-
-       flush_domains = 0;
-       if (engine->gpu_caches_dirty)
-               flush_domains = I915_GEM_GPU_DOMAINS;
-
-       ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-       if (ret)
-               return ret;
-
-       engine->gpu_caches_dirty = false;
-       return 0;
-}
-
-static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
-                                struct list_head *vmas)
-{
-       const unsigned other_rings = ~intel_engine_flag(req->engine);
-       struct i915_vma *vma;
-       uint32_t flush_domains = 0;
-       bool flush_chipset = false;
-       int ret;
-
-       list_for_each_entry(vma, vmas, exec_list) {
-               struct drm_i915_gem_object *obj = vma->obj;
-
-               if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, req->engine, &req);
-                       if (ret)
-                               return ret;
-               }
-
-               if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-                       flush_chipset |= i915_gem_clflush_object(obj, false);
-
-               flush_domains |= obj->base.write_domain;
-       }
-
-       if (flush_domains & I915_GEM_DOMAIN_GTT)
-               wmb();
-
-       /* Unconditionally invalidate gpu caches and ensure that we do flush
-        * any residual writes from the previous batch.
-        */
-       return logical_ring_invalidate_all_caches(req);
+       spin_unlock_irqrestore(&engine->execlist_lock, flags);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -717,7 +619,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                        return ret;
        }
 
-       request->ringbuf = ce->ringbuf;
+       request->ring = ce->ring;
 
        if (i915.enable_guc_submission) {
                /*
@@ -725,7 +627,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                 * going any further, as the i915_add_request() call
                 * later on mustn't fail ...
                 */
-               ret = i915_guc_wq_check_space(request);
+               ret = i915_guc_wq_reserve(request);
                if (ret)
                        return ret;
        }
@@ -762,7 +664,7 @@ err_unpin:
 }
 
 /*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
  * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -771,13 +673,13 @@ err_unpin:
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ring *ring = request->ring;
        struct intel_engine_cs *engine = request->engine;
 
-       intel_logical_ring_advance(ringbuf);
-       request->tail = ringbuf->tail;
+       intel_ring_advance(ring);
+       request->tail = ring->tail;
 
        /*
         * Here we add two extra NOOPs as padding to avoid
@@ -785,9 +687,10 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
         *
         * Caller must reserve WA_TAIL_DWORDS for us!
         */
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       request->wa_tail = ring->tail;
 
        /* We keep the previous context alive until we retire the following
         * request. This ensures that the context object is still pinned
@@ -797,165 +700,12 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
         */
        request->previous_context = engine->last_context;
        engine->last_context = request->ctx;
-
-       if (i915.enable_guc_submission)
-               i915_guc_submit(request);
-       else
-               execlists_context_queue(request);
-
-       return 0;
-}
-
-/**
- * execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @params: execbuffer call parameters.
- * @args: execbuffer call arguments.
- * @vmas: list of vmas.
- *
- * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
- * away the submission details of the execbuffer ioctl call.
- *
- * Return: non-zero if the submission fails.
- */
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-                              struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas)
-{
-       struct drm_device       *dev = params->dev;
-       struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
-       u64 exec_start;
-       int instp_mode;
-       u32 instp_mask;
-       int ret;
-
-       instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-       instp_mask = I915_EXEC_CONSTANTS_MASK;
-       switch (instp_mode) {
-       case I915_EXEC_CONSTANTS_REL_GENERAL:
-       case I915_EXEC_CONSTANTS_ABSOLUTE:
-       case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
-                       DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-                       return -EINVAL;
-               }
-
-               if (instp_mode != dev_priv->relative_constants_mode) {
-                       if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-                               DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-                               return -EINVAL;
-                       }
-
-                       /* The HW changed the meaning on this bit on gen6 */
-                       instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-               }
-               break;
-       default:
-               DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
-               return -EINVAL;
-       }
-
-       if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-               DRM_DEBUG("sol reset is gen7 only\n");
-               return -EINVAL;
-       }
-
-       ret = execlists_move_to_gpu(params->request, vmas);
-       if (ret)
-               return ret;
-
-       if (engine == &dev_priv->engine[RCS] &&
-           instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_ring_begin(params->request, 4);
-               if (ret)
-                       return ret;
-
-               intel_logical_ring_emit(ringbuf, MI_NOOP);
-               intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-               intel_logical_ring_emit_reg(ringbuf, INSTPM);
-               intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-               intel_logical_ring_advance(ringbuf);
-
-               dev_priv->relative_constants_mode = instp_mode;
-       }
-
-       exec_start = params->batch_obj_vm_offset +
-                    args->batch_start_offset;
-
-       ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
-       if (ret)
-               return ret;
-
-       trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
-       i915_gem_execbuffer_move_to_active(vmas, params->request);
-
-       return 0;
-}
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
-{
-       struct drm_i915_gem_request *req, *tmp;
-       LIST_HEAD(cancel_list);
-
-       WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
-
-       spin_lock_bh(&engine->execlist_lock);
-       list_replace_init(&engine->execlist_queue, &cancel_list);
-       spin_unlock_bh(&engine->execlist_lock);
-
-       list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
-               list_del(&req->execlist_link);
-               i915_gem_request_unreference(req);
-       }
-}
-
-void intel_logical_ring_stop(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       if (!intel_engine_initialized(engine))
-               return;
-
-       ret = intel_engine_idle(engine);
-       if (ret)
-               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         engine->name, ret);
-
-       /* TODO: Is this correct with Execlists enabled? */
-       I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-       if (intel_wait_for_register(dev_priv,
-                                   RING_MI_MODE(engine->mmio_base),
-                                   MODE_IDLE, MODE_IDLE,
-                                   1000)) {
-               DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
-               return;
-       }
-       I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
-}
-
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       int ret;
-
-       if (!engine->gpu_caches_dirty)
-               return 0;
-
-       ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-       if (ret)
-               return ret;
-
-       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ctx->i915;
        struct intel_context *ce = &ctx->engine[engine->id];
        void *vaddr;
        u32 *lrc_reg_state;
@@ -966,41 +716,43 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        if (ce->pin_count++)
                return 0;
 
-       ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
-                                   PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+                          PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
        if (ret)
                goto err;
 
-       vaddr = i915_gem_object_pin_map(ce->state);
+       vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
-               goto unpin_ctx_obj;
+               goto unpin_vma;
        }
 
        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
+       ret = intel_ring_pin(ce->ring);
        if (ret)
                goto unpin_map;
 
-       i915_gem_context_reference(ctx);
-       ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
        intel_lr_context_descriptor_update(ctx, engine);
 
-       lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+       lrc_reg_state[CTX_RING_BUFFER_START+1] =
+               i915_ggtt_offset(ce->ring->vma);
        ce->lrc_reg_state = lrc_reg_state;
-       ce->state->dirty = true;
+       ce->state->obj->dirty = true;
 
        /* Invalidate GuC TLB. */
-       if (i915.enable_guc_submission)
+       if (i915.enable_guc_submission) {
+               struct drm_i915_private *dev_priv = ctx->i915;
                I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+       }
 
+       i915_gem_context_get(ctx);
        return 0;
 
 unpin_map:
-       i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
-       i915_gem_object_ggtt_unpin(ce->state);
+       i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+       __i915_vma_unpin(ce->state);
 err:
        ce->pin_count = 0;
        return ret;
@@ -1017,30 +769,24 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
        if (--ce->pin_count)
                return;
 
-       intel_unpin_ringbuffer_obj(ce->ringbuf);
-
-       i915_gem_object_unpin_map(ce->state);
-       i915_gem_object_ggtt_unpin(ce->state);
+       intel_ring_unpin(ce->ring);
 
-       ce->lrc_vma = NULL;
-       ce->lrc_desc = 0;
-       ce->lrc_reg_state = NULL;
+       i915_gem_object_unpin_map(ce->state->obj);
+       i915_vma_unpin(ce->state);
 
-       i915_gem_context_unreference(ctx);
+       i915_gem_context_put(ctx);
 }
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
-       struct intel_engine_cs *engine = req->engine;
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ring *ring = req->ring;
        struct i915_workarounds *w = &req->i915->workarounds;
 
        if (w->count == 0)
                return 0;
 
-       engine->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(req);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
@@ -1048,17 +794,16 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
-               intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-               intel_logical_ring_emit(ringbuf, w->reg[i].value);
+               intel_ring_emit_reg(ring, w->reg[i].addr);
+               intel_ring_emit(ring, w->reg[i].value);
        }
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_emit(ring, MI_NOOP);
 
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_advance(ring);
 
-       engine->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(req);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
@@ -1094,7 +839,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
  * code duplication.
  */
 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
-                                               uint32_t *const batch,
+                                               uint32_t *batch,
                                                uint32_t index)
 {
        struct drm_i915_private *dev_priv = engine->i915;
@@ -1113,7 +858,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
        wa_ctx_emit(batch, index, 0);
 
        wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1131,7 +876,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
        wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
        wa_ctx_emit(batch, index, 0);
 
        return index;
@@ -1156,37 +901,24 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
        return 0;
 }
 
-/**
- * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
- *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- *  offset: specifies start of the batch, should be cache-aligned. This is updated
- *    with the offset value received as input.
- *  size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of the batch, it should be
- *  cache-aligned otherwise it is adjusted accordingly.
- *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
- *  initialized at the beginning and shared across all contexts but this field
- *  helps us to have multiple batches at different offsets and select them based
- *  on a criteria. At the moment this batch always start at the beginning of the page
- *  and at this point we don't have multiple wa_ctx batch buffers.
- *
- *  The number of WA applied are not known at the beginning; we use this field
- *  to return the no of DWORDS written.
+/*
+ * Typically we only have one indirect_ctx and per_ctx batch buffer, which are
+ * initialized at the beginning and shared across all contexts, but this field
+ * helps us to have multiple batches at different offsets and select them based
+ * on some criterion. At the moment this batch always starts at the beginning of
+ * the page, and at this point we don't have multiple wa_ctx batch buffers.
  *
- *  It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
- *  so it adds NOOPs as padding to make it cacheline aligned.
- *  MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
- *  makes a complete batch buffer.
+ * The number of WAs applied is not known at the beginning; we use this field
+ * to return the number of DWORDS written.
  *
- * Return: non-zero if we exceed the PAGE_SIZE limit.
+ * Note that this batch does not contain MI_BATCH_BUFFER_END, so it adds NOOPs
+ * as padding to make it cacheline aligned.
+ * MI_BATCH_BUFFER_END will be added to the perctx batch, and the two together
+ * make a complete batch buffer.
  */
-
 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
-                                   uint32_t *const batch,
+                                   uint32_t *batch,
                                    uint32_t *offset)
 {
        uint32_t scratch_addr;
@@ -1205,7 +937,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 
        /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
        /* Actual scratch location is at 128 bytes offset */
-       scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+       scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
        wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
        wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1230,26 +962,18 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
        return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 }
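
The comment above explains why the indirect-context batch is NOOP-padded to a cacheline while the per-ctx batch supplies the MI_BATCH_BUFFER_END terminator. A self-contained sketch of that emit-pad-terminate pattern; CACHELINE_DWORDS matches a 64-byte cacheline, and the MI_BATCH_BUFFER_END encoding here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_DWORDS	16		/* 64-byte cacheline / 4 */
#define MI_NOOP			0x00000000u
#define MI_BATCH_BUFFER_END	0x05000000u	/* illustrative encoding */

static uint32_t emit(uint32_t *batch, uint32_t index, uint32_t dw)
{
	batch[index] = dw;			/* wa_ctx_emit() equivalent */
	return index + 1;
}

int main(void)
{
	uint32_t batch[4 * CACHELINE_DWORDS];
	uint32_t index = 0;

	/* A few workaround DWORDs (values are placeholders). */
	index = emit(batch, index, 0x11111111u);
	index = emit(batch, index, 0x22222222u);
	index = emit(batch, index, 0x33333333u);

	/* Indirect-ctx batch: pad with NOOPs to the next cacheline. */
	while (index % CACHELINE_DWORDS)
		index = emit(batch, index, MI_NOOP);

	/* Per-ctx batch follows and carries the terminator. */
	index = emit(batch, index, MI_BATCH_BUFFER_END);

	printf("batch ends at dword %u\n", (unsigned)index);
	return 0;
}
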
 
-/**
- * gen8_init_perctx_bb() - initialize per ctx batch with WA
- *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- *  offset: specifies start of the batch, should be cache-aligned.
- *  size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of this batch.
- *   This batch is started immediately after indirect_ctx batch. Since we ensure
- *   that indirect_ctx ends on a cacheline this batch is aligned automatically.
+/*
+ *  This batch is started immediately after the indirect_ctx batch. Since we
+ *  ensure that indirect_ctx ends on a cacheline, this batch is aligned automatically.
  *
- *   The number of DWORDS written are returned using this field.
+ *  The number of DWORDS written is returned using this field.
  *
 *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
 *  to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
  */
 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
-                              uint32_t *const batch,
+                              uint32_t *batch,
                               uint32_t *offset)
 {
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1264,7 +988,7 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
 
 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
-                                   uint32_t *const batch,
+                                   uint32_t *batch,
                                    uint32_t *offset)
 {
        int ret;
@@ -1282,11 +1006,18 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                return ret;
        index = ret;
 
+       /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
+       wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+       wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
+       wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
+                           GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
+       wa_ctx_emit(batch, index, MI_NOOP);
+
        /* WaClearSlmSpaceAtContextSwitch:kbl */
        /* Actual scratch location is at 128 bytes offset */
        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
-               uint32_t scratch_addr
-                       = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+               u32 scratch_addr =
+                       i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
                wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
                wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
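
The _MASKED_BIT_DISABLE() write in the hunk above relies on "masked" registers, where the top 16 bits select which of the bottom 16 bits the write actually touches. A hedged restatement of the macro shapes (following the style of the driver's i915_reg.h; the chicken bit chosen is hypothetical):

#include <stdio.h>

/* Masked registers: bit N of the value only takes effect when bit N+16
 * (the write-enable mask half) is also set in the same write. */
#define MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

int main(void)
{
	unsigned int bit = 1u << 7;	/* hypothetical chicken bit */

	/* Only bit 7 is affected; all other bits keep their current value. */
	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit));
	return 0;
}
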
@@ -1332,7 +1063,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 
 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
-                              uint32_t *const batch,
+                              uint32_t *batch,
                               uint32_t *offset)
 {
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1378,44 +1109,44 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 
 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
-       int ret;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
 
-       engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
-                                                   PAGE_ALIGN(size));
-       if (IS_ERR(engine->wa_ctx.obj)) {
-               DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-               ret = PTR_ERR(engine->wa_ctx.obj);
-               engine->wa_ctx.obj = NULL;
-               return ret;
-       }
+       obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
-       ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
-       if (ret) {
-               DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
-                                ret);
-               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
-               return ret;
+       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err;
        }
 
+       err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               goto err;
+
+       engine->wa_ctx.vma = vma;
        return 0;
+
+err:
+       i915_gem_object_put(obj);
+       return err;
 }
 
 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
 {
-       if (engine->wa_ctx.obj) {
-               i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
-               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
-               engine->wa_ctx.obj = NULL;
-       }
+       i915_vma_unpin_and_release(&engine->wa_ctx.vma);
 }
 
 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
-       int ret;
+       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
        uint32_t *batch;
        uint32_t offset;
        struct page *page;
-       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+       int ret;
 
        WARN_ON(engine->id != RCS);
 
@@ -1427,7 +1158,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        }
 
        /* some WAs perform writes to the scratch page; ensure it is valid */
-       if (engine->scratch.obj == NULL) {
+       if (!engine->scratch) {
                DRM_ERROR("scratch page not allocated for %s\n", engine->name);
                return -EINVAL;
        }
@@ -1438,7 +1169,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                return ret;
        }
 
-       page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
+       page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
        batch = kmap_atomic(page);
        offset = 0;
 
@@ -1485,55 +1216,37 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
        struct drm_i915_private *dev_priv = engine->i915;
 
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-                  (u32)engine->status_page.gfx_addr);
+                  engine->status_page.ggtt_offset);
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 }
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned int next_context_status_buffer_hw;
+       int ret;
+
+       ret = intel_mocs_init_engine(engine);
+       if (ret)
+               return ret;
 
        lrc_init_hws(engine);
 
-       I915_WRITE_IMR(engine,
-                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       intel_engine_reset_irq(engine);
+
        I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
        I915_WRITE(RING_MODE_GEN7(engine),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-       POSTING_READ(RING_MODE_GEN7(engine));
-
-       /*
-        * Instead of resetting the Context Status Buffer (CSB) read pointer to
-        * zero, we need to read the write pointer from hardware and use its
-        * value because "this register is power context save restored".
-        * Effectively, these states have been observed:
-        *
-        *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
-        * BDW  | CSB regs not reset       | CSB regs reset       |
-        * CHT  | CSB regs not reset       | CSB regs not reset   |
-        * SKL  |         ?                |         ?            |
-        * BXT  |         ?                |         ?            |
-        */
-       next_context_status_buffer_hw =
-               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
-
-       /*
-        * When the CSB registers are reset (also after power-up / gpu reset),
-        * CSB write pointer is set to all 1's, which is not valid, use '5' in
-        * this special case, so the first element read is CSB[0].
-        */
-       if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
-               next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-       engine->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
        intel_engine_init_hangcheck(engine);
 
-       return intel_mocs_init_engine(engine);
+       if (!execlists_elsp_idle(engine))
+               execlists_submit_ports(engine);
+
+       return 0;
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1569,11 +1282,41 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
        return init_workarounds_ring(engine);
 }
 
+static void reset_common_ring(struct intel_engine_cs *engine,
+                             struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct execlist_port *port = engine->execlist_port;
+       struct intel_context *ce = &request->ctx->engine[engine->id];
+
+       /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+       ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+       request->ring->head = request->postfix;
+       request->ring->last_retired_head = -1;
+       intel_ring_update_space(request->ring);
+
+       if (i915.enable_guc_submission)
+               return;
+
+       /* Catch up with any missed context-switch interrupts */
+       I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
+       if (request->ctx != port[0].request->ctx) {
+               i915_gem_request_put(port[0].request);
+               port[0] = port[1];
+               memset(&port[1], 0, sizeof(port[1]));
+       }
+
+       /* CS is stopped, and we will resubmit both ports on resume */
+       GEM_BUG_ON(request->ctx != port[0].request->ctx);
+       port[0].count = 0;
+       port[1].count = 0;
+}
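
The port juggling above keeps the two-entry ELSP tracking coherent across a reset: if the hung request's context is no longer in port[0], port[0] must already have completed, so it is dropped and port[1] promoted. A toy sketch of just that shift, with mocked request/port types:

#include <stdio.h>
#include <string.h>

struct request { int ctx; };			/* mock types */
struct port { struct request *request; int count; };

static void reset_ports(struct port port[2], const struct request *hung)
{
	/* port[0] finished if it no longer holds the hung context. */
	if (port[0].request && hung->ctx != port[0].request->ctx) {
		port[0] = port[1];
		memset(&port[1], 0, sizeof(port[1]));
	}

	/* Both ports get resubmitted from scratch on resume. */
	port[0].count = 0;
	port[1].count = 0;
}

int main(void)
{
	struct request a = { .ctx = 1 }, b = { .ctx = 2 };
	struct port ports[2] = { { &a, 1 }, { &b, 1 } };

	reset_ports(ports, &b);
	printf("port[0] now holds ctx %d\n", ports[0].request->ctx);
	return 0;
}
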
+
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
        struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+       struct intel_ring *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
        const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
        int i, ret;
 
@@ -1581,28 +1324,27 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               intel_logical_ring_emit_reg(ringbuf,
-                                           GEN8_RING_PDP_UDW(engine, i));
-               intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-               intel_logical_ring_emit_reg(ringbuf,
-                                           GEN8_RING_PDP_LDW(engine, i));
-               intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+               intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
+               intel_ring_emit(ring, upper_32_bits(pd_daddr));
+               intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
+               intel_ring_emit(ring, lower_32_bits(pd_daddr));
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
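
Each page-directory pointer is a 64-bit DMA address, but LRI slots are 32 bits wide, so every PDP costs two register writes (UDW, then LDW). A minimal sketch of the split, with upper_32_bits()/lower_32_bits() restated as plain helpers and a hypothetical address:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t pd_daddr = 0x0000000123456000ull;	/* hypothetical PD address */

	/* In the ring this becomes LRI(PDP_UDW, upper); LRI(PDP_LDW, lower). */
	printf("UDW 0x%08x LDW 0x%08x\n",
	       (unsigned)upper_32(pd_daddr), (unsigned)lower_32(pd_daddr));
	return 0;
}
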
 
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
-                             u64 offset, unsigned dispatch_flags)
+                             u64 offset, u32 len,
+                             unsigned int dispatch_flags)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ring *ring = req->ring;
        bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
@@ -1629,14 +1371,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-                               (ppgtt<<8) |
-                               (dispatch_flags & I915_DISPATCH_RS ?
-                                MI_BATCH_RESOURCE_STREAMER : 0));
-       intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
+                       (ppgtt<<8) |
+                       (dispatch_flags & I915_DISPATCH_RS ?
+                        MI_BATCH_RESOURCE_STREAMER : 0));
+       intel_ring_emit(ring, lower_32_bits(offset));
+       intel_ring_emit(ring, upper_32_bits(offset));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -1655,14 +1397,10 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
        I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-                          u32 invalidate_domains,
-                          u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *engine = ringbuf->engine;
-       struct drm_i915_private *dev_priv = request->i915;
-       uint32_t cmd;
+       struct intel_ring *ring = request->ring;
+       u32 cmd;
        int ret;
 
        ret = intel_ring_begin(request, 4);
@@ -1678,30 +1416,30 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
         */
        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-       if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+       if (mode & EMIT_INVALIDATE) {
                cmd |= MI_INVALIDATE_TLB;
-               if (engine == &dev_priv->engine[VCS])
+               if (request->engine->id == VCS)
                        cmd |= MI_INVALIDATE_BSD;
        }
 
-       intel_logical_ring_emit(ringbuf, cmd);
-       intel_logical_ring_emit(ringbuf,
-                               I915_GEM_HWS_SCRATCH_ADDR |
-                               MI_FLUSH_DW_USE_GTT);
-       intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-       intel_logical_ring_emit(ringbuf, 0); /* value */
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring,
+                       I915_GEM_HWS_SCRATCH_ADDR |
+                       MI_FLUSH_DW_USE_GTT);
+       intel_ring_emit(ring, 0); /* upper addr */
+       intel_ring_emit(ring, 0); /* value */
+       intel_ring_advance(ring);
 
        return 0;
 }
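
gen8_emit_flush() folds the abstract EMIT_INVALIDATE mode bit into MI_FLUSH_DW command flags, with the video engine additionally invalidating its BSD TLBs. A sketch of that mapping; the flag bit positions here are illustrative placeholders, only the composition logic mirrors the function above:

#include <stdio.h>

#define EMIT_FLUSH		(1u << 0)	/* mode bits as used above */
#define EMIT_INVALIDATE		(1u << 1)

/* Illustrative placeholder values for the hardware command bits. */
#define MI_FLUSH_DW_STORE_INDEX	(1u << 21)
#define MI_FLUSH_DW_OP_STOREDW	(1u << 14)
#define MI_INVALIDATE_TLB	(1u << 18)
#define MI_INVALIDATE_BSD	(1u << 7)

static unsigned int flush_cmd(unsigned int mode, int is_vcs)
{
	unsigned int cmd = MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (is_vcs)		/* BSD engine also drops its TLBs */
			cmd |= MI_INVALIDATE_BSD;
	}
	return cmd;
}

int main(void)
{
	printf("vcs invalidate: 0x%08x\n", flush_cmd(EMIT_INVALIDATE, 1));
	return 0;
}
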
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-                                 u32 invalidate_domains,
-                                 u32 flush_domains)
+                                 u32 mode)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *engine = ringbuf->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       struct intel_ring *ring = request->ring;
+       struct intel_engine_cs *engine = request->engine;
+       u32 scratch_addr =
+               i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false, dc_flush_wa = false;
        u32 flags = 0;
        int ret;
@@ -1709,14 +1447,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 
        flags |= PIPE_CONTROL_CS_STALL;
 
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -1751,40 +1489,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                return ret;
 
        if (vf_flush_wa) {
-               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
+               intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
        }
 
        if (dc_flush_wa) {
-               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
+               intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+               intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
        }
 
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-       intel_logical_ring_emit(ringbuf, flags);
-       intel_logical_ring_emit(ringbuf, scratch_addr);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(ring, flags);
+       intel_ring_emit(ring, scratch_addr);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
 
        if (dc_flush_wa) {
-               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
+               intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+               intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
        }
 
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_advance(ring);
 
        return 0;
 }
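
When dc_flush_wa is set, the main PIPE_CONTROL is bracketed by a preparatory DC-flush packet and a trailing CS-stall packet. A structural sketch of that three-packet sequence, with emission reduced to prints and illustrative flag bits:

#include <stdio.h>

#define DC_FLUSH_ENABLE	(1u << 5)	/* illustrative bit positions */
#define CS_STALL	(1u << 20)

static void pipe_control(const char *why, unsigned int flags)
{
	/* Stands in for the six-dword GFX_OP_PIPE_CONTROL(6) packet. */
	printf("PIPE_CONTROL %-4s flags=0x%08x\n", why, flags);
}

int main(void)
{
	unsigned int flags = DC_FLUSH_ENABLE | 0x1234;	/* hypothetical main flags */
	int dc_flush_wa = 1;

	if (dc_flush_wa)	/* pre: flush DC before the real packet */
		pipe_control("pre", DC_FLUSH_ENABLE);

	pipe_control("main", flags);

	if (dc_flush_wa)	/* post: stall until the flush has landed */
		pipe_control("post", CS_STALL);
	return 0;
}
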
@@ -1813,7 +1551,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ring *ring = request->ring;
        int ret;
 
        ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1823,21 +1561,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-       intel_logical_ring_emit(ringbuf,
-                               (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-       intel_logical_ring_emit(ringbuf,
-                               intel_hws_seqno_address(request->engine) |
-                               MI_FLUSH_DW_USE_GTT);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, request->seqno);
-       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       return intel_logical_ring_advance_and_submit(request);
+       intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+       intel_ring_emit(ring,
+                       intel_hws_seqno_address(request->engine) |
+                       MI_FLUSH_DW_USE_GTT);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, request->fence.seqno);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_emit(ring, MI_NOOP);
+       return intel_logical_ring_advance(request);
 }
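
The BUILD_BUG_ON above turns the MI_FLUSH_DW addressing rule (bit 5 of the address must be zero) into a compile-time failure instead of silent runtime corruption. A minimal sketch using C11 _Static_assert in place of the kernel macro, with a hypothetical address:

#include <stdint.h>

#define HWS_INDEX_ADDR	0x00000040u	/* hypothetical qword-aligned address */

/* BUILD_BUG_ON(expr) breaks the build when expr is true; _Static_assert
 * is the portable C11 equivalent with the condition inverted. */
_Static_assert((HWS_INDEX_ADDR & (1u << 5)) == 0,
	       "MI_FLUSH_DW address must have bit 5 clear");

int main(void) { return 0; }
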
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ring *ring = request->ring;
        int ret;
 
        ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -1851,50 +1588,19 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-       intel_logical_ring_emit(ringbuf,
-                               (PIPE_CONTROL_GLOBAL_GTT_IVB |
-                                PIPE_CONTROL_CS_STALL |
-                                PIPE_CONTROL_QW_WRITE));
-       intel_logical_ring_emit(ringbuf,
-                               intel_hws_seqno_address(request->engine));
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(ring,
+                       (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                        PIPE_CONTROL_CS_STALL |
+                        PIPE_CONTROL_QW_WRITE));
+       intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, i915_gem_request_get_seqno(request));
        /* We're thrashing one dword of HWS. */
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       return intel_logical_ring_advance_and_submit(request);
-}
-
-static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
-{
-       struct render_state so;
-       int ret;
-
-       ret = i915_gem_render_state_prepare(req->engine, &so);
-       if (ret)
-               return ret;
-
-       if (so.rodata == NULL)
-               return 0;
-
-       ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-                                      I915_DISPATCH_SECURE);
-       if (ret)
-               goto out;
-
-       ret = req->engine->emit_bb_start(req,
-                                      (so.ggtt_offset + so.aux_batch_offset),
-                                      I915_DISPATCH_SECURE);
-       if (ret)
-               goto out;
-
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
-       i915_gem_render_state_fini(&so);
-       return ret;
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_emit(ring, MI_NOOP);
+       return intel_logical_ring_advance(request);
 }
 
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
@@ -1913,14 +1619,12 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
        if (ret)
                DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-       return intel_lr_context_render_state_init(req);
+       return i915_gem_render_state_init(req);
 }
 
 /**
  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
- *
  * @engine: Engine Command Streamer.
- *
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
@@ -1939,39 +1643,42 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        dev_priv = engine->i915;
 
        if (engine->buffer) {
-               intel_logical_ring_stop(engine);
                WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
        }
 
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       i915_cmd_parser_fini_ring(engine);
-       i915_gem_batch_pool_fini(&engine->batch_pool);
-
-       intel_engine_fini_breadcrumbs(engine);
+       intel_engine_cleanup_common(engine);
 
-       if (engine->status_page.obj) {
-               i915_gem_object_unpin_map(engine->status_page.obj);
-               engine->status_page.obj = NULL;
+       if (engine->status_page.vma) {
+               i915_gem_object_unpin_map(engine->status_page.vma->obj);
+               engine->status_page.vma = NULL;
        }
        intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
-       engine->idle_lite_restore_wa = 0;
-       engine->disable_lite_restore_wa = false;
-       engine->ctx_desc_template = 0;
-
        lrc_destroy_wa_ctx_obj(engine);
        engine->i915 = NULL;
 }
 
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
+       for_each_engine(engine, dev_priv)
+               engine->submit_request = execlists_submit_request;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overridden by each engine. */
        engine->init_hw = gen8_init_common_ring;
-       engine->emit_request = gen8_emit_request;
+       engine->reset_hw = reset_common_ring;
        engine->emit_flush = gen8_emit_flush;
+       engine->emit_request = gen8_emit_request;
+       engine->submit_request = execlists_submit_request;
+
        engine->irq_enable = gen8_logical_ring_enable_irq;
        engine->irq_disable = gen8_logical_ring_disable_irq;
        engine->emit_bb_start = gen8_emit_bb_start;
@@ -1980,41 +1687,71 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 }
 
 static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
+       unsigned shift = engine->irq_shift;
        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
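
The per-engine IRQ masks are built by shifting shared GT interrupt bits by the engine's own irq_shift, so one set of bit definitions serves every engine. A small sketch with illustrative bit values and hypothetical shifts:

#include <stdio.h>

#define GT_RENDER_USER_INTERRUPT	(1u << 0)	/* illustrative bits */
#define GT_CONTEXT_SWITCH_INTERRUPT	(1u << 8)

struct engine {
	unsigned int irq_shift;
	unsigned int irq_enable_mask, irq_keep_mask;
};

static void default_irqs(struct engine *e)
{
	e->irq_enable_mask = GT_RENDER_USER_INTERRUPT << e->irq_shift;
	e->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << e->irq_shift;
}

int main(void)
{
	struct engine rcs = { .irq_shift = 0 };		/* hypothetical shifts */
	struct engine vcs = { .irq_shift = 16 };

	default_irqs(&rcs);
	default_irqs(&vcs);
	printf("rcs 0x%08x vcs 0x%08x\n", rcs.irq_enable_mask, vcs.irq_enable_mask);
	return 0;
}
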
 
 static int
-lrc_setup_hws(struct intel_engine_cs *engine,
-             struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
+       const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
        void *hws;
 
        /* The HWSP is part of the default context object in LRC mode. */
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
-                                      LRC_PPHWSP_PN * PAGE_SIZE;
-       hws = i915_gem_object_pin_map(dctx_obj);
+       hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(hws))
                return PTR_ERR(hws);
-       engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-       engine->status_page.obj = dctx_obj;
+
+       engine->status_page.page_addr = hws + hws_offset;
+       engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
+       engine->status_page.vma = vma;
 
        return 0;
 }
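
In LRC mode the status page is the PPHWSP page embedded in the context object, so the CPU pointer and the GGTT offset are both derived by adding the same page offset to their respective bases. A sketch of that arithmetic with hypothetical values:

#include <stdio.h>

#define PAGE_SIZE	4096
#define LRC_PPHWSP_PN	0	/* HWSP page index in the context (0 here) */

int main(void)
{
	static char context_map[2 * PAGE_SIZE];	/* stands in for the pinned map */
	unsigned int ggtt_base = 0x100000;	/* hypothetical vma offset */
	const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;

	void *page_addr = context_map + hws_offset;		/* CPU view */
	unsigned int ggtt_offset = ggtt_base + hws_offset;	/* GPU view */

	printf("cpu %p gpu 0x%08x\n", page_addr, ggtt_offset);
	return 0;
}
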
 
+static void
+logical_ring_setup(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       enum forcewake_domains fw_domains;
+
+       intel_engine_setup_common(engine);
+
+       /* Intentionally left blank. */
+       engine->buffer = NULL;
+
+       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+                                                   RING_ELSP(engine),
+                                                   FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_PTR(engine),
+                                                    FW_REG_READ | FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
+                                                    FW_REG_READ);
+
+       engine->fw_domains = fw_domains;
+
+       tasklet_init(&engine->irq_tasklet,
+                    intel_lrc_irq_handler, (unsigned long)engine);
+
+       logical_ring_init_platform_invariants(engine);
+       logical_ring_default_vfuncs(engine);
+       logical_ring_default_irqs(engine);
+}
+
 static int
 logical_ring_init(struct intel_engine_cs *engine)
 {
        struct i915_gem_context *dctx = engine->i915->kernel_context;
        int ret;
 
-       ret = intel_engine_init_breadcrumbs(engine);
-       if (ret)
-               goto error;
-
-       ret = i915_cmd_parser_init_ring(engine);
+       ret = intel_engine_init_common(engine);
        if (ret)
                goto error;
 
@@ -2044,11 +1781,13 @@ error:
        return ret;
 }
 
-static int logical_render_ring_init(struct intel_engine_cs *engine)
+int logical_render_ring_init(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
+       logical_ring_setup(engine);
+
        if (HAS_L3_DPF(dev_priv))
                engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
@@ -2058,11 +1797,10 @@ static int logical_render_ring_init(struct intel_engine_cs *engine)
        else
                engine->init_hw = gen8_init_render_ring;
        engine->init_context = gen8_init_rcs_context;
-       engine->cleanup = intel_fini_pipe_control;
        engine->emit_flush = gen8_emit_flush_render;
        engine->emit_request = gen8_emit_request_render;
 
-       ret = intel_init_pipe_control(engine, 4096);
+       ret = intel_engine_create_scratch(engine, 4096);
        if (ret)
                return ret;
 
@@ -2085,160 +1823,11 @@ static int logical_render_ring_init(struct intel_engine_cs *engine)
        return ret;
 }
 
-static const struct logical_ring_info {
-       const char *name;
-       unsigned exec_id;
-       unsigned guc_id;
-       u32 mmio_base;
-       unsigned irq_shift;
-       int (*init)(struct intel_engine_cs *engine);
-} logical_rings[] = {
-       [RCS] = {
-               .name = "render ring",
-               .exec_id = I915_EXEC_RENDER,
-               .guc_id = GUC_RENDER_ENGINE,
-               .mmio_base = RENDER_RING_BASE,
-               .irq_shift = GEN8_RCS_IRQ_SHIFT,
-               .init = logical_render_ring_init,
-       },
-       [BCS] = {
-               .name = "blitter ring",
-               .exec_id = I915_EXEC_BLT,
-               .guc_id = GUC_BLITTER_ENGINE,
-               .mmio_base = BLT_RING_BASE,
-               .irq_shift = GEN8_BCS_IRQ_SHIFT,
-               .init = logical_ring_init,
-       },
-       [VCS] = {
-               .name = "bsd ring",
-               .exec_id = I915_EXEC_BSD,
-               .guc_id = GUC_VIDEO_ENGINE,
-               .mmio_base = GEN6_BSD_RING_BASE,
-               .irq_shift = GEN8_VCS1_IRQ_SHIFT,
-               .init = logical_ring_init,
-       },
-       [VCS2] = {
-               .name = "bsd2 ring",
-               .exec_id = I915_EXEC_BSD,
-               .guc_id = GUC_VIDEO_ENGINE2,
-               .mmio_base = GEN8_BSD2_RING_BASE,
-               .irq_shift = GEN8_VCS2_IRQ_SHIFT,
-               .init = logical_ring_init,
-       },
-       [VECS] = {
-               .name = "video enhancement ring",
-               .exec_id = I915_EXEC_VEBOX,
-               .guc_id = GUC_VIDEOENHANCE_ENGINE,
-               .mmio_base = VEBOX_RING_BASE,
-               .irq_shift = GEN8_VECS_IRQ_SHIFT,
-               .init = logical_ring_init,
-       },
-};
-
-static struct intel_engine_cs *
-logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
+int logical_xcs_ring_init(struct intel_engine_cs *engine)
 {
-       const struct logical_ring_info *info = &logical_rings[id];
-       struct intel_engine_cs *engine = &dev_priv->engine[id];
-       enum forcewake_domains fw_domains;
-
-       engine->id = id;
-       engine->name = info->name;
-       engine->exec_id = info->exec_id;
-       engine->guc_id = info->guc_id;
-       engine->mmio_base = info->mmio_base;
-
-       engine->i915 = dev_priv;
+       logical_ring_setup(engine);
 
-       /* Intentionally left blank. */
-       engine->buffer = NULL;
-
-       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
-                                                   RING_ELSP(engine),
-                                                   FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_PTR(engine),
-                                                    FW_REG_READ | FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
-                                                    FW_REG_READ);
-
-       engine->fw_domains = fw_domains;
-
-       INIT_LIST_HEAD(&engine->active_list);
-       INIT_LIST_HEAD(&engine->request_list);
-       INIT_LIST_HEAD(&engine->buffers);
-       INIT_LIST_HEAD(&engine->execlist_queue);
-       spin_lock_init(&engine->execlist_lock);
-
-       tasklet_init(&engine->irq_tasklet,
-                    intel_lrc_irq_handler, (unsigned long)engine);
-
-       logical_ring_init_platform_invariants(engine);
-       logical_ring_default_vfuncs(engine);
-       logical_ring_default_irqs(engine, info->irq_shift);
-
-       intel_engine_init_hangcheck(engine);
-       i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
-
-       return engine;
-}
-
-/**
- * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
- *
- * This function inits the engines for an Execlists submission style (the
- * equivalent in the legacy ringbuffer submission world would be
- * i915_gem_init_engines). It does it only for those engines that are present in
- * the hardware.
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_logical_rings_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned int mask = 0;
-       unsigned int i;
-       int ret;
-
-       WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
-               GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
-
-       for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
-               if (!HAS_ENGINE(dev_priv, i))
-                       continue;
-
-               if (!logical_rings[i].init)
-                       continue;
-
-               ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
-               if (ret)
-                       goto cleanup;
-
-               mask |= ENGINE_MASK(i);
-       }
-
-       /*
-        * Catch failures to update logical_rings table when the new engines
-        * are added to the driver by a warning and disabling the forgotten
-        * engines.
-        */
-       if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
-               struct intel_device_info *info =
-                       (struct intel_device_info *)&dev_priv->info;
-               info->ring_mask = mask;
-       }
-
-       return 0;
-
-cleanup:
-       for (i = 0; i < I915_NUM_ENGINES; i++)
-               intel_logical_ring_cleanup(&dev_priv->engine[i]);
-
-       return ret;
+       return logical_ring_init(engine);
 }
 
 static u32
@@ -2259,24 +1848,24 @@ make_rpcs(struct drm_i915_private *dev_priv)
         * must make an explicit request through RPCS for full
         * enablement.
        */
-       if (INTEL_INFO(dev_priv)->has_slice_pg) {
+       if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
                rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev_priv)->slice_total <<
+               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
                        GEN8_RPCS_S_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev_priv)->has_subslice_pg) {
+       if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
                rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
+               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
                        GEN8_RPCS_SS_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev_priv)->has_eu_pg) {
-               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+       if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
                        GEN8_RPCS_EU_MIN_SHIFT;
-               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
                        GEN8_RPCS_EU_MAX_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
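
make_rpcs() now counts enabled slices and subslices by taking the popcount of the sseu masks (hweight8) before packing the counts into RPCS fields. A standalone sketch of the mask-to-count packing; the shift positions are illustrative:

#include <stdio.h>

#define RPCS_S_CNT_SHIFT	15	/* illustrative field positions */
#define RPCS_SS_CNT_SHIFT	8
#define RPCS_ENABLE		(1u << 31)

static unsigned int hweight8(unsigned int v)	/* population count */
{
	unsigned int n = 0;
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	unsigned int slice_mask = 0x3;		/* hypothetical: two slices */
	unsigned int subslice_mask = 0x7;	/* hypothetical: three subslices */
	unsigned int rpcs = 0;

	rpcs |= hweight8(slice_mask) << RPCS_S_CNT_SHIFT;
	rpcs |= hweight8(subslice_mask) << RPCS_SS_CNT_SHIFT;
	rpcs |= RPCS_ENABLE;

	printf("rpcs 0x%08x\n", rpcs);
	return 0;
}
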
@@ -2309,7 +1898,7 @@ static int
 populate_lr_context(struct i915_gem_context *ctx,
                    struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
-                   struct intel_ringbuffer *ringbuf)
+                   struct intel_ring *ring)
 {
        struct drm_i915_private *dev_priv = ctx->i915;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2326,7 +1915,7 @@ populate_lr_context(struct i915_gem_context *ctx,
                return ret;
        }
 
-       vaddr = i915_gem_object_pin_map(ctx_obj);
+       vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
@@ -2362,7 +1951,7 @@ populate_lr_context(struct i915_gem_context *ctx,
                       RING_START(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
                       RING_CTL(engine->mmio_base),
-                      ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+                      ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
                       RING_BBADDR_UDW(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2383,9 +1972,9 @@ populate_lr_context(struct i915_gem_context *ctx,
                               RING_INDIRECT_CTX(engine->mmio_base), 0);
                ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
                               RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
-               if (engine->wa_ctx.obj) {
+               if (engine->wa_ctx.vma) {
                        struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-                       uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+                       u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 
                        reg_state[CTX_RCS_INDIRECT_CTX+1] =
                                (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
@@ -2484,26 +2073,14 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
        return ret;
 }
 
-/**
- * execlists_context_deferred_alloc() - create the LRC specific bits of a context
- * @ctx: LR context to create.
- * @engine: engine to be used with the context.
- *
- * This function can be called more than once, with different engines, if we plan
- * to use the context with them. The context backing objects and the ringbuffers
- * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why
- * the creation is a deferred call: it's better to make sure first that we need to use
- * a given ring with the context.
- *
- * Return: non-zero on error.
- */
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *ctx_obj;
        struct intel_context *ce = &ctx->engine[engine->id];
+       struct i915_vma *vma;
        uint32_t context_size;
-       struct intel_ringbuffer *ringbuf;
+       struct intel_ring *ring;
        int ret;
 
        WARN_ON(ce->state);
@@ -2519,60 +2096,63 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                return PTR_ERR(ctx_obj);
        }
 
-       ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
-       if (IS_ERR(ringbuf)) {
-               ret = PTR_ERR(ringbuf);
+       vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto error_deref_obj;
+       }
+
+       ring = intel_engine_create_ring(engine, ctx->ring_size);
+       if (IS_ERR(ring)) {
+               ret = PTR_ERR(ring);
                goto error_deref_obj;
        }
 
-       ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+       ret = populate_lr_context(ctx, ctx_obj, engine, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-               goto error_ringbuf;
+               goto error_ring_free;
        }
 
-       ce->ringbuf = ringbuf;
-       ce->state = ctx_obj;
+       ce->ring = ring;
+       ce->state = vma;
        ce->initialised = engine->init_context == NULL;
 
        return 0;
 
-error_ringbuf:
-       intel_ringbuffer_free(ringbuf);
+error_ring_free:
+       intel_ring_free(ring);
 error_deref_obj:
-       drm_gem_object_unreference(&ctx_obj->base);
-       ce->ringbuf = NULL;
-       ce->state = NULL;
+       i915_gem_object_put(ctx_obj);
        return ret;
 }
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct i915_gem_context *ctx)
+void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 {
+       struct i915_gem_context *ctx = dev_priv->kernel_context;
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
                struct intel_context *ce = &ctx->engine[engine->id];
-               struct drm_i915_gem_object *ctx_obj = ce->state;
                void *vaddr;
                uint32_t *reg_state;
 
-               if (!ctx_obj)
+               if (!ce->state)
                        continue;
 
-               vaddr = i915_gem_object_pin_map(ctx_obj);
+               vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
                if (WARN_ON(IS_ERR(vaddr)))
                        continue;
 
                reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               i915_gem_object_unpin_map(ctx_obj);
+               ce->state->obj->dirty = true;
+               i915_gem_object_unpin_map(ce->state->obj);
 
-               ce->ringbuf->head = 0;
-               ce->ringbuf->tail = 0;
+               ce->ring->head = 0;
+               ce->ring->tail = 0;
        }
 }
index 2b8255c19dccf59e1dfee6c6614673676c166cb1..4fed8165f98a221ea408099b75cf61988a1a850a 100644 (file)
 #define GEN8_LR_CONTEXT_ALIGN 4096
 
 /* Execlists regs */
-#define RING_ELSP(ring)                                _MMIO((ring)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(ring)          _MMIO((ring)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(ring)          _MMIO((ring)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(ring)             _MMIO((ring)->mmio_base + 0x244)
+#define RING_ELSP(engine)                      _MMIO((engine)->mmio_base + 0x230)
+#define RING_EXECLIST_STATUS_LO(engine)                _MMIO((engine)->mmio_base + 0x234)
+#define RING_EXECLIST_STATUS_HI(engine)                _MMIO((engine)->mmio_base + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(engine)           _MMIO((engine)->mmio_base + 0x244)
 #define          CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       (1 << 3)
 #define          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   (1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
-#define RING_CONTEXT_STATUS_BUF_BASE(ring)     _MMIO((ring)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(ring, i)    _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(ring, i)    _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(ring)          _MMIO((ring)->mmio_base + 0x3a0)
+#define RING_CONTEXT_STATUS_BUF_BASE(engine)   _MMIO((engine)->mmio_base + 0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(engine, i)  _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(engine, i)  _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(engine)                _MMIO((engine)->mmio_base + 0x3a0)
 
 /* The docs specify that the write pointer wraps around after 5h, "After status
  * is written out to the last available status QW at offset 5h, this pointer
@@ -67,35 +67,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-int intel_logical_rings_init(struct drm_device *dev);
+int logical_render_ring_init(struct intel_engine_cs *engine);
+int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-       ringbuf->tail &= ringbuf->size - 1;
-}
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-                                          u32 data)
-{
-       iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-       ringbuf->tail += 4;
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-                                              i915_reg_t reg)
-{
-       intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
+int intel_engines_init(struct drm_device *dev);
 
 /* Logical Ring Contexts */
 
@@ -112,19 +87,13 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 
 struct drm_i915_private;
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct i915_gem_context *ctx);
+void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
                                    int enable_execlists);
-struct i915_execbuffer_params;
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-                              struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas);
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
 #endif /* _INTEL_LRC_H_ */
index 49550470483ea6aaacfae338052fe1536ae09d1d..e1d47d51ea4784493fc707029c008673df7d0179 100644 (file)
@@ -48,6 +48,20 @@ struct intel_lvds_connector {
        struct notifier_block lid_notifier;
 };
 
+struct intel_lvds_pps {
+       /* 100us units */
+       int t1_t2;
+       int t3;
+       int t4;
+       int t5;
+       int tx;
+
+       int divider;
+
+       int port;
+       bool powerdown_on_reset;
+};
+
 struct intel_lvds_encoder {
        struct intel_encoder base;
 
@@ -55,6 +69,9 @@ struct intel_lvds_encoder {
        i915_reg_t reg;
        u32 a3_power;
 
+       struct intel_lvds_pps init_pps;
+       u32 init_lvds_val;
+
        struct intel_lvds_connector *attached_connector;
 };
 
@@ -136,28 +153,108 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
        pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+                                       struct intel_lvds_pps *pps)
+{
+       u32 val;
+
+       pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+
+       val = I915_READ(PP_ON_DELAYS(0));
+       pps->port = (val & PANEL_PORT_SELECT_MASK) >>
+                   PANEL_PORT_SELECT_SHIFT;
+       pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
+                    PANEL_POWER_UP_DELAY_SHIFT;
+       pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
+                 PANEL_LIGHT_ON_DELAY_SHIFT;
+
+       val = I915_READ(PP_OFF_DELAYS(0));
+       pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
+                 PANEL_POWER_DOWN_DELAY_SHIFT;
+       pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
+                 PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+       val = I915_READ(PP_DIVISOR(0));
+       pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
+                      PP_REFERENCE_DIVIDER_SHIFT;
+       val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
+             PANEL_POWER_CYCLE_DELAY_SHIFT;
+       /*
+        * Remove the BSpec specified +1 (100ms) offset that accounts for a
+        * too short power-cycle delay due to the asynchronous programming of
+        * the register.
+        */
+       if (val)
+               val--;
+       /* Convert from 100ms to 100us units */
+       pps->t4 = val * 1000;
+
+       if (INTEL_INFO(dev_priv)->gen <= 4 &&
+           pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+               DRM_DEBUG_KMS("Panel power timings uninitialized, "
+                             "setting defaults\n");
+               /* Set T2 to 40ms and T5 to 200ms in 100 usec units */
+               pps->t1_t2 = 40 * 10;
+               pps->t5 = 200 * 10;
+               /* Set T3 to 35ms and Tx to 200ms in 100 usec units */
+               pps->t3 = 35 * 10;
+               pps->tx = 200 * 10;
+       }
+
+       DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+                        "divider %d port %d powerdown_on_reset %d\n",
+                        pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+                        pps->divider, pps->port, pps->powerdown_on_reset);
+}
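
The read-out above is repeated mask-and-shift extraction of packed delay fields. A sketch of the pattern with a hypothetical field layout mirroring PP_ON_DELAYS (all values in 100 us units, as in the driver):

#include <stdio.h>

/* Hypothetical layout: power-up delay in bits 28:16, light-on in 12:0. */
#define POWER_UP_DELAY_MASK	0x1fff0000u
#define POWER_UP_DELAY_SHIFT	16
#define LIGHT_ON_DELAY_MASK	0x00001fffu
#define LIGHT_ON_DELAY_SHIFT	0

#define FIELD_GET(mask, shift, val)	(((val) & (mask)) >> (shift))

int main(void)
{
	unsigned int pp_on_delays = (400u << 16) | 2000u;	/* mock register */

	unsigned int t1_t2 = FIELD_GET(POWER_UP_DELAY_MASK,
				       POWER_UP_DELAY_SHIFT, pp_on_delays);
	unsigned int t5 = FIELD_GET(LIGHT_ON_DELAY_MASK,
				    LIGHT_ON_DELAY_SHIFT, pp_on_delays);

	printf("t1+t2 %u t5 %u (100us units)\n", t1_t2, t5);
	return 0;
}
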
+
+static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+                                  struct intel_lvds_pps *pps)
+{
+       u32 val;
+
+       val = I915_READ(PP_CONTROL(0));
+       WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+       if (pps->powerdown_on_reset)
+               val |= PANEL_POWER_RESET;
+       I915_WRITE(PP_CONTROL(0), val);
+
+       I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
+                                   (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
+                                   (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
+       I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
+                                    (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+
+       val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
+       val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
+              PANEL_POWER_CYCLE_DELAY_SHIFT;
+       I915_WRITE(PP_DIVISOR(0), val);
+}
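
Note the deliberate asymmetry around t4: the read side drops the BSpec +1 (100 ms) offset and converts to 100 us units, while the write side converts back with DIV_ROUND_UP and re-adds the offset, so a read-modify-write round-trips cleanly. A quick sketch of that pair:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Read: register field (100 ms units, +1 offset) -> t4 in 100 us units. */
static int decode_t4(int reg)
{
	if (reg)
		reg--;			/* drop the BSpec +1 (100 ms) offset */
	return reg * 1000;		/* 100 ms -> 100 us units */
}

/* Write: t4 in 100 us units -> register field, offset re-added. */
static int encode_t4(int t4)
{
	return DIV_ROUND_UP(t4, 1000) + 1;
}

int main(void)
{
	int reg = 6;			/* mock PANEL_POWER_CYCLE_DELAY field */
	int t4 = decode_t4(reg);

	printf("t4 %d (100us units), re-encoded field %d\n", t4, encode_t4(t4));
	return 0;
}
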
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int pipe = crtc->pipe;
        u32 temp;
 
-       if (HAS_PCH_SPLIT(dev)) {
+       if (HAS_PCH_SPLIT(dev_priv)) {
                assert_fdi_rx_pll_disabled(dev_priv, pipe);
                assert_shared_dpll_disabled(dev_priv,
-                                           crtc->config->shared_dpll);
+                                           pipe_config->shared_dpll);
        } else {
                assert_pll_disabled(dev_priv, pipe);
        }
 
-       temp = I915_READ(lvds_encoder->reg);
+       intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+
+       temp = lvds_encoder->init_lvds_val;
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
-       if (HAS_PCH_CPT(dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~PORT_TRANS_SEL_MASK;
                temp |= PORT_TRANS_SEL_CPT(pipe);
        } else {
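
The PP_DIVISOR arithmetic above stores the power-cycle delay in 100ms units with a BSpec-mandated +1 offset, while the driver keeps pps->t4 in 100µs units. A minimal standalone sketch of that round trip, using a hypothetical register value rather than driver code:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int reg_field = 6;     /* hypothetical PP_DIVISOR field */
        unsigned int t4;

        /* decode: drop the BSpec +1 offset, then 100ms -> 100us units */
        if (reg_field)
                reg_field--;
        t4 = reg_field * 1000;          /* 5 * 100ms == 5000 * 100us */

        /* encode: 100us -> 100ms units, rounded up, plus the +1 offset */
        assert(DIV_ROUND_UP(t4, 1000) + 1 == 6);
        printf("t4 = %u (in 100us units)\n", t4);
        return 0;
}
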
@@ -170,7 +267,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 
        /* set the corresponding LVDS_BORDER bit */
        temp &= ~LVDS_BORDER_ENABLE;
-       temp |= crtc->config->gmch_pfit.lvds_border_bits;
+       temp |= pipe_config->gmch_pfit.lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
@@ -193,7 +290,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
        if (IS_GEN4(dev_priv)) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
-               if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+               if (pipe_config->dither && pipe_config->pipe_bpp == 18)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
@@ -210,57 +307,45 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 /**
  * Sets the power state for the panel.
  */
-static void intel_enable_lvds(struct intel_encoder *encoder)
+static void intel_enable_lvds(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
                &lvds_encoder->attached_connector->base;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       i915_reg_t ctl_reg, stat_reg;
-
-       if (HAS_PCH_SPLIT(dev)) {
-               ctl_reg = PCH_PP_CONTROL;
-               stat_reg = PCH_PP_STATUS;
-       } else {
-               ctl_reg = PP_CONTROL;
-               stat_reg = PP_STATUS;
-       }
 
        I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
 
-       I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+       I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
        POSTING_READ(lvds_encoder->reg);
-       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
+       if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
                DRM_ERROR("timed out waiting for panel to power on\n");
 
        intel_panel_enable_backlight(intel_connector);
 }
 
-static void intel_disable_lvds(struct intel_encoder *encoder)
+static void intel_disable_lvds(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *old_conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       i915_reg_t ctl_reg, stat_reg;
-
-       if (HAS_PCH_SPLIT(dev)) {
-               ctl_reg = PCH_PP_CONTROL;
-               stat_reg = PCH_PP_STATUS;
-       } else {
-               ctl_reg = PP_CONTROL;
-               stat_reg = PP_STATUS;
-       }
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
+       I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+       if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
                DRM_ERROR("timed out waiting for panel to power off\n");
 
        I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
        POSTING_READ(lvds_encoder->reg);
 }
 
-static void gmch_disable_lvds(struct intel_encoder *encoder)
+static void gmch_disable_lvds(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
@@ -268,10 +353,12 @@ static void gmch_disable_lvds(struct intel_encoder *encoder)
 
        intel_panel_disable_backlight(intel_connector);
 
-       intel_disable_lvds(encoder);
+       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_lvds(struct intel_encoder *encoder)
+static void pch_disable_lvds(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
@@ -280,9 +367,11 @@ static void pch_disable_lvds(struct intel_encoder *encoder)
        intel_panel_disable_backlight(intel_connector);
 }
 
-static void pch_post_disable_lvds(struct intel_encoder *encoder)
+static void pch_post_disable_lvds(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_lvds(encoder);
+       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
 static enum drm_mode_status
@@ -304,7 +393,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
-                                     struct intel_crtc_state *pipe_config)
+                                     struct intel_crtc_state *pipe_config,
+                                     struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder =
@@ -900,17 +990,6 @@ void intel_lvds_init(struct drm_device *dev)
        int pipe;
        u8 pin;
 
-       /*
-        * Unlock registers and just leave them unlocked. Do this before
-        * checking quirk lists to avoid bogus WARNINGs.
-        */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_CONTROL,
-                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else if (INTEL_INFO(dev_priv)->gen < 5) {
-               I915_WRITE(PP_CONTROL,
-                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-       }
        if (!intel_lvds_supported(dev))
                return;
 
@@ -943,18 +1022,6 @@ void intel_lvds_init(struct drm_device *dev)
                DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
        }
 
-        /* Set the Panel Power On/Off timings if uninitialized. */
-       if (INTEL_INFO(dev_priv)->gen < 5 &&
-           I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
-               /* Set T2 to 40ms and T5 to 200ms */
-               I915_WRITE(PP_ON_DELAYS, 0x019007d0);
-
-               /* Set T3 to 35ms and Tx to 200ms */
-               I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
-
-               DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
-       }
-
        lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
        if (!lvds_encoder)
                return;
@@ -1020,6 +1087,10 @@ void intel_lvds_init(struct drm_device *dev)
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_ASPECT);
        intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+
+       intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+       lvds_encoder->init_lvds_val = lvds;
+
        /*
         * LVDS discovery:
         * 1) check for EDID on DDC
@@ -1054,17 +1125,6 @@ void intel_lvds_init(struct drm_device *dev)
        }
        lvds_connector->base.edid = edid;
 
-       if (IS_ERR_OR_NULL(edid)) {
-               /* Didn't get an EDID, so
-                * Set wide sync ranges so we get all modes
-                * handed to valid_mode for checking
-                */
-               connector->display_info.min_vfreq = 0;
-               connector->display_info.max_vfreq = 200;
-               connector->display_info.min_hfreq = 0;
-               connector->display_info.max_hfreq = 200;
-       }
-
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
                        DRM_DEBUG_KMS("using preferred mode from EDID: ");
index 927825f5b28447f95fd021ca5af9c62f95eda4a3..80bb9247ce664ffdeb62689ce3f77891f3d68c07 100644 (file)
@@ -97,7 +97,8 @@ struct drm_i915_mocs_table {
  *       end.
  */
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-       { /* 0x00000009 */
+       [I915_MOCS_UNCACHED] = {
+         /* 0x00000009 */
          .control_value = LE_CACHEABILITY(LE_UC) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
                           LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
@@ -106,7 +107,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
          /* 0x0010 */
          .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
        },
-       {
+       [I915_MOCS_PTE] = {
          /* 0x00000038 */
          .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -115,7 +116,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
          /* 0x0030 */
          .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
        },
-       {
+       [I915_MOCS_CACHED] = {
          /* 0x0000003b */
          .control_value = LE_CACHEABILITY(LE_WB) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -128,7 +129,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-       {
+       [I915_MOCS_UNCACHED] = {
          /* 0x00000009 */
          .control_value = LE_CACHEABILITY(LE_UC) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -138,7 +139,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
          /* 0x0010 */
          .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
        },
-       {
+       [I915_MOCS_PTE] = {
          /* 0x00000038 */
          .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -148,7 +149,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
          /* 0x0030 */
          .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
        },
-       {
+       [I915_MOCS_CACHED] = {
          /* 0x00000039 */
          .control_value = LE_CACHEABILITY(LE_UC) |
                           LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -203,9 +204,9 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
        return result;
 }
 
-static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
 {
-       switch (ring) {
+       switch (engine_id) {
        case RCS:
                return GEN9_GFX_MOCS(index);
        case VCS:
@@ -217,7 +218,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
        case VCS2:
                return GEN9_MFX1_MOCS(index);
        default:
-               MISSING_CASE(ring);
+               MISSING_CASE(engine_id);
                return INVALID_MMIO_REG;
        }
 }
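
The MOCS tables above switch from positional to designated initializers so each entry is tied to its I915_MOCS_* index. A standalone illustration of the idiom, with a hypothetical enum and values rather than the real tables:

#include <stdio.h>

enum mocs_index { MOCS_UNCACHED, MOCS_PTE, MOCS_CACHED, MOCS_COUNT };

struct mocs_entry {
        unsigned int control_value;
        unsigned int l3cc_value;
};

/* keyed by enum: an entry can't silently drift away from its index */
static const struct mocs_entry table[MOCS_COUNT] = {
        [MOCS_UNCACHED] = { .control_value = 0x09, .l3cc_value = 0x10 },
        [MOCS_PTE]      = { .control_value = 0x38, .l3cc_value = 0x30 },
        [MOCS_CACHED]   = { .control_value = 0x3b, .l3cc_value = 0x30 },
};

int main(void)
{
        printf("PTE control: 0x%02x\n", table[MOCS_PTE].control_value);
        return 0;
}
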
@@ -275,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                                   const struct drm_i915_mocs_table *table)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ring *ring = req->ring;
        enum intel_engine_id engine = req->engine->id;
        unsigned int index;
        int ret;
@@ -287,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf,
-                               MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
        for (index = 0; index < table->size; index++) {
-               intel_logical_ring_emit_reg(ringbuf,
-                                           mocs_register(engine, index));
-               intel_logical_ring_emit(ringbuf,
-                                       table->table[index].control_value);
+               intel_ring_emit_reg(ring, mocs_register(engine, index));
+               intel_ring_emit(ring, table->table[index].control_value);
        }
 
        /*
@@ -306,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
         * that value to all the used entries.
         */
        for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-               intel_logical_ring_emit_reg(ringbuf,
-                                           mocs_register(engine, index));
-               intel_logical_ring_emit(ringbuf,
-                                       table->table[0].control_value);
+               intel_ring_emit_reg(ring, mocs_register(engine, index));
+               intel_ring_emit(ring, table->table[0].control_value);
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -340,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
                                const struct drm_i915_mocs_table *table)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ring *ring = req->ring;
        unsigned int i;
        int ret;
 
@@ -351,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf,
+       intel_ring_emit(ring,
                        MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
        for (i = 0; i < table->size/2; i++) {
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf,
-                                       l3cc_combine(table, 2*i, 2*i+1));
+               intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+               intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
        }
 
        if (table->size & 0x01) {
                /* Odd table size - 1 left over */
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+               intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+               intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
                i++;
        }
 
@@ -373,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
         * they are reserved by the hardware.
         */
        for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+               intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+               intel_ring_emit(ring, l3cc_combine(table, 0, 0));
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
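
emit_mocs_l3cc_table() above packs two 16-bit L3CC values into each 32-bit register write and, for an odd-sized table, pairs the final entry with entry 0. A standalone sketch of that packing, with a hypothetical field layout rather than the real register encoding:

#include <inttypes.h>
#include <stdio.h>

/* pack two 16-bit l3cc values into one 32-bit register write */
static uint32_t l3cc_combine(const uint16_t *tbl, unsigned int lo,
                             unsigned int hi)
{
        return tbl[lo] | ((uint32_t)tbl[hi] << 16);
}

int main(void)
{
        const uint16_t l3cc[3] = { 0x0010, 0x0030, 0x0030 };    /* odd size */
        unsigned int i;

        for (i = 0; i < 3 / 2; i++)
                printf("reg %u = 0x%08" PRIx32 "\n",
                       i, l3cc_combine(l3cc, 2 * i, 2 * i + 1));

        /* odd leftover: pair the final entry with entry 0 */
        printf("reg %u = 0x%08" PRIx32 "\n", i, l3cc_combine(l3cc, 2 * i, 0));
        return 0;
}
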
index 4640299e04ecb95b5d2f9fd8c56b6d92b22b7bd9..a8bd9f7bfecec26dccf53b7238af58aeaba5ffa9 100644 (file)
@@ -54,6 +54,6 @@
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
 void intel_mocs_init_l3cc_table(struct drm_device *dev);
-int intel_mocs_init_engine(struct intel_engine_cs *ring);
+int intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
index f2584d0a01ab61cf09bc5561071e0c7d22130ffb..951e834dd2744f7f2fe84a54723136f743d185cf 100644 (file)
@@ -25,7 +25,6 @@
 
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/fb.h>
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
index adca262d591ac02c46577ee05fa6741741433d41..7acbbbf97833a36cff79bc27b479a9791d5cc574 100644 (file)
@@ -1047,6 +1047,23 @@ err_out:
        return err;
 }
 
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+       DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+       {
+               .callback = intel_use_opregion_panel_type_callback,
+               .ident = "Conrac GmbH IX45GM2",
+               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+                           DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+               },
+       },
+       { }
+};
+
 int
 intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
@@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
                return -ENODEV;
        }
 
+       /*
+        * So far we know that some machines must use it, others must not use it.
+        * There doesn't seem to be any way to determine which way to go, except
+        * via a quirk list :(
+        */
+       if (!dmi_check_system(intel_use_opregion_panel_type)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        /*
         * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
         * low vswing for eDP, whereas the VBT panel type (2) gives us normal
index 3212d8806b5ad13b0f88cc1c1f56bc4ef2d953b8..a24bc8c7889f04ce3f3d7e2351404e43f777ceef 100644 (file)
@@ -30,6 +30,7 @@
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 
 /* Limits for overlay size. According to intel doc, the real limits are:
  * Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -170,8 +171,8 @@ struct overlay_registers {
 struct intel_overlay {
        struct drm_i915_private *i915;
        struct intel_crtc *crtc;
-       struct drm_i915_gem_object *vid_bo;
-       struct drm_i915_gem_object *old_vid_bo;
+       struct i915_vma *vma;
+       struct i915_vma *old_vma;
        bool active;
        bool pfit_active;
        u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -183,8 +184,7 @@ struct intel_overlay {
        u32 flip_addr;
        struct drm_i915_gem_object *reg_bo;
        /* flip handling */
-       struct drm_i915_gem_request *last_flip_req;
-       void (*flip_tail)(struct intel_overlay *);
+       struct i915_gem_active last_flip;
 };
 
 static struct overlay_registers __iomem *
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
                                         overlay->flip_addr,
                                         PAGE_SIZE);
 
@@ -210,37 +210,46 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
                io_mapping_unmap(regs);
 }
 
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct drm_i915_gem_request *req,
-                                        void (*tail)(struct intel_overlay *))
+                                        i915_gem_retire_fn retire)
 {
-       int ret;
-
-       WARN_ON(overlay->last_flip_req);
-       i915_gem_request_assign(&overlay->last_flip_req, req);
+       GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
+                                       &overlay->i915->drm.struct_mutex));
+       overlay->last_flip.retire = retire;
+       i915_gem_active_set(&overlay->last_flip, req);
        i915_add_request(req);
+}
 
-       overlay->flip_tail = tail;
-       ret = i915_wait_request(overlay->last_flip_req);
-       if (ret)
-               return ret;
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+                                        struct drm_i915_gem_request *req,
+                                        i915_gem_retire_fn retire)
+{
+       intel_overlay_submit_request(overlay, req, retire);
+       return i915_gem_active_retire(&overlay->last_flip,
+                                     &overlay->i915->drm.struct_mutex);
+}
 
-       i915_gem_request_assign(&overlay->last_flip_req, NULL);
-       return 0;
+static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+
+       return i915_gem_request_alloc(engine, dev_priv->kernel_context);
 }
 
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
+       struct intel_ring *ring;
        int ret;
 
        WARN_ON(overlay->active);
        WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-       req = i915_gem_request_alloc(engine, NULL);
+       req = alloc_request(overlay);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -252,11 +261,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
        overlay->active = true;
 
-       intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-       intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
-       intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       ring = req->ring;
+       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+       intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -266,8 +276,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
                                  bool load_polyphase_filter)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
+       struct intel_ring *ring;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
        int ret;
@@ -282,7 +292,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        if (tmp & (1 << 17))
                DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-       req = i915_gem_request_alloc(engine, NULL);
+       req = alloc_request(overlay);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -292,38 +302,48 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
                return ret;
        }
 
-       intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-       intel_ring_emit(engine, flip_addr);
-       intel_ring_advance(engine);
+       ring = req->ring;
+       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+       intel_ring_emit(ring, flip_addr);
+       intel_ring_advance(ring);
 
-       WARN_ON(overlay->last_flip_req);
-       i915_gem_request_assign(&overlay->last_flip_req, req);
-       i915_add_request(req);
+       intel_overlay_submit_request(overlay, req, NULL);
 
        return 0;
 }
 
-static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+                                              struct drm_i915_gem_request *req)
 {
-       struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+       struct intel_overlay *overlay =
+               container_of(active, typeof(*overlay), last_flip);
+       struct i915_vma *vma;
 
-       i915_gem_object_ggtt_unpin(obj);
-       drm_gem_object_unreference(&obj->base);
+       vma = fetch_and_zero(&overlay->old_vma);
+       if (WARN_ON(!vma))
+               return;
 
-       overlay->old_vid_bo = NULL;
+       i915_gem_track_fb(vma->obj, NULL,
+                         INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+       i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
 
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
+static void intel_overlay_off_tail(struct i915_gem_active *active,
+                                  struct drm_i915_gem_request *req)
 {
-       struct drm_i915_gem_object *obj = overlay->vid_bo;
+       struct intel_overlay *overlay =
+               container_of(active, typeof(*overlay), last_flip);
+       struct i915_vma *vma;
 
        /* never have the overlay hw on without showing a frame */
-       if (WARN_ON(!obj))
+       vma = fetch_and_zero(&overlay->vma);
+       if (WARN_ON(!vma))
                return;
 
-       i915_gem_object_ggtt_unpin(obj);
-       drm_gem_object_unreference(&obj->base);
-       overlay->vid_bo = NULL;
+       i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 
        overlay->crtc->overlay = NULL;
        overlay->crtc = NULL;
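
The overlay conversion above replaces the explicit last_flip_req/flip_tail pair with an i915_gem_active tracker whose retire callback recovers the overlay via container_of(). A standalone sketch of that callback pattern, with simplified stand-in types rather than the i915 structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_active {
        void (*retire)(struct gem_active *active);
};

struct overlay {
        int frame;
        struct gem_active last_flip;
};

static void release_old_frame(struct gem_active *active)
{
        /* recover the enclosing object from the embedded tracker */
        struct overlay *ov = container_of(active, struct overlay, last_flip);

        printf("retiring frame %d\n", ov->frame);
}

int main(void)
{
        struct overlay ov = { .frame = 42 };

        ov.last_flip.retire = release_old_frame;
        /* a real tracker would invoke this once the request completes */
        ov.last_flip.retire(&ov.last_flip);
        return 0;
}
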
@@ -334,8 +354,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
+       struct intel_ring *ring;
        u32 flip_addr = overlay->flip_addr;
        int ret;
 
@@ -347,7 +367,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
-       req = i915_gem_request_alloc(engine, NULL);
+       req = alloc_request(overlay);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -357,46 +377,36 @@ static int intel_overlay_off(struct intel_overlay *overlay)
                return ret;
        }
 
+       ring = req->ring;
        /* wait for overlay to go idle */
-       intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-       intel_ring_emit(engine, flip_addr);
-       intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+       intel_ring_emit(ring, flip_addr);
+       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
        if (IS_I830(dev_priv)) {
                /* Workaround: Don't disable the overlay fully, since otherwise
                 * it dies on the next OVERLAY_ON cmd. */
-               intel_ring_emit(engine, MI_NOOP);
-               intel_ring_emit(engine, MI_NOOP);
-               intel_ring_emit(engine, MI_NOOP);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_NOOP);
        } else {
-               intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-               intel_ring_emit(engine, flip_addr);
-               intel_ring_emit(engine,
+               intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+               intel_ring_emit(ring, flip_addr);
+               intel_ring_emit(ring,
                                MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        }
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
-       return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
+       return intel_overlay_do_wait_request(overlay, req,
+                                            intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever and make forward progress. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-       int ret;
-
-       if (overlay->last_flip_req == NULL)
-               return 0;
-
-       ret = i915_wait_request(overlay->last_flip_req);
-       if (ret)
-               return ret;
-
-       if (overlay->flip_tail)
-               overlay->flip_tail(overlay);
-
-       i915_gem_request_assign(&overlay->last_flip_req, NULL);
-       return 0;
+       return i915_gem_active_retire(&overlay->last_flip,
+                                     &overlay->i915->drm.struct_mutex);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -406,7 +416,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -414,14 +423,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
        /* Only wait if there is actually an old frame to release to
         * guarantee forward progress.
         */
-       if (!overlay->old_vid_bo)
+       if (!overlay->old_vma)
                return 0;
 
        if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
                /* synchronous slowpath */
                struct drm_i915_gem_request *req;
+               struct intel_ring *ring;
 
-               req = i915_gem_request_alloc(engine, NULL);
+               req = alloc_request(overlay);
                if (IS_ERR(req))
                        return PTR_ERR(req);
 
@@ -431,22 +441,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
                        return ret;
                }
 
-               intel_ring_emit(engine,
+               ring = req->ring;
+               intel_ring_emit(ring,
                                MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-               intel_ring_emit(engine, MI_NOOP);
-               intel_ring_advance(engine);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
 
                ret = intel_overlay_do_wait_request(overlay, req,
                                                    intel_overlay_release_old_vid_tail);
                if (ret)
                        return ret;
-       }
+       } else
+               intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
 
-       intel_overlay_release_old_vid_tail(overlay);
-
-
-       i915_gem_track_fb(overlay->old_vid_bo, NULL,
-                         INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
        return 0;
 }
 
@@ -459,7 +466,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
 
        intel_overlay_release_old_vid(overlay);
 
-       overlay->last_flip_req = NULL;
        overlay->old_xscale = 0;
        overlay->old_yscale = 0;
        overlay->crtc = NULL;
@@ -740,6 +746,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
+       struct i915_vma *vma;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -748,12 +755,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret != 0)
                return ret;
 
-       ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
+       vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
                                                   &i915_ggtt_view_normal);
-       if (ret != 0)
-               return ret;
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
 
-       ret = i915_gem_object_put_fence(new_bo);
+       ret = i915_vma_put_fence(vma);
        if (ret)
                goto out_unpin;
 
@@ -794,7 +801,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        swidth = params->src_w;
        swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
        sheight = params->src_h;
-       iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+       iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
        ostride = params->stride_Y;
 
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -808,8 +815,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      params->src_w/uv_hscale);
                swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
                sheight |= (params->src_h/uv_vscale) << 16;
-               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
-               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+               iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+                         &regs->OBUF_0U);
+               iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+                         &regs->OBUF_0V);
                ostride |= params->stride_UV << 16;
        }
 
@@ -830,19 +839,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret)
                goto out_unpin;
 
-       i915_gem_track_fb(overlay->vid_bo, new_bo,
+       i915_gem_track_fb(overlay->vma->obj, new_bo,
                          INTEL_FRONTBUFFER_OVERLAY(pipe));
 
-       overlay->old_vid_bo = overlay->vid_bo;
-       overlay->vid_bo = new_bo;
+       overlay->old_vma = overlay->vma;
+       overlay->vma = vma;
 
-       intel_frontbuffer_flip(&dev_priv->drm,
-                              INTEL_FRONTBUFFER_OVERLAY(pipe));
+       intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
        return 0;
 
 out_unpin:
-       i915_gem_object_ggtt_unpin(new_bo);
+       i915_gem_object_unpin_from_display_plane(vma);
        return ret;
 }
 
@@ -870,12 +878,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        iowrite32(0, &regs->OCMD);
        intel_overlay_unmap_regs(overlay, regs);
 
-       ret = intel_overlay_off(overlay);
-       if (ret != 0)
-               return ret;
-
-       intel_overlay_off_tail(overlay);
-       return 0;
+       return intel_overlay_off(overlay);
 }
 
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
@@ -1122,9 +1125,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
        }
        crtc = to_intel_crtc(drmmode_crtc);
 
-       new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
-                                                  put_image_rec->bo_handle));
-       if (&new_bo->base == NULL) {
+       new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
+       if (!new_bo) {
                ret = -ENOENT;
                goto out_free;
        }
@@ -1132,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
        drm_modeset_lock_all(dev);
        mutex_lock(&dev->struct_mutex);
 
-       if (new_bo->tiling_mode) {
+       if (i915_gem_object_is_tiled(new_bo)) {
                DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
                ret = -EINVAL;
                goto out_unlock;
@@ -1220,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        drm_modeset_unlock_all(dev);
-       drm_gem_object_unreference_unlocked(&new_bo->base);
+       i915_gem_object_put_unlocked(new_bo);
 out_free:
        kfree(params);
 
@@ -1371,6 +1373,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        struct intel_overlay *overlay;
        struct drm_i915_gem_object *reg_bo;
        struct overlay_registers __iomem *regs;
+       struct i915_vma *vma = NULL;
        int ret;
 
        if (!HAS_OVERLAY(dev_priv))
@@ -1404,12 +1407,14 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
                }
                overlay->flip_addr = reg_bo->phys_handle->busaddr;
        } else {
-               ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
-               if (ret) {
+               vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
+                                              0, PAGE_SIZE, PIN_MAPPABLE);
+               if (IS_ERR(vma)) {
                        DRM_ERROR("failed to pin overlay register bo\n");
+                       ret = PTR_ERR(vma);
                        goto out_free_bo;
                }
-               overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+               overlay->flip_addr = i915_ggtt_offset(vma);
 
                ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
                if (ret) {
@@ -1441,10 +1446,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        return;
 
 out_unpin_bo:
-       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               i915_gem_object_ggtt_unpin(reg_bo);
+       if (vma)
+               i915_vma_unpin(vma);
 out_free_bo:
-       drm_gem_object_unreference(&reg_bo->base);
+       i915_gem_object_put(reg_bo);
 out_free:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
@@ -1461,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
         * hardware should be off already */
        WARN_ON(dev_priv->overlay->active);
 
-       drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+       i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
        kfree(dev_priv->overlay);
 }
 
@@ -1484,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
                                                overlay->flip_addr);
 
        return regs;
index 96c65d77e8866b0c25beadb46e342ecc8e37e9d6..c10e9b0405e8035d96d990286a65ed37bc550a39 100644 (file)
@@ -1430,10 +1430,11 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = lpt_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
 
        return 0;
 }
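
The backlight setup paths above now clamp the hardware readback into the VBT [min, max] range instead of storing it verbatim, and derive the enabled state purely from the PWM enable bit. A trivial standalone sketch of the clamping, with made-up values:

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
        unsigned int min = 26, max = 255;       /* hypothetical VBT limits */

        /* an implausibly low readback is pulled up to the VBT minimum */
        printf("stored level: %u\n", clamp(3u, min, max));      /* -> 26 */
        /* an in-range readback is stored unchanged */
        printf("stored level: %u\n", clamp(100u, min, max));    /* -> 100 */
        return 0;
}
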
@@ -1459,11 +1460,13 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = pch_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
        cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
        panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
-               (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+               (pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
        return 0;
 }
@@ -1498,9 +1501,11 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = panel->backlight.level != 0;
+       panel->backlight.enabled = val != 0;
 
        return 0;
 }
@@ -1530,10 +1535,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
        return 0;
 }
@@ -1562,10 +1568,11 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = _vlv_get_backlight(dev_priv, pipe);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
        return 0;
 }
@@ -1607,10 +1614,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
                return -ENODEV;
 
        val = bxt_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
        return 0;
 }
index f4f3fcc8b3becb59c0ed5d6bdd5dd27a177f3731..2df06b703e3df9363772c7fc037457a89ec693a9 100644 (file)
@@ -340,6 +340,11 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
                I915_WRITE(FW_BLC_SELF, val);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev)) {
+               /*
+                * FIXME can't find a bit like this for 915G, and yet
+                * it does have the related watermark in
+                * FW_BLC_SELF. What's going on?
+                */
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
@@ -960,7 +965,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
        if (dev_priv->wm.pri_latency[level] == 0)
                return USHRT_MAX;
 
-       if (!state->visible)
+       if (!state->base.visible)
                return 0;
 
        cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
@@ -1002,7 +1007,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
-               if (state->visible) {
+               if (state->base.visible) {
                        wm_state->num_active_planes++;
                        total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
                }
@@ -1018,7 +1023,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
                        continue;
                }
 
-               if (!state->visible) {
+               if (!state->base.visible) {
                        plane->wm.fifo_size = 0;
                        continue;
                }
@@ -1118,7 +1123,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);
 
-               if (!state->visible)
+               if (!state->base.visible)
                        continue;
 
                /* normal watermarks */
@@ -1580,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                obj = intel_fb_obj(enabled->primary->state->fb);
 
                /* self-refresh seems busted with untiled */
-               if (obj->tiling_mode == I915_TILING_NONE)
+               if (!i915_gem_object_is_tiled(obj))
                        enabled = NULL;
        }
 
@@ -1604,6 +1609,9 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                unsigned long line_time_us;
                int entries;
 
+               if (IS_I915GM(dev) || IS_I945GM(dev))
+                       cpp = 4;
+
                line_time_us = max(htotal * 1000 / clock, 1);
 
                /* Use ns/us then divide to preserve precision */
@@ -1618,7 +1626,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                if (IS_I945G(dev) || IS_I945GM(dev))
                        I915_WRITE(FW_BLC_SELF,
                                   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-               else if (IS_I915GM(dev))
+               else
                        I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
        }
 
@@ -1767,7 +1775,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
                drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
        uint32_t method1, method2;
 
-       if (!cstate->base.active || !pstate->visible)
+       if (!cstate->base.active || !pstate->base.visible)
                return 0;
 
        method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
@@ -1777,7 +1785,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 
        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                drm_rect_width(&pstate->dst),
+                                drm_rect_width(&pstate->base.dst),
                                 cpp, mem_value);
 
        return min(method1, method2);
@@ -1795,13 +1803,13 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
                drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
        uint32_t method1, method2;
 
-       if (!cstate->base.active || !pstate->visible)
+       if (!cstate->base.active || !pstate->base.visible)
                return 0;
 
        method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                drm_rect_width(&pstate->dst),
+                                drm_rect_width(&pstate->base.dst),
                                 cpp, mem_value);
        return min(method1, method2);
 }
@@ -1820,7 +1828,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
         * this is necessary to avoid flickering.
         */
        int cpp = 4;
-       int width = pstate->visible ? pstate->base.crtc_w : 64;
+       int width = pstate->base.visible ? pstate->base.crtc_w : 64;
 
        if (!cstate->base.active)
                return 0;
@@ -1838,10 +1846,10 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
        int cpp = pstate->base.fb ?
                drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 
-       if (!cstate->base.active || !pstate->visible)
+       if (!cstate->base.active || !pstate->base.visible)
                return 0;
 
-       return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
+       return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
 }
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2358,10 +2366,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 
        pipe_wm->pipe_enabled = cstate->base.active;
        if (sprstate) {
-               pipe_wm->sprites_enabled = sprstate->visible;
-               pipe_wm->sprites_scaled = sprstate->visible &&
-                       (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
-                        drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+               pipe_wm->sprites_enabled = sprstate->base.visible;
+               pipe_wm->sprites_scaled = sprstate->base.visible &&
+                       (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
+                        drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
        }
 
        usable_level = max_level;
@@ -2845,13 +2853,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
        return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-/*
- * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
- * different active planes.
- */
-
-#define SKL_DDB_SIZE           896     /* in blocks */
-#define BXT_DDB_SIZE           512
+#define SKL_SAGV_BLOCK_TIME    30 /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
@@ -2875,6 +2877,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
        }
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ *  - <= 1 pipe enabled
+ *  - All planes can enable watermarks for latencies >= SAGV engine block time
+ *  - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+           dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+               return 0;
+
+       DRM_DEBUG_KMS("Enabling the SAGV\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                     GEN9_SAGV_ENABLE);
+
+       /* We don't need to wait for the SAGV when enabling */
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       /*
+        * Some skl systems, pre-release machines in particular,
+        * don't actually have an SAGV.
+        */
+       if (ret == -ENXIO) {
+               DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+               dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+               return 0;
+       } else if (ret < 0) {
+               DRM_ERROR("Failed to enable the SAGV\n");
+               return ret;
+       }
+
+       dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+       return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       uint32_t temp = GEN9_SAGV_DISABLE;
+
+       ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                    &temp);
+       if (ret)
+               return ret;
+       else
+               return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+       int ret, result;
+
+       if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+           dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+               return 0;
+
+       DRM_DEBUG_KMS("Disabling the SAGV\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       /* bspec says to keep retrying for at least 1 ms */
+       ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       if (ret == -ETIMEDOUT) {
+               DRM_ERROR("Request to disable SAGV timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       /*
+        * Some skl systems, pre-release machines in particular,
+        * don't actually have an SAGV.
+        */
+       if (result == -ENXIO) {
+               DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+               dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+               return 0;
+       } else if (result < 0) {
+               DRM_ERROR("Failed to disable the SAGV\n");
+               return result;
+       }
+
+       dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+       return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+       int level, plane;
+
+       /*
+        * SKL workaround: bspec recommends we disable the SAGV when we have
+        * more than one pipe enabled
+        *
+        * If there are no active CRTCs, no additional checks need be performed
+        */
+       if (hweight32(intel_state->active_crtcs) == 0)
+               return true;
+       else if (hweight32(intel_state->active_crtcs) > 1)
+               return false;
+
+       /* Since we're now guaranteed to only have one active CRTC... */
+       pipe = ffs(intel_state->active_crtcs) - 1;
+       crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+       if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+               return false;
+
+       for_each_plane(dev_priv, pipe, plane) {
+               /* Skip this plane if it's not enabled */
+               if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+                       continue;
+
+               /* Find the highest enabled wm level for this plane */
+               for (level = ilk_wm_max_level(dev);
+                    intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+                    { }
+
+               /*
+                * If any of the planes on this pipe don't enable wm levels
+                * that incur memory latencies higher than 30µs, we can't
+                * enable the SAGV.
+                */
+               if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+                       return false;
+       }
+
+       return true;
+}
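
Aside: skl_can_enable_sagv() leans on two bit tricks -- hweight32() to count
active CRTCs and ffs() - 1 to recover the index of the only set bit. A
standalone sketch, assuming the GCC/Clang __builtin_popcount() as a stand-in
for hweight32():

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        unsigned int active_crtcs = 1u << 2;    /* only pipe C (bit 2) active */

        if (__builtin_popcount(active_crtcs) == 1) {
                int pipe = ffs(active_crtcs) - 1;       /* ffs() is 1-based */

                printf("single active pipe: %d\n", pipe);       /* prints 2 */
        }
        return 0;
}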
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                                   const struct intel_crtc_state *cstate,
@@ -2901,10 +3050,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
        else
                *num_active = hweight32(dev_priv->active_crtcs);
 
-       if (IS_BROXTON(dev))
-               ddb_size = BXT_DDB_SIZE;
-       else
-               ddb_size = SKL_DDB_SIZE;
+       ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+       WARN_ON(ddb_size == 0);
 
        ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
@@ -2996,14 +3143,14 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
        uint32_t downscale_h, downscale_w;
        uint32_t src_w, src_h, dst_w, dst_h;
 
-       if (WARN_ON(!pstate->visible))
+       if (WARN_ON(!pstate->base.visible))
                return DRM_PLANE_HELPER_NO_SCALING;
 
        /* n.b., src is 16.16 fixed point, dst is whole integer */
-       src_w = drm_rect_width(&pstate->src);
-       src_h = drm_rect_height(&pstate->src);
-       dst_w = drm_rect_width(&pstate->dst);
-       dst_h = drm_rect_height(&pstate->dst);
+       src_w = drm_rect_width(&pstate->base.src);
+       src_h = drm_rect_height(&pstate->base.src);
+       dst_w = drm_rect_width(&pstate->base.dst);
+       dst_h = drm_rect_height(&pstate->base.dst);
        if (intel_rotation_90_or_270(pstate->base.rotation))
                swap(dst_w, dst_h);
 
@@ -3025,15 +3172,15 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
        uint32_t width = 0, height = 0;
        unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
 
-       if (!intel_pstate->visible)
+       if (!intel_pstate->base.visible)
                return 0;
        if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
                return 0;
        if (y && format != DRM_FORMAT_NV12)
                return 0;
 
-       width = drm_rect_width(&intel_pstate->src) >> 16;
-       height = drm_rect_height(&intel_pstate->src) >> 16;
+       width = drm_rect_width(&intel_pstate->base.src) >> 16;
+       height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
        if (intel_rotation_90_or_270(pstate->rotation))
                swap(width, height);
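
Aside: plane source rectangles are stored in 16.16 fixed point, which is why
the hunks above and below recover integer dimensions with a 16-bit right
shift. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t src_w = 1920u << 16;                   /* 1920.0 in 16.16 */
        uint32_t src_h = (1080u << 16) | 0x8000;        /* 1080.5 in 16.16 */

        /* >> 16 truncates the fractional part, as in the driver */
        printf("w=%u h=%u\n", src_w >> 16, src_h >> 16); /* w=1920 h=1080 */
        return 0;
}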
@@ -3107,8 +3254,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
                total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
        }
 
-       WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
        return total_data_rate;
 }
 
@@ -3134,8 +3279,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
            fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
                return 8;
 
-       src_w = drm_rect_width(&intel_pstate->src) >> 16;
-       src_h = drm_rect_height(&intel_pstate->src) >> 16;
+       src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
+       src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
        if (intel_rotation_90_or_270(pstate->rotation))
                swap(src_w, src_h);
@@ -3226,7 +3371,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                if (intel_plane->pipe != pipe)
                        continue;
 
-               if (!to_intel_plane_state(pstate)->visible) {
+               if (!to_intel_plane_state(pstate)->base.visible) {
                        minimum[id] = 0;
                        y_minimum[id] = 0;
                        continue;
@@ -3344,6 +3489,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                plane_bytes_per_line *= 4;
                plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
                plane_blocks_per_line /= 4;
+       } else if (tiling == DRM_FORMAT_MOD_NONE) {
+               plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
        } else {
                plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
        }
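
Aside: DIV_ROUND_UP(n, d) is the kernel's ceiling division, (n + d - 1) / d
for positive integers; the linear (DRM_FORMAT_MOD_NONE) case added above then
pads the result with one extra block. A sketch of the rounding behaviour:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int bytes = 7680;      /* e.g. 1920 px at 4 bytes per pixel */

        /* 7680/512 divides exactly; one extra byte rounds up to 16 blocks */
        printf("%u %u\n", DIV_ROUND_UP(bytes, 512),
               DIV_ROUND_UP(bytes + 1, 512));           /* prints 15 16 */
        return 0;
}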
@@ -3363,7 +3510,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
        uint64_t pixel_rate;
 
        /* Shouldn't reach here on disabled planes... */
-       if (WARN_ON(!pstate->visible))
+       if (WARN_ON(!pstate->base.visible))
                return 0;
 
        /*
@@ -3399,13 +3546,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
        uint32_t width = 0, height = 0;
        uint32_t plane_pixel_rate;
 
-       if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+       if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
                *enabled = false;
                return 0;
        }
 
-       width = drm_rect_width(&intel_pstate->src) >> 16;
-       height = drm_rect_height(&intel_pstate->src) >> 16;
+       width = drm_rect_width(&intel_pstate->base.src) >> 16;
+       height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
        if (intel_rotation_90_or_270(pstate->rotation))
                swap(width, height);
@@ -3680,183 +3827,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
                I915_WRITE(reg, 0);
 }
 
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
-                               const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+                       const struct skl_wm_values *wm,
+                       int plane)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_crtc *crtc;
-
-       for_each_intel_crtc(dev, crtc) {
-               int i, level, max_level = ilk_wm_max_level(dev);
-               enum pipe pipe = crtc->pipe;
-
-               if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
-                       continue;
-               if (!crtc->active)
-                       continue;
-
-               I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
-               for (level = 0; level <= max_level; level++) {
-                       for (i = 0; i < intel_num_planes(crtc); i++)
-                               I915_WRITE(PLANE_WM(pipe, i, level),
-                                          new->plane[pipe][i][level]);
-                       I915_WRITE(CUR_WM(pipe, level),
-                                  new->plane[pipe][PLANE_CURSOR][level]);
-               }
-               for (i = 0; i < intel_num_planes(crtc); i++)
-                       I915_WRITE(PLANE_WM_TRANS(pipe, i),
-                                  new->plane_trans[pipe][i]);
-               I915_WRITE(CUR_WM_TRANS(pipe),
-                          new->plane_trans[pipe][PLANE_CURSOR]);
-
-               for (i = 0; i < intel_num_planes(crtc); i++) {
-                       skl_ddb_entry_write(dev_priv,
-                                           PLANE_BUF_CFG(pipe, i),
-                                           &new->ddb.plane[pipe][i]);
-                       skl_ddb_entry_write(dev_priv,
-                                           PLANE_NV12_BUF_CFG(pipe, i),
-                                           &new->ddb.y_plane[pipe][i]);
-               }
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int level, max_level = ilk_wm_max_level(dev);
+       enum pipe pipe = intel_crtc->pipe;
 
-               skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-                                   &new->ddb.plane[pipe][PLANE_CURSOR]);
+       for (level = 0; level <= max_level; level++) {
+               I915_WRITE(PLANE_WM(pipe, plane, level),
+                          wm->plane[pipe][plane][level]);
        }
-}
+       I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
 
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- *  1. re-allocate the pipe(s) with the allocation being reduced and not
- *     overlapping with a previous light-up pipe (another way to put it is:
- *     pipes with their new allocation strickly included into their old ones).
- *  2. re-allocate the other pipes that get their allocation reduced
- *  3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- *     |   B    |   C    |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- *   allocation
- *     |  A  |  B  |  C  |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
+       skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+                           &wm->ddb.plane[pipe][plane]);
+       skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+                           &wm->ddb.y_plane[pipe][plane]);
+}
 
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+                        const struct skl_wm_values *wm)
 {
-       int plane;
-
-       DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int level, max_level = ilk_wm_max_level(dev);
+       enum pipe pipe = intel_crtc->pipe;
 
-       for_each_plane(dev_priv, pipe, plane) {
-               I915_WRITE(PLANE_SURF(pipe, plane),
-                          I915_READ(PLANE_SURF(pipe, plane)));
+       for (level = 0; level <= max_level; level++) {
+               I915_WRITE(CUR_WM(pipe, level),
+                          wm->plane[pipe][PLANE_CURSOR][level]);
        }
-       I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+       I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+       skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+                           &wm->ddb.plane[pipe][PLANE_CURSOR]);
 }
 
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
-                           const struct skl_ddb_allocation *new,
-                           enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+                              const struct skl_ddb_allocation *new,
+                              enum pipe pipe)
 {
-       uint16_t old_size, new_size;
-
-       old_size = skl_ddb_entry_size(&old->pipe[pipe]);
-       new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
-       return old_size != new_size &&
-              new->pipe[pipe].start >= old->pipe[pipe].start &&
-              new->pipe[pipe].end <= old->pipe[pipe].end;
+       return new->pipe[pipe].start == old->pipe[pipe].start &&
+              new->pipe[pipe].end == old->pipe[pipe].end;
 }
 
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
-                               struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+                                          const struct skl_ddb_entry *b)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct skl_ddb_allocation *cur_ddb, *new_ddb;
-       bool reallocated[I915_MAX_PIPES] = {};
-       struct intel_crtc *crtc;
-       enum pipe pipe;
-
-       new_ddb = &new_values->ddb;
-       cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
-       /*
-        * First pass: flush the pipes with the new allocation contained into
-        * the old space.
-        *
-        * We'll wait for the vblank on those pipes to ensure we can safely
-        * re-allocate the freed space without this pipe fetching from it.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
-
-               pipe = crtc->pipe;
-
-               if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
-                       continue;
-
-               skl_wm_flush_pipe(dev_priv, pipe, 1);
-               intel_wait_for_vblank(dev, pipe);
-
-               reallocated[pipe] = true;
-       }
-
+       return a->start < b->end && b->start < a->end;
+}
 
-       /*
-        * Second pass: flush the pipes that are having their allocation
-        * reduced, but overlapping with a previous allocation.
-        *
-        * Here as well we need to wait for the vblank to make sure the freed
-        * space is not used anymore.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+                                const struct skl_ddb_allocation *old,
+                                const struct skl_ddb_allocation *new,
+                                enum pipe pipe)
+{
+       struct drm_device *dev = state->dev;
+       struct intel_crtc *intel_crtc;
+       enum pipe otherp;
 
-               pipe = crtc->pipe;
+       for_each_intel_crtc(dev, intel_crtc) {
+               otherp = intel_crtc->pipe;
 
-               if (reallocated[pipe])
+               if (otherp == pipe)
                        continue;
 
-               if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
-                   skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
-                       skl_wm_flush_pipe(dev_priv, pipe, 2);
-                       intel_wait_for_vblank(dev, pipe);
-                       reallocated[pipe] = true;
-               }
+               if (skl_ddb_entries_overlap(&new->pipe[pipe],
+                                           &old->pipe[otherp]))
+                       return true;
        }
 
-       /*
-        * Third pass: flush the pipes that got more space allocated.
-        *
-        * We don't need to actively wait for the update here, next vblank
-        * will just get more DDB space with the correct WM values.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
-
-               pipe = crtc->pipe;
-
-               /*
-                * At this point, only the pipes more space than before are
-                * left to re-allocate.
-                */
-               if (reallocated[pipe])
-                       continue;
-
-               skl_wm_flush_pipe(dev_priv, pipe, 3);
-       }
+       return false;
 }
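
Aside: skl_ddb_entries_overlap() is the standard half-open interval test --
[a.start, a.end) and [b.start, b.end) overlap exactly when each range starts
before the other ends. A self-contained check of that predicate:

#include <stdbool.h>
#include <stdio.h>

struct entry { unsigned int start, end; };      /* half-open [start, end) */

static bool overlap(const struct entry *a, const struct entry *b)
{
        return a->start < b->end && b->start < a->end;
}

int main(void)
{
        struct entry a = { 0, 100 }, b = { 100, 200 }, c = { 50, 150 };

        /* adjacent ranges do not overlap; straddling ones do */
        printf("%d %d\n", overlap(&a, &b), overlap(&a, &c));    /* 0 1 */
        return 0;
}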
 
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -3910,9 +3956,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
         * pretend that all pipes switched active status so that we'll
         * ensure a full DDB recompute.
         */
-       if (dev_priv->wm.distrust_bios_wm)
+       if (dev_priv->wm.distrust_bios_wm) {
+               ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+                                      state->acquire_ctx);
+               if (ret)
+                       return ret;
+
                intel_state->active_pipe_changes = ~0;
 
+               /*
+                * We usually only initialize intel_state->active_crtcs if
+                * we're doing a modeset; make sure this field is always
+                * initialized during the sanitization process that happens
+                * on the first commit too.
+                */
+               if (!intel_state->modeset)
+                       intel_state->active_crtcs = dev_priv->active_crtcs;
+       }
+
        /*
         * If the modeset changes which CRTC's are active, we need to
         * recompute the DDB allocation for *all* active pipes, even
@@ -3941,11 +4002,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
                ret = skl_allocate_pipe_ddb(cstate, ddb);
                if (ret)
                        return ret;
+
+               ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+               if (ret)
+                       return ret;
        }
 
        return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+                    struct skl_wm_values *src,
+                    enum pipe pipe)
+{
+       dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+       memcpy(dst->plane[pipe], src->plane[pipe],
+              sizeof(dst->plane[pipe]));
+       memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+              sizeof(dst->plane_trans[pipe]));
+
+       dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+       memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+              sizeof(dst->ddb.y_plane[pipe]));
+       memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+              sizeof(dst->ddb.plane[pipe]));
+}
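
Aside: skl_copy_wm_for_pipe() sizes each memcpy() with sizeof on the per-pipe
array slice itself, so the copy length tracks the struct layout automatically.
A minimal sketch with an illustrative layout (not the driver's):

#include <stdio.h>
#include <string.h>

struct wm { unsigned int plane[3][4]; };        /* [pipe][level], made up */

static void copy_pipe(struct wm *dst, const struct wm *src, int pipe)
{
        /* sizeof(dst->plane[pipe]) is one pipe's whole row: 4 u32s here */
        memcpy(dst->plane[pipe], src->plane[pipe], sizeof(dst->plane[pipe]));
}

int main(void)
{
        struct wm a = { .plane = { { 1, 2, 3, 4 } } }, b = { 0 };

        copy_pipe(&b, &a, 0);
        printf("%u %u\n", b.plane[0][0], b.plane[0][3]);        /* 1 4 */
        return 0;
}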
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4018,8 +4101,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *results = &dev_priv->wm.skl_results;
+       struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
        struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+       enum pipe pipe = intel_crtc->pipe;
 
        if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
                return;
@@ -4028,11 +4113,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
 
        mutex_lock(&dev_priv->wm.wm_mutex);
 
-       skl_write_wm_values(dev_priv, results);
-       skl_flush_wm_values(dev_priv, results);
+       /*
+        * If this pipe isn't active already, we're going to be enabling it
+        * very soon. Since it's safe to update a pipe's ddb allocation while
+        * the pipe's shut off, just do so here. Already active pipes will have
+        * their watermarks updated once we update their planes.
+        */
+       if (crtc->state->active_changed) {
+               int plane;
+
+               for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+                       skl_write_plane_wm(intel_crtc, results, plane);
+
+               skl_write_cursor_wm(intel_crtc, results);
+       }
 
-       /* store the new configuration */
-       dev_priv->wm.skl_hw = *results;
+       skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -4892,7 +4988,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
                else
                        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
                dev_priv->rps.last_adj = 0;
-               I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_sanitize_rps_pm_mask(dev_priv, ~0));
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4911,7 +5008,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
         */
        if (!(dev_priv->gt.awake &&
              dev_priv->rps.enabled &&
-             dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
+             dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
                return;
 
        /* Force a RPS boost (and don't count it against the client) if
@@ -5102,35 +5199,31 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 
 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
-       uint32_t rp_state_cap;
-       u32 ddcc_status = 0;
-       int ret;
-
        /* All of these values are in units of 50MHz */
-       dev_priv->rps.cur_freq          = 0;
+
        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
        if (IS_BROXTON(dev_priv)) {
-               rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+               u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
                dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
                dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
        } else {
-               rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+               u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
                dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
                dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
        }
-
        /* hw_max = RP0 until we check for overclocking */
-       dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
+       dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
        dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
            IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               ret = sandybridge_pcode_read(dev_priv,
-                                       HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-                                       &ddcc_status);
-               if (0 == ret)
+               u32 ddcc_status = 0;
+
+               if (sandybridge_pcode_read(dev_priv,
+                                          HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+                                          &ddcc_status) == 0)
                        dev_priv->rps.efficient_freq =
                                clamp_t(u8,
                                        ((ddcc_status >> 8) & 0xff),
@@ -5140,29 +5233,26 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        /* Store the frequency values in 16.66 MHz units, which is
-                  the natural hardware unit for SKL */
+                * the natural hardware unit for SKL
+                */
                dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
                dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
                dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
                dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
                dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
        }
+}
 
-       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+static void reset_rps(struct drm_i915_private *dev_priv,
+                     void (*set)(struct drm_i915_private *, u8))
+{
+       u8 freq = dev_priv->rps.cur_freq;
 
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       /* force a reset */
+       dev_priv->rps.power = -1;
+       dev_priv->rps.cur_freq = -1;
 
-       if (dev_priv->rps.min_freq_softlimit == 0) {
-               if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-                       dev_priv->rps.min_freq_softlimit =
-                               max_t(int, dev_priv->rps.efficient_freq,
-                                     intel_freq_opcode(dev_priv, 450));
-               else
-                       dev_priv->rps.min_freq_softlimit =
-                               dev_priv->rps.min_freq;
-       }
+       set(dev_priv, freq);
 }
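
Aside: reset_rps() captures a small pattern worth naming -- poison the cached
state so the platform setter cannot short-circuit on "value unchanged", then
re-apply the saved value through a callback so one helper serves both
gen6_set_rps() and valleyview_set_rps(). A userspace analogue with
hypothetical names:

#include <stdio.h>

struct device_state { int power_mode; int cur_freq; };

static void set_freq(struct device_state *d, unsigned char freq)
{
        if (d->cur_freq == freq)        /* would skip without the poison */
                return;
        d->cur_freq = freq;
        printf("programmed freq %u\n", freq);
}

static void reset_freq(struct device_state *d,
                       void (*set)(struct device_state *, unsigned char))
{
        unsigned char freq = (unsigned char)d->cur_freq;

        d->power_mode = -1;             /* force a reset */
        d->cur_freq = -1;

        set(d, freq);   /* re-applies even an "unchanged" value */
}

int main(void)
{
        struct device_state d = { .power_mode = 0, .cur_freq = 7 };

        reset_freq(&d, set_freq);
        return 0;
}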
 
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
@@ -5170,8 +5260,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       gen6_init_rps_frequencies(dev_priv);
-
        /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                /*
@@ -5201,8 +5289,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
        /* Leaning on the below call to gen6_set_rps to program/setup the
         * Up/Down EI & threshold registers, as well as the RP_CONTROL,
         * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
-       dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+       reset_rps(dev_priv, gen6_set_rps);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5289,9 +5376,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
        /* 2a: Disable RC states. */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
-       /* Initialize rps frequencies */
-       gen6_init_rps_frequencies(dev_priv);
-
        /* 2b: Program RC6 thresholds.*/
        I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
@@ -5348,8 +5432,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 
        /* 6: Ring frequency + overclocking (our driver does this later) */
 
-       dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+       reset_rps(dev_priv, gen6_set_rps);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5357,7 +5440,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
-       u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+       u32 rc6vids, rc6_mask = 0;
        u32 gtfifodbg;
        int rc6_mode;
        int ret;
@@ -5381,9 +5464,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       /* Initialize rps frequencies */
-       gen6_init_rps_frequencies(dev_priv);
-
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -5434,16 +5514,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
        if (ret)
                DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 
-       ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
-       if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
-               DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-                                (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
-                                (pcu_mbox & 0xff) * 50);
-               dev_priv->rps.max_freq = pcu_mbox & 0xff;
-       }
-
-       dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+       reset_rps(dev_priv, gen6_set_rps);
 
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -5462,7 +5533,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
        int min_freq = 15;
        unsigned int gpu_freq;
@@ -5546,23 +5617,13 @@ static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
        }
 }
 
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
-       if (!HAS_CORE_RING_FREQ(dev_priv))
-               return;
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-       __gen6_update_ring_freq(dev_priv);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
        u32 val, rp0;
 
        val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (INTEL_INFO(dev_priv)->eu_total) {
+       switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5700,8 +5761,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
        u32 pcbr;
        int pctx_size = 24*1024;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-
        pcbr = I915_READ(VLV_PCBR);
        if (pcbr) {
                /* BIOS set it up already, grab the pre-alloc'd space */
@@ -5737,7 +5796,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 out:
        DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
        dev_priv->vlv_pctx = pctx;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -5745,7 +5803,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
        if (WARN_ON(!dev_priv->vlv_pctx))
                return;
 
-       drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
+       i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
        dev_priv->vlv_pctx = NULL;
 }
 
@@ -5768,8 +5826,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
 
        vlv_init_gpll_ref_freq(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
        switch ((val >> 6) & 3) {
        case 0:
@@ -5805,17 +5861,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
                         intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
                         dev_priv->rps.min_freq);
-
-       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-       if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -5826,8 +5871,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
 
        vlv_init_gpll_ref_freq(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
        mutex_unlock(&dev_priv->sb_lock);
@@ -5869,17 +5912,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
                   dev_priv->rps.rp1_freq |
                   dev_priv->rps.min_freq) & 1,
                  "Odd GPU freq values\n");
-
-       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-       if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -5970,16 +6002,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-                        dev_priv->rps.cur_freq);
-
-       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-                        dev_priv->rps.idle_freq);
-
-       valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+       reset_rps(dev_priv, valleyview_set_rps);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -6059,16 +6082,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-                        dev_priv->rps.cur_freq);
-
-       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-                        dev_priv->rps.idle_freq);
-
-       valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+       reset_rps(dev_priv, valleyview_set_rps);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -6397,19 +6411,11 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
  */
 bool i915_gpu_busy(void)
 {
-       struct drm_i915_private *dev_priv;
-       struct intel_engine_cs *engine;
        bool ret = false;
 
        spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev)
-               goto out_unlock;
-       dev_priv = i915_mch_dev;
-
-       for_each_engine(engine, dev_priv)
-               ret |= !list_empty(&engine->request_list);
-
-out_unlock:
+       if (i915_mch_dev)
+               ret = i915_mch_dev->gt.awake;
        spin_unlock_irq(&mchdev_lock);
 
        return ret;
@@ -6565,30 +6571,62 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                intel_runtime_pm_get(dev_priv);
        }
 
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       /* Initialize RPS limits (for userspace) */
        if (IS_CHERRYVIEW(dev_priv))
                cherryview_init_gt_powersave(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
                valleyview_init_gt_powersave(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               gen6_init_rps_frequencies(dev_priv);
+
+       /* Derive initial user preferences/limits from the hardware limits */
+       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+       dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+
+       dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               dev_priv->rps.min_freq_softlimit =
+                       max_t(int,
+                             dev_priv->rps.efficient_freq,
+                             intel_freq_opcode(dev_priv, 450));
+
+       /* After setting max-softlimit, find the overclock max freq */
+       if (IS_GEN6(dev_priv) ||
+           IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+               u32 params = 0;
+
+               sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+               if (params & BIT(31)) { /* OC supported */
+                       DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+                                        (dev_priv->rps.max_freq & 0xff) * 50,
+                                        (params & 0xff) * 50);
+                       dev_priv->rps.max_freq = params & 0xff;
+               }
+       }
+
+       /* Finally allow us to boost to max by default */
+       dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       intel_autoenable_gt_powersave(dev_priv);
 }
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       if (IS_CHERRYVIEW(dev_priv))
-               return;
-       else if (IS_VALLEYVIEW(dev_priv))
+       if (IS_VALLEYVIEW(dev_priv))
                valleyview_cleanup_gt_powersave(dev_priv);
 
        if (!i915.enable_rc6)
                intel_runtime_pm_put(dev_priv);
 }
 
-static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
-{
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       gen6_disable_rps_interrupts(dev_priv);
-}
-
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
  * @dev_priv: i915 device
@@ -6602,60 +6640,76 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) < 6)
                return;
 
-       gen6_suspend_rps(dev_priv);
+       if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+               intel_runtime_pm_put(dev_priv);
 
-       /* Force GPU to min freq during suspend */
-       gen6_rps_idle(dev_priv);
+       /* gen6_rps_idle() will be called later to disable interrupts */
+}
+
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
+{
+       dev_priv->rps.enabled = true; /* force disabling */
+       intel_disable_gt_powersave(dev_priv);
+
+       gen6_reset_rps_interrupts(dev_priv);
 }
 
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       if (IS_IRONLAKE_M(dev_priv)) {
-               ironlake_disable_drps(dev_priv);
-       } else if (INTEL_INFO(dev_priv)->gen >= 6) {
-               intel_suspend_gt_powersave(dev_priv);
+       if (!READ_ONCE(dev_priv->rps.enabled))
+               return;
 
-               mutex_lock(&dev_priv->rps.hw_lock);
-               if (INTEL_INFO(dev_priv)->gen >= 9) {
-                       gen9_disable_rc6(dev_priv);
-                       gen9_disable_rps(dev_priv);
-               } else if (IS_CHERRYVIEW(dev_priv))
-                       cherryview_disable_rps(dev_priv);
-               else if (IS_VALLEYVIEW(dev_priv))
-                       valleyview_disable_rps(dev_priv);
-               else
-                       gen6_disable_rps(dev_priv);
+       mutex_lock(&dev_priv->rps.hw_lock);
 
-               dev_priv->rps.enabled = false;
-               mutex_unlock(&dev_priv->rps.hw_lock);
+       if (INTEL_GEN(dev_priv) >= 9) {
+               gen9_disable_rc6(dev_priv);
+               gen9_disable_rps(dev_priv);
+       } else if (IS_CHERRYVIEW(dev_priv)) {
+               cherryview_disable_rps(dev_priv);
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               valleyview_disable_rps(dev_priv);
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               gen6_disable_rps(dev_priv);
+       } else if (IS_IRONLAKE_M(dev_priv)) {
+               ironlake_disable_drps(dev_priv);
        }
+
+       dev_priv->rps.enabled = false;
+       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void intel_gen6_powersave_work(struct work_struct *work)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private,
-                            rps.delayed_resume_work.work);
+       /* We shouldn't be disabling as we submit, so this should be less
+        * racy than it appears!
+        */
+       if (READ_ONCE(dev_priv->rps.enabled))
+               return;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       /* Powersaving is controlled by the host when inside a VM */
+       if (intel_vgpu_active(dev_priv))
+               return;
 
-       gen6_reset_rps_interrupts(dev_priv);
+       mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_CHERRYVIEW(dev_priv)) {
                cherryview_enable_rps(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                valleyview_enable_rps(dev_priv);
-       } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+       } else if (INTEL_GEN(dev_priv) >= 9) {
                gen9_enable_rc6(dev_priv);
                gen9_enable_rps(dev_priv);
                if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-                       __gen6_update_ring_freq(dev_priv);
+                       gen6_update_ring_freq(dev_priv);
        } else if (IS_BROADWELL(dev_priv)) {
                gen8_enable_rps(dev_priv);
-               __gen6_update_ring_freq(dev_priv);
-       } else {
+               gen6_update_ring_freq(dev_priv);
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                gen6_enable_rps(dev_priv);
-               __gen6_update_ring_freq(dev_priv);
+               gen6_update_ring_freq(dev_priv);
+       } else if (IS_IRONLAKE_M(dev_priv)) {
+               ironlake_enable_drps(dev_priv);
+               intel_init_emon(dev_priv);
        }
 
        WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6665,25 +6719,52 @@ static void intel_gen6_powersave_work(struct work_struct *work)
        WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
 
        dev_priv->rps.enabled = true;
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
 
-       gen6_enable_rps_interrupts(dev_priv);
+static void __intel_autoenable_gt_powersave(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+       struct intel_engine_cs *rcs;
+       struct drm_i915_gem_request *req;
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (READ_ONCE(dev_priv->rps.enabled))
+               goto out;
+
+       rcs = &dev_priv->engine[RCS];
+       if (rcs->last_context)
+               goto out;
+
+       if (!rcs->init_context)
+               goto out;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
 
+       req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
+       if (IS_ERR(req))
+               goto unlock;
+
+       if (!i915.enable_execlists && i915_switch_context(req) == 0)
+               rcs->init_context(req);
+
+       /* Mark the device busy, calling intel_enable_gt_powersave() */
+       i915_add_request_no_flush(req);
+
+unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+out:
        intel_runtime_pm_put(dev_priv);
 }
 
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       /* Powersaving is controlled by the host when inside a VM */
-       if (intel_vgpu_active(dev_priv))
+       if (READ_ONCE(dev_priv->rps.enabled))
                return;
 
        if (IS_IRONLAKE_M(dev_priv)) {
                ironlake_enable_drps(dev_priv);
-               mutex_lock(&dev_priv->drm.struct_mutex);
                intel_init_emon(dev_priv);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
        } else if (INTEL_INFO(dev_priv)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
@@ -6697,21 +6778,13 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
                 * paths, so the _noresume version is enough (and in case of
                 * runtime resume it's necessary).
                 */
-               if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
-                                          round_jiffies_up_relative(HZ)))
+               if (queue_delayed_work(dev_priv->wq,
+                                      &dev_priv->rps.autoenable_work,
+                                      round_jiffies_up_relative(HZ)))
                        intel_runtime_pm_get_noresume(dev_priv);
        }
 }
 
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_INFO(dev_priv)->gen < 6)
-               return;
-
-       gen6_suspend_rps(dev_priv);
-       dev_priv->rps.enabled = false;
-}
-
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -7657,8 +7730,54 @@ void intel_init_pm(struct drm_device *dev)
        }
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+       uint32_t flags =
+               I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+       switch (flags) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_UNIMPLEMENTED_CMD:
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       case GEN6_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       default:
+               MISSING_CASE(flags);
+               return 0;
+       }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+       uint32_t flags =
+               I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+       switch (flags) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN7_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       case GEN7_PCODE_ILLEGAL_DATA:
+               return -EINVAL;
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       default:
+               MISSING_CASE(flags);
+               return 0;
+       }
+}
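
Aside: the two helpers above fold raw mailbox error flags into standard
errnos so that callers such as skl_disable_sagv() can react per error class
(e.g. treat -ENXIO as "no such unit, ignore") instead of per flag. A hedged
caller-side sketch; the flag values here are invented for illustration:

#include <errno.h>
#include <stdio.h>

/* Hypothetical decoder in the spirit of gen6/gen7_check_mailbox_status() */
static int check_status(unsigned int flags)
{
        switch (flags) {
        case 0x0: return 0;             /* success */
        case 0x2: return -ENXIO;        /* illegal command: unit absent */
        case 0x3: return -ETIMEDOUT;    /* firmware timed out */
        default:  return -EINVAL;
        }
}

int main(void)
{
        int ret = check_status(0x2);

        if (ret == -ENXIO)
                printf("feature absent on this system, ignoring\n");
        else if (ret < 0)
                printf("mailbox access failed: %d\n", ret);
        return 0;
}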
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+       int status;
+
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7685,12 +7804,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
        *val = I915_READ_FW(GEN6_PCODE_DATA);
        I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+       if (INTEL_GEN(dev_priv) > 6)
+               status = gen7_check_mailbox_status(dev_priv);
+       else
+               status = gen6_check_mailbox_status(dev_priv);
+
+       if (status) {
+               DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+                                status);
+               return status;
+       }
+
        return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-                              u32 mbox, u32 val)
+                           u32 mbox, u32 val)
 {
+       int status;
+
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7715,6 +7847,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
        I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+       if (INTEL_GEN(dev_priv) > 6)
+               status = gen7_check_mailbox_status(dev_priv);
+       else
+               status = gen6_check_mailbox_status(dev_priv);
+
+       if (status) {
+               DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+                                status);
+               return status;
+       }
+
        return 0;
 }
 
@@ -7786,7 +7929,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
        if (!i915_gem_request_completed(req))
                gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
-       i915_gem_request_unreference(req);
+       i915_gem_request_put(req);
        kfree(boost);
 }
 
@@ -7804,8 +7947,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
        if (boost == NULL)
                return;
 
-       i915_gem_request_reference(req);
-       boost->req = req;
+       boost->req = i915_gem_request_get(req);
 
        INIT_WORK(&boost->work, __intel_rps_boost_work);
        queue_work(req->i915->wq, &boost->work);
@@ -7818,11 +7960,9 @@ void intel_pm_setup(struct drm_device *dev)
        mutex_init(&dev_priv->rps.hw_lock);
        spin_lock_init(&dev_priv->rps.client_lock);
 
-       INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
-                         intel_gen6_powersave_work);
+       INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+                         __intel_autoenable_gt_powersave);
        INIT_LIST_HEAD(&dev_priv->rps.clients);
-       INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
-       INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
 
        dev_priv->pm.suspended = false;
        atomic_set(&dev_priv->pm.wakeref_count, 0);
index 2b0d1baf15b3cc6c2b57db06e91fe774be4d99ea..108ba1e5d65872cfcf33ee4b7bc6ca9ea2ed625e 100644 (file)
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        uint32_t max_sleep_time = 0x1f;
-       /* Lately it was identified that depending on panel idle frame count
-        * calculated at HW can be off by 1. So let's use what came
-        * from VBT + 1.
-        * There are also other cases where panel demands at least 4
-        * but VBT is not being set. To cover these 2 cases lets use
-        * at least 5 when VBT isn't set to be on the safest side.
+       /*
+        * Let's respect VBT in case VBT asks for a higher idle_frame value.
+        * Let's use 6 as the minimum to cover all known cases, including
+        * the off-by-one issue that HW has in some cases. Also, there are
+        * cases where the sink should be able to train with 5 or 6 idle
+        * patterns.
         */
-       uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val = EDP_PSR_ENABLE;
 
        val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
@@ -645,9 +645,8 @@ unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_device *dev)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -656,7 +655,7 @@ static void intel_psr_exit(struct drm_device *dev)
        if (!dev_priv->psr.active)
                return;
 
-       if (HAS_DDI(dev)) {
+       if (HAS_DDI(dev_priv)) {
                val = I915_READ(EDP_PSR_CTL);
 
                WARN_ON(!(val & EDP_PSR_ENABLE));
@@ -691,7 +690,7 @@ static void intel_psr_exit(struct drm_device *dev)
 
 /**
  * intel_psr_single_frame_update - Single Frame Update
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
@@ -699,10 +698,9 @@ static void intel_psr_exit(struct drm_device *dev)
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
                                   unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;
@@ -711,7 +709,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
         * Single frame update is already supported on BDW+ but it requires
         * many W/A and it isn't really needed.
         */
-       if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return;
 
        mutex_lock(&dev_priv->psr.lock);
@@ -737,7 +735,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 
 /**
 * intel_psr_invalidate - Invalidate PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
@@ -747,10 +745,9 @@ void intel_psr_single_frame_update(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -767,14 +764,14 @@ void intel_psr_invalidate(struct drm_device *dev,
        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
        if (frontbuffer_bits)
-               intel_psr_exit(dev);
+               intel_psr_exit(dev_priv);
 
        mutex_unlock(&dev_priv->psr.lock);
 }
 
 /**
  * intel_psr_flush - Flush PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -785,10 +782,9 @@ void intel_psr_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -806,7 +802,7 @@ void intel_psr_flush(struct drm_device *dev,
 
        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits)
-               intel_psr_exit(dev);
+               intel_psr_exit(dev_priv);
 
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                if (!work_busy(&dev_priv->psr.work.work))
index 5bd69852752c663ae22ccf4aedd1616518962225..08f6fea05a2c40a7710eea1cfa26de693671e7c0 100644 (file)
 #ifndef _INTEL_RENDERSTATE_H
 #define _INTEL_RENDERSTATE_H
 
-#include "i915_drv.h"
+#include <linux/types.h>
 
-extern const struct intel_renderstate_rodata gen6_null_state;
-extern const struct intel_renderstate_rodata gen7_null_state;
-extern const struct intel_renderstate_rodata gen8_null_state;
-extern const struct intel_renderstate_rodata gen9_null_state;
+struct intel_renderstate_rodata {
+       const u32 *reloc;
+       const u32 *batch;
+       const u32 batch_items;
+};
 
 #define RO_RENDERSTATE(_g)                                             \
        const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
@@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state;
                .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
        }
 
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
 #endif /* INTEL_RENDERSTATE_H */
index cca7792f26d5058c99edfee0d99c76d7706b0ebe..7a74750076c57cb21b91461cbefe386e372ca783 100644 (file)
@@ -47,57 +47,44 @@ int __intel_ring_space(int head, int tail, int size)
        return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ring)
 {
-       if (ringbuf->last_retired_head != -1) {
-               ringbuf->head = ringbuf->last_retired_head;
-               ringbuf->last_retired_head = -1;
+       if (ring->last_retired_head != -1) {
+               ring->head = ring->last_retired_head;
+               ring->last_retired_head = -1;
        }
 
-       ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
-                                           ringbuf->tail, ringbuf->size);
-}
-
-static void __intel_ring_advance(struct intel_engine_cs *engine)
-{
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       ringbuf->tail &= ringbuf->size - 1;
-       engine->write_tail(engine, ringbuf->tail);
+       ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+                                        ring->tail, ring->size);
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32      invalidate_domains,
-                      u32      flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        u32 cmd;
        int ret;
 
        cmd = MI_FLUSH;
-       if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
-               cmd |= MI_NO_WRITE_FLUSH;
 
-       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, cmd);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32      invalidate_domains,
-                      u32      flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        u32 cmd;
        int ret;
 
@@ -129,23 +116,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
         * are flushed at any MI_FLUSH.
         */
 
-       cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-       if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
-               cmd &= ~MI_NO_WRITE_FLUSH;
-       if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+       cmd = MI_FLUSH;
+       if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
-
-       if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-           (IS_G4X(req->i915) || IS_GEN5(req->i915)))
-               cmd |= MI_INVALIDATE_ISP;
+               if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+                       cmd |= MI_INVALIDATE_ISP;
+       }
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, cmd);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -190,45 +174,46 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       struct intel_ring *ring = req->ring;
+       u32 scratch_addr =
+               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        int ret;
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-       intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+       intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
-       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-       intel_ring_emit(engine, 0); /* low dword */
-       intel_ring_emit(engine, 0); /* high dword */
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, 0); /* low dword */
+       intel_ring_emit(ring, 0); /* high dword */
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-       intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
-       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+       intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
+       u32 scratch_addr =
+               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -240,7 +225,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
         * number of bits based on the write domains has little performance
         * impact.
         */
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /*
@@ -249,7 +234,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
                 */
                flags |= PIPE_CONTROL_CS_STALL;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -266,11 +251,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(engine, flags);
-       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, 0);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(ring, flags);
+       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
 
        return 0;
 }
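
Usage sketch for the reworked hook: callers OR the mode bits together instead of passing GEM domain masks, so a full barrier (as used around the workaround writes later in this patch) is simply:

	ret = req->engine->emit_flush(req, EMIT_FLUSH | EMIT_INVALIDATE); /* == EMIT_BARRIER */
	if (ret)
		return ret;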
@@ -278,30 +263,31 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
-                             PIPE_CONTROL_STALL_AT_SCOREBOARD);
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, 0);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(ring,
+                       PIPE_CONTROL_CS_STALL |
+                       PIPE_CONTROL_STALL_AT_SCOREBOARD);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
+       u32 scratch_addr =
+               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /*
@@ -318,13 +304,13 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
         * number of bits based on the write domains has little performance
         * impact.
         */
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -350,11 +336,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(engine, flags);
-       intel_ring_emit(engine, scratch_addr);
-       intel_ring_emit(engine, 0);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(ring, flags);
+       intel_ring_emit(ring, scratch_addr);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -363,41 +349,41 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
                       u32 flags, u32 scratch_addr)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-       intel_ring_emit(engine, flags);
-       intel_ring_emit(engine, scratch_addr);
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, 0);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(ring, flags);
+       intel_ring_emit(ring, scratch_addr);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
+       u32 scratch_addr =
+               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
-       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        flags |= PIPE_CONTROL_CS_STALL;
 
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -419,14 +405,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *engine,
-                           u32 value)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       I915_WRITE_TAIL(engine, value);
-}
-
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;
@@ -488,7 +467,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                mmio = RING_HWS_PGA(engine->mmio_base);
        }
 
-       I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
+       I915_WRITE(mmio, engine->status_page.ggtt_offset);
        POSTING_READ(mmio);
 
        /*
@@ -519,7 +498,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!IS_GEN2(dev_priv)) {
+       if (INTEL_GEN(dev_priv) > 2) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
                if (intel_wait_for_register(dev_priv,
                                            RING_MI_MODE(engine->mmio_base),
@@ -539,9 +518,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
        I915_WRITE_CTL(engine, 0);
        I915_WRITE_HEAD(engine, 0);
-       engine->write_tail(engine, 0);
+       I915_WRITE_TAIL(engine, 0);
 
-       if (!IS_GEN2(dev_priv)) {
+       if (INTEL_GEN(dev_priv) > 2) {
                (void)I915_READ_CTL(engine);
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
@@ -549,16 +528,10 @@ static bool stop_ring(struct intel_engine_cs *engine)
        return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
-       memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
 static int init_ring_common(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       struct drm_i915_gem_object *obj = ringbuf->obj;
+       struct intel_ring *ring = engine->buffer;
        int ret = 0;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -586,10 +559,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev_priv))
-               intel_ring_setup_status_page(engine);
-       else
+       if (HWS_NEEDS_PHYSICAL(dev_priv))
                ring_setup_phys_status_page(engine);
+       else
+               intel_ring_setup_status_page(engine);
+
+       intel_engine_reset_irq(engine);
 
        /* Enforce ordering by reading HEAD register back */
        I915_READ_HEAD(engine);
@@ -598,40 +573,39 @@ static int init_ring_common(struct intel_engine_cs *engine)
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
+       I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
 
        /* WaClearRingBufHeadRegAtInit:ctg,elk */
        if (I915_READ_HEAD(engine))
                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
                          engine->name, I915_READ_HEAD(engine));
-       I915_WRITE_HEAD(engine, 0);
-       (void)I915_READ_HEAD(engine);
+
+       intel_ring_update_space(ring);
+       I915_WRITE_HEAD(engine, ring->head);
+       I915_WRITE_TAIL(engine, ring->tail);
+       (void)I915_READ_TAIL(engine);
 
        I915_WRITE_CTL(engine,
-                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
-                    I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
-                    (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
+       if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
+                                      RING_VALID, RING_VALID,
+                                      50)) {
                DRM_ERROR("%s initialization failed "
-                         "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+                         "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
                          engine->name,
                          I915_READ_CTL(engine),
                          I915_READ_CTL(engine) & RING_VALID,
-                         I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+                         I915_READ_HEAD(engine), ring->head,
+                         I915_READ_TAIL(engine), ring->tail,
                          I915_READ_START(engine),
-                         (unsigned long)i915_gem_obj_ggtt_offset(obj));
+                         i915_ggtt_offset(ring->vma));
                ret = -EIO;
                goto out;
        }
 
-       ringbuf->last_retired_head = -1;
-       ringbuf->head = I915_READ_HEAD(engine);
-       ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-       intel_ring_update_space(ringbuf);
-
        intel_engine_init_hangcheck(engine);
 
 out:
@@ -640,59 +614,25 @@ out:
        return ret;
 }
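
For readers unfamiliar with the helper now used for the RING_VALID check above: intel_wait_for_register_fw() polls until the masked register bits equal the expected value or the millisecond timeout expires. A minimal conceptual sketch, ignoring the real helper's forcewake handling and hybrid busy/sleeping wait (poll_register_fw is a hypothetical name):

	static int poll_register_fw(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			/* success once the masked bits match the expected value */
			if ((I915_READ_FW(reg) & mask) == value)
				return 0;
		} while (time_before(jiffies, deadline));

		return -ETIMEDOUT;
	}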
 
-void intel_fini_pipe_control(struct intel_engine_cs *engine)
-{
-       if (engine->scratch.obj == NULL)
-               return;
-
-       i915_gem_object_ggtt_unpin(engine->scratch.obj);
-       drm_gem_object_unreference(&engine->scratch.obj->base);
-       engine->scratch.obj = NULL;
-}
-
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
+static void reset_ring_common(struct intel_engine_cs *engine,
+                             struct drm_i915_gem_request *request)
 {
-       struct drm_i915_gem_object *obj;
-       int ret;
-
-       WARN_ON(engine->scratch.obj);
+       struct intel_ring *ring = request->ring;
 
-       obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
-       if (!obj)
-               obj = i915_gem_object_create(&engine->i915->drm, size);
-       if (IS_ERR(obj)) {
-               DRM_ERROR("Failed to allocate scratch page\n");
-               ret = PTR_ERR(obj);
-               goto err;
-       }
-
-       ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
-       if (ret)
-               goto err_unref;
-
-       engine->scratch.obj = obj;
-       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-       DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-                        engine->name, engine->scratch.gtt_offset);
-       return 0;
-
-err_unref:
-       drm_gem_object_unreference(&engine->scratch.obj->base);
-err:
-       return ret;
+       ring->head = request->postfix;
+       ring->last_retired_head = -1;
 }
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct i915_workarounds *w = &req->i915->workarounds;
        int ret, i;
 
        if (w->count == 0)
                return 0;
 
-       engine->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(req);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
@@ -700,17 +640,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
-               intel_ring_emit_reg(engine, w->reg[i].addr);
-               intel_ring_emit(engine, w->reg[i].value);
+               intel_ring_emit_reg(ring, w->reg[i].addr);
+               intel_ring_emit(ring, w->reg[i].value);
        }
-       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_emit(ring, MI_NOOP);
 
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
-       engine->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(req);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
@@ -1022,7 +961,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
-               if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
+               if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
                        continue;
 
                /*
@@ -1031,7 +970,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 *
                 * ->    0 <= ss <= 3;
                 */
-               ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+               ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }
 
@@ -1178,8 +1117,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
                                           L3_HIGH_PRIO_CREDITS(2));
 
-       /* WaInsertDummyPushConstPs:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+       /* WaToEnableHwFixForPushConstHWBug:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
@@ -1222,8 +1161,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
 
-       /* WaInsertDummyPushConstPs:kbl */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+       /* WaToEnableHwFixForPushConstHWBug:kbl */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
@@ -1329,191 +1268,194 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       if (dev_priv->semaphore_obj) {
-               i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
-               drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
-               dev_priv->semaphore_obj = NULL;
-       }
-
-       intel_fini_pipe_control(engine);
+       i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
-                          unsigned int num_dwords)
+static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 {
-#define MBOX_UPDATE_DWORDS 8
-       struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_i915_private *dev_priv = signaller_req->i915;
+       struct intel_ring *ring = req->ring;
+       struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-       num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-       ret = intel_ring_begin(signaller_req, num_dwords);
+       num_rings = INTEL_INFO(dev_priv)->num_rings;
+       ret = intel_ring_begin(req, (num_rings-1) * 8);
        if (ret)
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+               u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-               intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
-                                          PIPE_CONTROL_QW_WRITE |
-                                          PIPE_CONTROL_CS_STALL);
-               intel_ring_emit(signaller, lower_32_bits(gtt_offset));
-               intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller_req->seqno);
-               intel_ring_emit(signaller, 0);
-               intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
-               intel_ring_emit(signaller, 0);
+               intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+               intel_ring_emit(ring,
+                               PIPE_CONTROL_GLOBAL_GTT_IVB |
+                               PIPE_CONTROL_QW_WRITE |
+                               PIPE_CONTROL_CS_STALL);
+               intel_ring_emit(ring, lower_32_bits(gtt_offset));
+               intel_ring_emit(ring, upper_32_bits(gtt_offset));
+               intel_ring_emit(ring, req->fence.seqno);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring,
+                               MI_SEMAPHORE_SIGNAL |
+                               MI_SEMAPHORE_TARGET(waiter->hw_id));
+               intel_ring_emit(ring, 0);
        }
+       intel_ring_advance(ring);
 
        return 0;
 }
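
The explicit intel_ring_begin() sizing replaces the old MBOX_UPDATE_DWORDS bookkeeping; the arithmetic, worked through:

	/*
	 * Each signalling target costs 8 dwords: a 6-dword PIPE_CONTROL
	 * QW write of the seqno plus a 2-dword MI_SEMAPHORE_SIGNAL. With
	 * four engines the request therefore reserves (4 - 1) * 8 = 24
	 * dwords; the engine signalling itself is skipped via its
	 * MI_SEMAPHORE_SYNC_INVALID offset.
	 */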
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
-                          unsigned int num_dwords)
+static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 {
-#define MBOX_UPDATE_DWORDS 6
-       struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_i915_private *dev_priv = signaller_req->i915;
+       struct intel_ring *ring = req->ring;
+       struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-       num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-       ret = intel_ring_begin(signaller_req, num_dwords);
+       num_rings = INTEL_INFO(dev_priv)->num_rings;
+       ret = intel_ring_begin(req, (num_rings-1) * 6);
        if (ret)
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+               u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
-                                          MI_FLUSH_DW_OP_STOREDW);
-               intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
-                                          MI_FLUSH_DW_USE_GTT);
-               intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller_req->seqno);
-               intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
-               intel_ring_emit(signaller, 0);
+               intel_ring_emit(ring,
+                               (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+               intel_ring_emit(ring,
+                               lower_32_bits(gtt_offset) |
+                               MI_FLUSH_DW_USE_GTT);
+               intel_ring_emit(ring, upper_32_bits(gtt_offset));
+               intel_ring_emit(ring, req->fence.seqno);
+               intel_ring_emit(ring,
+                               MI_SEMAPHORE_SIGNAL |
+                               MI_SEMAPHORE_TARGET(waiter->hw_id));
+               intel_ring_emit(ring, 0);
        }
+       intel_ring_advance(ring);
 
        return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req,
-                      unsigned int num_dwords)
+static int gen6_signal(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_i915_private *dev_priv = signaller_req->i915;
-       struct intel_engine_cs *useless;
-       enum intel_engine_id id;
+       struct intel_ring *ring = req->ring;
+       struct drm_i915_private *dev_priv = req->i915;
+       struct intel_engine_cs *engine;
        int ret, num_rings;
 
-#define MBOX_UPDATE_DWORDS 3
-       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-       num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-#undef MBOX_UPDATE_DWORDS
-
-       ret = intel_ring_begin(signaller_req, num_dwords);
+       num_rings = INTEL_INFO(dev_priv)->num_rings;
+       ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
        if (ret)
                return ret;
 
-       for_each_engine_id(useless, dev_priv, id) {
-               i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
+       for_each_engine(engine, dev_priv) {
+               i915_reg_t mbox_reg;
+
+               if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
+                       continue;
 
+               mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-                       intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, signaller_req->seqno);
+                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+                       intel_ring_emit_reg(ring, mbox_reg);
+                       intel_ring_emit(ring, req->fence.seqno);
                }
        }
 
        /* If the dword count was rounded up, make sure the tail pointer is correct */
        if (num_rings % 2 == 0)
-               intel_ring_emit(signaller, MI_NOOP);
+               intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
+       return 0;
+}
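The same sizing logic for the gen6 mailbox path, worked through:

	/*
	 * Each mailbox update is 3 dwords (MI_LOAD_REGISTER_IMM header,
	 * register, seqno). With num_rings = 4, (4 - 1) * 3 = 9 dwords
	 * are needed, round_up(9, 2) = 10 are reserved, and the trailing
	 * MI_NOOP fills the spare slot so the ring tail stays
	 * qword-aligned.
	 */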
+
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = request->i915;
+
+       I915_WRITE_TAIL(request->engine,
+                       intel_ring_offset(request->ring, request->tail));
+}
+
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
+{
+       struct intel_ring *ring = req->ring;
+       int ret;
+
+       ret = intel_ring_begin(req, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(ring, req->fence.seqno);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_advance(ring);
+
+       req->tail = ring->tail;
 
        return 0;
 }
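
A short sketch of what the four dwords emitted above accomplish:

	/*
	 * MI_STORE_DWORD_INDEX writes req->fence.seqno into the hardware
	 * status page at byte offset
	 * I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT (dword index
	 * times 4), and MI_USER_INTERRUPT raises the IRQ that wakes any
	 * waiters polling that slot. req->tail is recorded so the
	 * request can be submitted later by i9xx_submit_request().
	 */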
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_request - Update the semaphore mailbox registers
  *
  * @req: request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (engine->semaphore.signal)
-               ret = engine->semaphore.signal(req, 4);
-       else
-               ret = intel_ring_begin(req, 4);
-
+       ret = req->engine->semaphore.signal(req);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(engine,
-                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, req->seqno);
-       intel_ring_emit(engine, MI_USER_INTERRUPT);
-       __intel_ring_advance(engine);
-
-       return 0;
+       return i9xx_emit_request(req);
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
-       if (engine->semaphore.signal)
-               ret = engine->semaphore.signal(req, 8);
-       else
-               ret = intel_ring_begin(req, 8);
+       if (engine->semaphore.signal) {
+               ret = engine->semaphore.signal(req);
+               if (ret)
+                       return ret;
+       }
+
+       ret = intel_ring_begin(req, 8);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-       intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
-                                PIPE_CONTROL_CS_STALL |
-                                PIPE_CONTROL_QW_WRITE));
-       intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                              PIPE_CONTROL_CS_STALL |
+                              PIPE_CONTROL_QW_WRITE));
+       intel_ring_emit(ring, intel_hws_seqno_address(engine));
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        /* We're thrashing one dword of HWS. */
-       intel_ring_emit(engine, 0);
-       intel_ring_emit(engine, MI_USER_INTERRUPT);
-       intel_ring_emit(engine, MI_NOOP);
-       __intel_ring_advance(engine);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
-       return 0;
-}
+       req->tail = ring->tail;
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-                                             u32 seqno)
-{
-       return dev_priv->last_seqno < seqno;
+       return 0;
 }
 
 /**
@@ -1525,82 +1467,71 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-              struct intel_engine_cs *signaller,
-              u32 seqno)
+gen8_ring_sync_to(struct drm_i915_gem_request *req,
+                 struct drm_i915_gem_request *signal)
 {
-       struct intel_engine_cs *waiter = waiter_req->engine;
-       struct drm_i915_private *dev_priv = waiter_req->i915;
-       u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+       struct intel_ring *ring = req->ring;
+       struct drm_i915_private *dev_priv = req->i915;
+       u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
        struct i915_hw_ppgtt *ppgtt;
        int ret;
 
-       ret = intel_ring_begin(waiter_req, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
-                               MI_SEMAPHORE_GLOBAL_GTT |
-                               MI_SEMAPHORE_SAD_GTE_SDD);
-       intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter, lower_32_bits(offset));
-       intel_ring_emit(waiter, upper_32_bits(offset));
-       intel_ring_advance(waiter);
+       intel_ring_emit(ring,
+                       MI_SEMAPHORE_WAIT |
+                       MI_SEMAPHORE_GLOBAL_GTT |
+                       MI_SEMAPHORE_SAD_GTE_SDD);
+       intel_ring_emit(ring, signal->fence.seqno);
+       intel_ring_emit(ring, lower_32_bits(offset));
+       intel_ring_emit(ring, upper_32_bits(offset));
+       intel_ring_advance(ring);
 
        /* When the !RCS engines idle waiting upon a semaphore, they lose their
         * pagetables and we must reload them before executing the batch.
         * We do this on the i915_switch_context() following the wait and
         * before the dispatch.
         */
-       ppgtt = waiter_req->ctx->ppgtt;
-       if (ppgtt && waiter_req->engine->id != RCS)
-               ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+       ppgtt = req->ctx->ppgtt;
+       if (ppgtt && req->engine->id != RCS)
+               ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
        return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-              struct intel_engine_cs *signaller,
-              u32 seqno)
+gen6_ring_sync_to(struct drm_i915_gem_request *req,
+                 struct drm_i915_gem_request *signal)
 {
-       struct intel_engine_cs *waiter = waiter_req->engine;
+       struct intel_ring *ring = req->ring;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
-       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+       u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
        int ret;
 
-       /* Throughout all of the GEM code, seqno passed implies our current
-        * seqno is >= the last seqno executed. However for hardware the
-        * comparison is strictly greater than.
-        */
-       seqno -= 1;
-
        WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-       ret = intel_ring_begin(waiter_req, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
-       /* If seqno wrap happened, omit the wait with no-ops */
-       if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-               intel_ring_emit(waiter, dw1 | wait_mbox);
-               intel_ring_emit(waiter, seqno);
-               intel_ring_emit(waiter, 0);
-               intel_ring_emit(waiter, MI_NOOP);
-       } else {
-               intel_ring_emit(waiter, MI_NOOP);
-               intel_ring_emit(waiter, MI_NOOP);
-               intel_ring_emit(waiter, MI_NOOP);
-               intel_ring_emit(waiter, MI_NOOP);
-       }
-       intel_ring_advance(waiter);
+       intel_ring_emit(ring, dw1 | wait_mbox);
+       /* Throughout all of the GEM code, seqno passed implies our current
+        * seqno is >= the last seqno executed. However for hardware the
+        * comparison is strictly greater than.
+        */
+       intel_ring_emit(ring, signal->fence.seqno - 1);
+       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
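
The relocated comment is easy to misread, so here is the off-by-one worked through:

	/*
	 * To wait for seqno S the ring programs S - 1, because the
	 * MI_SEMAPHORE_COMPARE poll completes only when the mailbox
	 * value is strictly greater than the operand, and
	 * mbox > S - 1 is exactly the mbox >= S test the GEM code
	 * expects.
	 */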
 
 static void
-gen5_seqno_barrier(struct intel_engine_cs *ring)
+gen5_seqno_barrier(struct intel_engine_cs *engine)
 {
        /* MI_STORE are internally buffered by the GPU and not flushed
         * either by MI_FLUSH or SyncFlush or any other combination of
@@ -1693,40 +1624,18 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
-              u32     invalidate_domains,
-              u32     flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_FLUSH);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
-       return 0;
-}
-
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       int ret;
-
-       ret = intel_ring_begin(req, 4);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(engine,
-                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, req->seqno);
-       intel_ring_emit(engine, MI_USER_INTERRUPT);
-       __intel_ring_advance(engine);
-
+       intel_ring_emit(ring, MI_FLUSH);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
        return 0;
 }
 
@@ -1788,24 +1697,24 @@ gen8_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                        u64 offset, u32 length,
-                        unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+                  u64 offset, u32 length,
+                  unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine,
+       intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_NON_SECURE_I965));
-       intel_ring_emit(engine, offset);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -1815,12 +1724,12 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                        u64 offset, u32 len,
-                        unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+                  u64 offset, u32 len,
+                  unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
-       u32 cs_offset = engine->scratch.gtt_offset;
+       struct intel_ring *ring = req->ring;
+       u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -1828,13 +1737,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                return ret;
 
        /* Evict the invalid PTE TLBs */
-       intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-       intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-       intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-       intel_ring_emit(engine, cs_offset);
-       intel_ring_emit(engine, 0xdeadbeef);
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+       intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+       intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+       intel_ring_emit(ring, cs_offset);
+       intel_ring_emit(ring, 0xdeadbeef);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
@@ -1848,17 +1757,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                 * stable batch scratch bo area (so that the CS never
                 * stumbles over its tlb invalidation bug) ...
                 */
-               intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-               intel_ring_emit(engine,
+               intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+               intel_ring_emit(ring,
                                BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-               intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-               intel_ring_emit(engine, cs_offset);
-               intel_ring_emit(engine, 4096);
-               intel_ring_emit(engine, offset);
+               intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+               intel_ring_emit(ring, cs_offset);
+               intel_ring_emit(ring, 4096);
+               intel_ring_emit(ring, offset);
 
-               intel_ring_emit(engine, MI_FLUSH);
-               intel_ring_emit(engine, MI_NOOP);
-               intel_ring_advance(engine);
+               intel_ring_emit(ring, MI_FLUSH);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
 
                /* ... and execute it. */
                offset = cs_offset;
@@ -1868,30 +1777,30 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-                                         0 : MI_BATCH_NON_SECURE));
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                       0 : MI_BATCH_NON_SECURE));
+       intel_ring_advance(ring);
 
        return 0;
 }
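
The scratch sizing for this workaround, worked through (I830_BATCH_LIMIT is defined just above the visible hunk and is assumed to be 256 KiB, as elsewhere in this era of the driver):

	/*
	 * I830_TLB_ENTRIES * 4096 = 2 * 4096 = 8 KiB of TLB-priming blits,
	 * I830_WA_SIZE = max(8 KiB, 256 KiB) = 256 KiB,
	 * so any unpinned batch up to 256 KiB is first copied into the
	 * stable scratch area and executed from there; larger unpinned
	 * batches fail with -ENOSPC.
	 */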
 
 static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                        u64 offset, u32 len,
-                        unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+                  u64 offset, u32 len,
+                  unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-                                         0 : MI_BATCH_NON_SECURE));
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                       0 : MI_BATCH_NON_SECURE));
+       intel_ring_advance(ring);
 
        return 0;
 }
@@ -1909,79 +1818,79 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 
-       obj = engine->status_page.obj;
-       if (obj == NULL)
+       vma = fetch_and_zero(&engine->status_page.vma);
+       if (!vma)
                return;
 
-       kunmap(sg_page(obj->pages->sgl));
-       i915_gem_object_ggtt_unpin(obj);
-       drm_gem_object_unreference(&obj->base);
-       engine->status_page.obj = NULL;
+       i915_vma_unpin(vma);
+       i915_gem_object_unpin_map(vma->obj);
+       i915_vma_put(vma);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *obj = engine->status_page.obj;
-
-       if (obj == NULL) {
-               unsigned flags;
-               int ret;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       unsigned int flags;
+       int ret;
 
-               obj = i915_gem_object_create(&engine->i915->drm, 4096);
-               if (IS_ERR(obj)) {
-                       DRM_ERROR("Failed to allocate status page\n");
-                       return PTR_ERR(obj);
-               }
+       obj = i915_gem_object_create(&engine->i915->drm, 4096);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate status page\n");
+               return PTR_ERR(obj);
+       }
 
-               ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-               if (ret)
-                       goto err_unref;
-
-               flags = 0;
-               if (!HAS_LLC(engine->i915))
-                       /* On g33, we cannot place HWS above 256MiB, so
-                        * restrict its pinning to the low mappable arena.
-                        * Though this restriction is not documented for
-                        * gen4, gen5, or byt, they also behave similarly
-                        * and hang if the HWS is placed at the top of the
-                        * GTT. To generalise, it appears that all !llc
-                        * platforms have issues with us placing the HWS
-                        * above the mappable region (even though we never
-                        * actualy map it).
-                        */
-                       flags |= PIN_MAPPABLE;
-               ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
-               if (ret) {
-err_unref:
-                       drm_gem_object_unreference(&obj->base);
-                       return ret;
-               }
+       ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       if (ret)
+               goto err;
 
-               engine->status_page.obj = obj;
+       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err;
        }
 
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-       engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+       flags = PIN_GLOBAL;
+       if (!HAS_LLC(engine->i915))
+               /* On g33, we cannot place HWS above 256MiB, so
+                * restrict its pinning to the low mappable arena.
+                * Though this restriction is not documented for
+                * gen4, gen5, or byt, they also behave similarly
+                * and hang if the HWS is placed at the top of the
+                * GTT. To generalise, it appears that all !llc
+                * platforms have issues with us placing the HWS
+                * above the mappable region (even though we never
+                * actually map it).
+                */
+               flags |= PIN_MAPPABLE;
+       ret = i915_vma_pin(vma, 0, 4096, flags);
+       if (ret)
+               goto err;
 
-       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-                       engine->name, engine->status_page.gfx_addr);
+       engine->status_page.vma = vma;
+       engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+       engine->status_page.page_addr =
+               i915_gem_object_pin_map(obj, I915_MAP_WB);
 
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+                        engine->name, i915_ggtt_offset(vma));
        return 0;
+
+err:
+       i915_gem_object_put(obj);
+       return ret;
 }
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!dev_priv->status_page_dmah) {
-               dev_priv->status_page_dmah =
-                       drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
-               if (!dev_priv->status_page_dmah)
-                       return -ENOMEM;
-       }
+       dev_priv->status_page_dmah =
+               drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+       if (!dev_priv->status_page_dmah)
+               return -ENOMEM;
 
        engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        memset(engine->status_page.page_addr, 0, PAGE_SIZE);
@@ -1989,115 +1898,105 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
        return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
-{
-       GEM_BUG_ON(ringbuf->vma == NULL);
-       GEM_BUG_ON(ringbuf->virtual_start == NULL);
-
-       if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-               i915_gem_object_unpin_map(ringbuf->obj);
-       else
-               i915_vma_unpin_iomap(ringbuf->vma);
-       ringbuf->virtual_start = NULL;
-
-       i915_gem_object_ggtt_unpin(ringbuf->obj);
-       ringbuf->vma = NULL;
-}
-
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-                                    struct intel_ringbuffer *ringbuf)
+int intel_ring_pin(struct intel_ring *ring)
 {
-       struct drm_i915_gem_object *obj = ringbuf->obj;
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-       unsigned flags = PIN_OFFSET_BIAS | 4096;
+       unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+       enum i915_map_type map;
+       struct i915_vma *vma = ring->vma;
        void *addr;
        int ret;
 
-       if (HAS_LLC(dev_priv) && !obj->stolen) {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
-               if (ret)
-                       return ret;
+       GEM_BUG_ON(ring->vaddr);
 
-               ret = i915_gem_object_set_to_cpu_domain(obj, true);
-               if (ret)
-                       goto err_unpin;
+       map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
 
-               addr = i915_gem_object_pin_map(obj);
-               if (IS_ERR(addr)) {
-                       ret = PTR_ERR(addr);
-                       goto err_unpin;
-               }
-       } else {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-                                           flags | PIN_MAPPABLE);
-               if (ret)
-                       return ret;
+       if (vma->obj->stolen)
+               flags |= PIN_MAPPABLE;
 
-               ret = i915_gem_object_set_to_gtt_domain(obj, true);
-               if (ret)
-                       goto err_unpin;
+       if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+               if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
+                       ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+               else
+                       ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
+               if (unlikely(ret))
+                       return ret;
+       }
 
-               /* Access through the GTT requires the device to be awake. */
-               assert_rpm_wakelock_held(dev_priv);
+       ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+       if (unlikely(ret))
+               return ret;
 
-               addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
-               if (IS_ERR(addr)) {
-                       ret = PTR_ERR(addr);
-                       goto err_unpin;
-               }
-       }
+       if (i915_vma_is_map_and_fenceable(vma))
+               addr = (void __force *)i915_vma_pin_iomap(vma);
+       else
+               addr = i915_gem_object_pin_map(vma->obj, map);
+       if (IS_ERR(addr))
+               goto err;
 
-       ringbuf->virtual_start = addr;
-       ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+       ring->vaddr = addr;
        return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-       return ret;
+err:
+       i915_vma_unpin(vma);
+       return PTR_ERR(addr);
 }
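
A decision sketch for the mapping strategy above:

	/*
	 * LLC platforms take a coherent write-back CPU map (I915_MAP_WB);
	 * !LLC platforms use write-combining (I915_MAP_WC). Rings backed
	 * by stolen memory have no struct pages to map directly, so they
	 * are pinned PIN_MAPPABLE and accessed through the GTT aperture
	 * via i915_vma_pin_iomap() instead.
	 */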
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_ring_unpin(struct intel_ring *ring)
 {
-       drm_gem_object_unreference(&ringbuf->obj->base);
-       ringbuf->obj = NULL;
+       GEM_BUG_ON(!ring->vma);
+       GEM_BUG_ON(!ring->vaddr);
+
+       if (i915_vma_is_map_and_fenceable(ring->vma))
+               i915_vma_unpin_iomap(ring->vma);
+       else
+               i915_gem_object_unpin_map(ring->vma->obj);
+       ring->vaddr = NULL;
+
+       i915_vma_unpin(ring->vma);
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-                                     struct intel_ringbuffer *ringbuf)
+static struct i915_vma *
+intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
        struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 
-       obj = NULL;
-       if (!HAS_LLC(dev))
-               obj = i915_gem_object_create_stolen(dev, ringbuf->size);
-       if (obj == NULL)
-               obj = i915_gem_object_create(dev, ringbuf->size);
+       obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+       if (!obj)
+               obj = i915_gem_object_create(&dev_priv->drm, size);
        if (IS_ERR(obj))
-               return PTR_ERR(obj);
+               return ERR_CAST(obj);
 
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       ringbuf->obj = obj;
+       vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+       if (IS_ERR(vma))
+               goto err;
 
-       return 0;
+       return vma;
+
+err:
+       i915_gem_object_put(obj);
+       return vma;
 }
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
-       struct intel_ringbuffer *ring;
-       int ret;
+       struct intel_ring *ring;
+       struct i915_vma *vma;
+
+       GEM_BUG_ON(!is_power_of_2(size));
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-       if (ring == NULL) {
-               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-                                engine->name);
+       if (!ring)
                return ERR_PTR(-ENOMEM);
-       }
 
        ring->engine = engine;
-       list_add(&ring->link, &engine->buffers);
+
+       INIT_LIST_HEAD(&ring->request_list);
 
        ring->size = size;
        /* Workaround an erratum on the i830 which causes a hang if
@@ -2111,23 +2010,20 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
        ring->last_retired_head = -1;
        intel_ring_update_space(ring);
 
-       ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
-                                engine->name, ret);
-               list_del(&ring->link);
+       vma = intel_ring_create_vma(engine->i915, size);
+       if (IS_ERR(vma)) {
                kfree(ring);
-               return ERR_PTR(ret);
+               return ERR_CAST(vma);
        }
+       ring->vma = vma;
 
        return ring;
 }
 
 void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
 {
-       intel_destroy_ringbuffer_obj(ring);
-       list_del(&ring->link);
+       i915_vma_put(ring->vma);
        kfree(ring);
 }
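
Usage sketch mirroring intel_init_ring_buffer() later in this patch (error paths abbreviated); ring sizes must be a power of two per the GEM_BUG_ON above:

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	ret = intel_ring_pin(ring);
	if (ret) {
		intel_ring_free(ring);
		return ret;
	}
	engine->buffer = ring;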
 
@@ -2143,7 +2039,12 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
                return 0;
 
        if (ce->state) {
-               ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+               ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
+               if (ret)
+                       goto error;
+
+               ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
+                                  PIN_GLOBAL | PIN_HIGH);
                if (ret)
                        goto error;
        }
@@ -2158,7 +2059,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
        if (ctx == ctx->i915->kernel_context)
                ce->initialised = true;
 
-       i915_gem_context_reference(ctx);
+       i915_gem_context_get(ctx);
        return 0;
 
 error:
@@ -2177,30 +2078,25 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
                return;
 
        if (ce->state)
-               i915_gem_object_ggtt_unpin(ce->state);
+               i915_vma_unpin(ce->state);
 
-       i915_gem_context_unreference(ctx);
+       i915_gem_context_put(ctx);
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_engine_cs *engine)
+static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_ringbuffer *ringbuf;
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct intel_ring *ring;
        int ret;
 
        WARN_ON(engine->buffer);
 
-       engine->i915 = dev_priv;
-       INIT_LIST_HEAD(&engine->active_list);
-       INIT_LIST_HEAD(&engine->request_list);
-       INIT_LIST_HEAD(&engine->execlist_queue);
-       INIT_LIST_HEAD(&engine->buffers);
-       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       intel_engine_setup_common(engine);
+
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       ret = intel_engine_init_breadcrumbs(engine);
+       ret = intel_engine_init_common(engine);
        if (ret)
                goto error;
 
@@ -2215,44 +2111,38 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto error;
 
-       ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
-       if (IS_ERR(ringbuf)) {
-               ret = PTR_ERR(ringbuf);
+       ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+       if (IS_ERR(ring)) {
+               ret = PTR_ERR(ring);
                goto error;
        }
-       engine->buffer = ringbuf;
 
-       if (I915_NEED_GFX_HWS(dev_priv)) {
-               ret = init_status_page(engine);
+       if (HWS_NEEDS_PHYSICAL(dev_priv)) {
+               WARN_ON(engine->id != RCS);
+               ret = init_phys_status_page(engine);
                if (ret)
                        goto error;
        } else {
-               WARN_ON(engine->id != RCS);
-               ret = init_phys_status_page(engine);
+               ret = init_status_page(engine);
                if (ret)
                        goto error;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+       ret = intel_ring_pin(ring);
        if (ret) {
-               DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-                               engine->name, ret);
-               intel_destroy_ringbuffer_obj(ringbuf);
+               intel_ring_free(ring);
                goto error;
        }
-
-       ret = i915_cmd_parser_init_ring(engine);
-       if (ret)
-               goto error;
+       engine->buffer = ring;
 
        return 0;
 
 error:
-       intel_cleanup_engine(engine);
+       intel_engine_cleanup(engine);
        return ret;
 }
 
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
@@ -2262,49 +2152,39 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        dev_priv = engine->i915;
 
        if (engine->buffer) {
-               intel_stop_engine(engine);
-               WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+               WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+                       (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-               intel_unpin_ringbuffer_obj(engine->buffer);
-               intel_ringbuffer_free(engine->buffer);
+               intel_ring_unpin(engine->buffer);
+               intel_ring_free(engine->buffer);
                engine->buffer = NULL;
        }
 
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(dev_priv)) {
-               cleanup_status_page(engine);
-       } else {
+       if (HWS_NEEDS_PHYSICAL(dev_priv)) {
                WARN_ON(engine->id != RCS);
                cleanup_phys_status_page(engine);
+       } else {
+               cleanup_status_page(engine);
        }
 
-       i915_cmd_parser_fini_ring(engine);
-       i915_gem_batch_pool_fini(&engine->batch_pool);
-       intel_engine_fini_breadcrumbs(engine);
+       intel_engine_cleanup_common(engine);
 
        intel_ring_context_unpin(dev_priv->kernel_context, engine);
 
        engine->i915 = NULL;
 }
 
-int intel_engine_idle(struct intel_engine_cs *engine)
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_gem_request *req;
-
-       /* Wait upon the last request to be completed */
-       if (list_empty(&engine->request_list))
-               return 0;
+       struct intel_engine_cs *engine;
 
-       req = list_entry(engine->request_list.prev,
-                        struct drm_i915_gem_request,
-                        list);
-
-       /* Make sure we do not trigger any retires */
-       return __i915_wait_request(req,
-                                  req->i915->mm.interruptible,
-                                  NULL, NULL);
+       for_each_engine(engine, dev_priv) {
+               engine->buffer->head = engine->buffer->tail;
+               engine->buffer->last_retired_head = -1;
+       }
 }
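
For context on why resume only snaps head forward: in this scheme head == tail denotes an empty ring, so the hypothetical predicate below is the invariant being restored per engine (last_retired_head = -1 simply marks "no cached value"):

/* Sketch: an empty legacy ring is one whose head has caught up
 * with its tail.
 */
static bool ring_is_empty(const struct intel_ring *ring)
{
	return ring->head == ring->tail;
}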
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2317,7 +2197,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
         */
        request->reserved_space += LEGACY_REQUEST_SIZE;
 
-       request->ringbuf = request->engine->buffer;
+       request->ring = request->engine->buffer;
 
        ret = intel_ring_begin(request, 0);
        if (ret)
@@ -2329,12 +2209,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        struct drm_i915_gem_request *target;
+       int ret;
 
-       intel_ring_update_space(ringbuf);
-       if (ringbuf->space >= bytes)
+       intel_ring_update_space(ring);
+       if (ring->space >= bytes)
                return 0;
 
        /*
@@ -2348,35 +2228,37 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
         */
        GEM_BUG_ON(!req->reserved_space);
 
-       list_for_each_entry(target, &engine->request_list, list) {
+       list_for_each_entry(target, &ring->request_list, ring_link) {
                unsigned space;
 
-               /*
-                * The request queue is per-engine, so can contain requests
-                * from multiple ringbuffers. Here, we must ignore any that
-                * aren't from the ringbuffer we're considering.
-                */
-               if (target->ringbuf != ringbuf)
-                       continue;
-
                /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ringbuf->tail,
-                                          ringbuf->size);
+               space = __intel_ring_space(target->postfix, ring->tail,
+                                          ring->size);
                if (space >= bytes)
                        break;
        }
 
-       if (WARN_ON(&target->list == &engine->request_list))
+       if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       return i915_wait_request(target);
+       ret = i915_wait_request(target,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               NULL, NO_WAITBOOST);
+       if (ret)
+               return ret;
+
+       i915_gem_request_retire_upto(target);
+
+       intel_ring_update_space(ring);
+       GEM_BUG_ON(ring->space < bytes);
+       return 0;
 }
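
A stand-alone sketch of the circular-buffer arithmetic behind __intel_ring_space(), assuming the I915_RING_FREE_SPACE reserve declared in the header; the reserve keeps a completely full ring from ever looking identical to an empty one (head == tail):

#define RING_FREE_SPACE	64	/* mirrors I915_RING_FREE_SPACE */

/* Sketch: usable bytes between tail and head on a wrapping buffer,
 * minus the small reserve described above.
 */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;

	return space - RING_FREE_SPACE;
}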
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
-       int remain_actual = ringbuf->size - ringbuf->tail;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       struct intel_ring *ring = req->ring;
+       int remain_actual = ring->size - ring->tail;
+       int remain_usable = ring->effective_size - ring->tail;
        int bytes = num_dwords * sizeof(u32);
        int total_bytes, wait_bytes;
        bool need_wrap = false;
@@ -2403,37 +2285,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
                wait_bytes = total_bytes;
        }
 
-       if (wait_bytes > ringbuf->space) {
+       if (wait_bytes > ring->space) {
                int ret = wait_for_space(req, wait_bytes);
                if (unlikely(ret))
                        return ret;
-
-               intel_ring_update_space(ringbuf);
-               if (unlikely(ringbuf->space < wait_bytes))
-                       return -EAGAIN;
        }
 
        if (unlikely(need_wrap)) {
-               GEM_BUG_ON(remain_actual > ringbuf->space);
-               GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+               GEM_BUG_ON(remain_actual > ring->space);
+               GEM_BUG_ON(ring->tail + remain_actual > ring->size);
 
                /* Fill the tail with MI_NOOP */
-               memset(ringbuf->virtual_start + ringbuf->tail,
-                      0, remain_actual);
-               ringbuf->tail = 0;
-               ringbuf->space -= remain_actual;
+               memset(ring->vaddr + ring->tail, 0, remain_actual);
+               ring->tail = 0;
+               ring->space -= remain_actual;
        }
 
-       ringbuf->space -= bytes;
-       GEM_BUG_ON(ringbuf->space < 0);
+       ring->space -= bytes;
+       GEM_BUG_ON(ring->space < 0);
        return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
-       int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       struct intel_ring *ring = req->ring;
+       int num_dwords =
+               (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
@@ -2445,61 +2323,16 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
                return ret;
 
        while (num_dwords--)
-               intel_ring_emit(engine, MI_NOOP);
+               intel_ring_emit(ring, MI_NOOP);
 
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
        return 0;
 }
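
As a usage sketch, a hypothetical caller of the begin/emit/advance triple that intel_ring_cacheline_align() itself follows; the dword count passed to intel_ring_begin() must match the number of intel_ring_emit() calls:

static int emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);	/* reserve exactly 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);	/* now a documentation marker only */

	return 0;
}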
 
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       /* Our semaphore implementation is strictly monotonic (i.e. we proceed
-        * so long as the semaphore value in the register/page is greater
-        * than the sync value), so whenever we reset the seqno,
-        * so long as we reset the tracking semaphore value to 0, it will
-        * always be before the next request's seqno. If we don't reset
-        * the semaphore value, then when the seqno moves backwards all
-        * future waits will complete instantly (causing rendering corruption).
-        */
-       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-               if (HAS_VEBOX(dev_priv))
-                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-       }
-       if (dev_priv->semaphore_obj) {
-               struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
-               struct page *page = i915_gem_object_get_dirty_page(obj, 0);
-               void *semaphores = kmap(page);
-               memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
-                      0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
-               kunmap(page);
-       }
-       memset(engine->semaphore.sync_seqno, 0,
-              sizeof(engine->semaphore.sync_seqno));
-
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-       engine->last_submitted_seqno = seqno;
-
-       engine->hangcheck.seqno = seqno;
-
-       /* After manually advancing the seqno, fake the interrupt in case
-        * there are any waiters for that seqno.
-        */
-       rcu_read_lock();
-       intel_engine_wakeup(engine);
-       rcu_read_unlock();
-}
-
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
-                                    u32 value)
+static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
+       struct drm_i915_private *dev_priv = request->i915;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -2523,8 +2356,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
-       POSTING_READ_FW(RING_TAIL(engine->mmio_base));
+       i9xx_submit_request(request);
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
@@ -2535,10 +2367,9 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
-                              u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        uint32_t cmd;
        int ret;
 
@@ -2563,30 +2394,29 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
         * operation is complete. This bit is only valid when the
         * Post-Sync Operation field is a value of 1h or 3h."
         */
-       if (invalidate & I915_GEM_GPU_DOMAINS)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-       intel_ring_emit(engine, cmd);
-       intel_ring_emit(engine,
-                       I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        if (INTEL_GEN(req->i915) >= 8) {
-               intel_ring_emit(engine, 0); /* upper addr */
-               intel_ring_emit(engine, 0); /* value */
+               intel_ring_emit(ring, 0); /* upper addr */
+               intel_ring_emit(ring, 0); /* value */
        } else  {
-               intel_ring_emit(engine, 0);
-               intel_ring_emit(engine, MI_NOOP);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, MI_NOOP);
        }
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
        return 0;
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                             u64 offset, u32 len,
-                             unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+                  u64 offset, u32 len,
+                  unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
-       bool ppgtt = USES_PPGTT(engine->dev) &&
+       struct intel_ring *ring = req->ring;
+       bool ppgtt = USES_PPGTT(req->i915) &&
                        !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
@@ -2595,71 +2425,70 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
                        (dispatch_flags & I915_DISPATCH_RS ?
                         MI_BATCH_RESOURCE_STREAMER : 0));
-       intel_ring_emit(engine, lower_32_bits(offset));
-       intel_ring_emit(engine, upper_32_bits(offset));
-       intel_ring_emit(engine, MI_NOOP);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, lower_32_bits(offset));
+       intel_ring_emit(ring, upper_32_bits(offset));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                            u64 offset, u32 len,
-                            unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+                 u64 offset, u32 len,
+                 unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine,
+       intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
                        (dispatch_flags & I915_DISPATCH_RS ?
                         MI_BATCH_RESOURCE_STREAMER : 0));
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(engine, offset);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-                             u64 offset, u32 len,
-                             unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+                  u64 offset, u32 len,
+                  unsigned int dispatch_flags)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(engine,
+       intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(engine, offset);
-       intel_ring_advance(engine);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
-                          u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_ring *ring = req->ring;
        uint32_t cmd;
        int ret;
 
@@ -2684,19 +2513,19 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
         * operation is complete. This bit is only valid when the
         * Post-Sync Operation field is a value of 1h or 3h."
         */
-       if (invalidate & I915_GEM_DOMAIN_RENDER)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_INVALIDATE_TLB;
-       intel_ring_emit(engine, cmd);
-       intel_ring_emit(engine,
+       intel_ring_emit(ring, cmd);
+       intel_ring_emit(ring,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        if (INTEL_GEN(req->i915) >= 8) {
-               intel_ring_emit(engine, 0); /* upper addr */
-               intel_ring_emit(engine, 0); /* value */
+               intel_ring_emit(ring, 0); /* upper addr */
+               intel_ring_emit(ring, 0); /* value */
        } else  {
-               intel_ring_emit(engine, 0);
-               intel_ring_emit(engine, MI_NOOP);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, MI_NOOP);
        }
-       intel_ring_advance(engine);
+       intel_ring_advance(ring);
 
        return 0;
 }
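
These flush hooks now take a single mode bitmask in place of the old (invalidate, flush) domain pair; a hedged sketch of a hypothetical caller using the EMIT_* bits defined in the header below:

/* Hypothetical caller: invalidate caches before a batch, flush after. */
static int flush_around_batch(struct drm_i915_gem_request *req)
{
	int ret;

	ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
	if (ret)
		return ret;

	/* ... emit and dispatch the batch buffer here ... */

	return req->engine->emit_flush(req, EMIT_FLUSH);
}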
@@ -2707,38 +2536,39 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
        struct drm_i915_gem_object *obj;
        int ret, i;
 
-       if (!i915_semaphore_is_enabled(dev_priv))
+       if (!i915.semaphores)
                return;
 
-       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
+               struct i915_vma *vma;
+
                obj = i915_gem_object_create(&dev_priv->drm, 4096);
-               if (IS_ERR(obj)) {
-                       DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-                       i915.semaphores = 0;
-               } else {
-                       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-                       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-                       if (ret != 0) {
-                               drm_gem_object_unreference(&obj->base);
-                               DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-                               i915.semaphores = 0;
-                       } else {
-                               dev_priv->semaphore_obj = obj;
-                       }
-               }
-       }
+               if (IS_ERR(obj))
+                       goto err;
 
-       if (!i915_semaphore_is_enabled(dev_priv))
-               return;
+               vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+               if (IS_ERR(vma))
+                       goto err_obj;
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
+               if (ret)
+                       goto err_obj;
+
+               ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+               if (ret)
+                       goto err_obj;
+
+               dev_priv->semaphore = vma;
+       }
 
        if (INTEL_GEN(dev_priv) >= 8) {
-               u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+               u32 offset = i915_ggtt_offset(dev_priv->semaphore);
 
-               engine->semaphore.sync_to = gen8_ring_sync;
+               engine->semaphore.sync_to = gen8_ring_sync_to;
                engine->semaphore.signal = gen8_xcs_signal;
 
                for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       u64 ring_offset;
+                       u32 ring_offset;
 
                        if (i != engine->id)
                                ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
@@ -2748,7 +2578,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
                        engine->semaphore.signal_ggtt[i] = ring_offset;
                }
        } else if (INTEL_GEN(dev_priv) >= 6) {
-               engine->semaphore.sync_to = gen6_ring_sync;
+               engine->semaphore.sync_to = gen6_ring_sync_to;
                engine->semaphore.signal = gen6_signal;
 
                /*
@@ -2758,52 +2588,62 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
                 * initialized as INVALID.  Gen8 will initialize the
                 * sema between VCS2 and RCS later.
                 */
-               for (i = 0; i < I915_NUM_ENGINES; i++) {
+               for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
                        static const struct {
                                u32 wait_mbox;
                                i915_reg_t mbox_reg;
-                       } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
-                               [RCS] = {
-                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
-                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
-                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+                       } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
+                               [RCS_HW] = {
+                                       [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+                                       [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+                                       [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
                                },
-                               [VCS] = {
-                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
-                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
-                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+                               [VCS_HW] = {
+                                       [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+                                       [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+                                       [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
                                },
-                               [BCS] = {
-                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
-                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
-                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+                               [BCS_HW] = {
+                                       [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+                                       [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+                                       [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
                                },
-                               [VECS] = {
-                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
-                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
-                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+                               [VECS_HW] = {
+                                       [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+                                       [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+                                       [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
                                },
                        };
                        u32 wait_mbox;
                        i915_reg_t mbox_reg;
 
-                       if (i == engine->id || i == VCS2) {
+                       if (i == engine->hw_id) {
                                wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
                                mbox_reg = GEN6_NOSYNC;
                        } else {
-                               wait_mbox = sem_data[engine->id][i].wait_mbox;
-                               mbox_reg = sem_data[engine->id][i].mbox_reg;
+                               wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
+                               mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
                        }
 
                        engine->semaphore.mbox.wait[i] = wait_mbox;
                        engine->semaphore.mbox.signal[i] = mbox_reg;
                }
        }
+
+       return;
+
+err_obj:
+       i915_gem_object_put(obj);
+err:
+       DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
+       i915.semaphores = 0;
 }
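
The gen8 offsets computed above come from GEN8_SEMAPHORE_OFFSET() in the header: each (from, to) engine pair owns one 8-byte slot in the shared semaphore page (the header pretends seqnos are 8 bytes so MI_FLUSH_DW gets its qword-aligned offsets). A stand-alone sketch of that math, with the constants mirrored locally:

#define SEMA_SLOT_SIZE	8	/* mirrors gen8_semaphore_seqno_size */
#define NUM_ENGINES	5	/* mirrors I915_NUM_ENGINES */

/* Sketch: linearize the (from, to) engine pair into a slot offset
 * inside the single semaphore page.
 */
static u32 gen8_sema_offset(u32 base, int from, int to)
{
	return base + (from * NUM_ENGINES + to) * SEMA_SLOT_SIZE;
}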
 
 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
                                struct intel_engine_cs *engine)
 {
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
+
        if (INTEL_GEN(dev_priv) >= 8) {
                engine->irq_enable = gen8_irq_enable;
                engine->irq_disable = gen8_irq_disable;
@@ -2828,83 +2668,76 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
                                      struct intel_engine_cs *engine)
 {
+       intel_ring_init_irq(dev_priv, engine);
+       intel_ring_init_semaphores(dev_priv, engine);
+
        engine->init_hw = init_ring_common;
-       engine->write_tail = ring_write_tail;
+       engine->reset_hw = reset_ring_common;
 
-       engine->add_request = i9xx_add_request;
-       if (INTEL_GEN(dev_priv) >= 6)
-               engine->add_request = gen6_add_request;
+       engine->emit_request = i9xx_emit_request;
+       if (i915.semaphores)
+               engine->emit_request = gen6_sema_emit_request;
+       engine->submit_request = i9xx_submit_request;
 
        if (INTEL_GEN(dev_priv) >= 8)
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+               engine->emit_bb_start = gen8_emit_bb_start;
        else if (INTEL_GEN(dev_priv) >= 6)
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+               engine->emit_bb_start = gen6_emit_bb_start;
        else if (INTEL_GEN(dev_priv) >= 4)
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+               engine->emit_bb_start = i965_emit_bb_start;
        else if (IS_I830(dev_priv) || IS_845G(dev_priv))
-               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+               engine->emit_bb_start = i830_emit_bb_start;
        else
-               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
-
-       intel_ring_init_irq(dev_priv, engine);
-       intel_ring_init_semaphores(dev_priv, engine);
+               engine->emit_bb_start = i915_emit_bb_start;
 }
 
-int intel_init_render_ring_buffer(struct drm_device *dev)
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       engine->name = "render ring";
-       engine->id = RCS;
-       engine->exec_id = I915_EXEC_RENDER;
-       engine->hw_id = 0;
-       engine->mmio_base = RENDER_RING_BASE;
-
        intel_ring_default_vfuncs(dev_priv, engine);
 
-       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
        if (HAS_L3_DPF(dev_priv))
                engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
        if (INTEL_GEN(dev_priv) >= 8) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen8_render_add_request;
-               engine->flush = gen8_render_ring_flush;
-               if (i915_semaphore_is_enabled(dev_priv))
+               engine->emit_request = gen8_render_emit_request;
+               engine->emit_flush = gen8_render_ring_flush;
+               if (i915.semaphores)
                        engine->semaphore.signal = gen8_rcs_signal;
        } else if (INTEL_GEN(dev_priv) >= 6) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->flush = gen7_render_ring_flush;
+               engine->emit_flush = gen7_render_ring_flush;
                if (IS_GEN6(dev_priv))
-                       engine->flush = gen6_render_ring_flush;
+                       engine->emit_flush = gen6_render_ring_flush;
        } else if (IS_GEN5(dev_priv)) {
-               engine->flush = gen4_render_ring_flush;
+               engine->emit_flush = gen4_render_ring_flush;
        } else {
                if (INTEL_GEN(dev_priv) < 4)
-                       engine->flush = gen2_render_ring_flush;
+                       engine->emit_flush = gen2_render_ring_flush;
                else
-                       engine->flush = gen4_render_ring_flush;
+                       engine->emit_flush = gen4_render_ring_flush;
                engine->irq_enable_mask = I915_USER_INTERRUPT;
        }
 
        if (IS_HASWELL(dev_priv))
-               engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+               engine->emit_bb_start = hsw_emit_bb_start;
 
        engine->init_hw = init_render_ring;
        engine->cleanup = render_ring_cleanup;
 
-       ret = intel_init_ring_buffer(dev, engine);
+       ret = intel_init_ring_buffer(engine);
        if (ret)
                return ret;
 
        if (INTEL_GEN(dev_priv) >= 6) {
-               ret = intel_init_pipe_control(engine, 4096);
+               ret = intel_engine_create_scratch(engine, 4096);
                if (ret)
                        return ret;
        } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-               ret = intel_init_pipe_control(engine, I830_WA_SIZE);
+               ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
                if (ret)
                        return ret;
        }
@@ -2912,166 +2745,71 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        return 0;
 }
 
-int intel_init_bsd_ring_buffer(struct drm_device *dev)
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
-       engine->name = "bsd ring";
-       engine->id = VCS;
-       engine->exec_id = I915_EXEC_BSD;
-       engine->hw_id = 1;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        intel_ring_default_vfuncs(dev_priv, engine);
 
        if (INTEL_GEN(dev_priv) >= 6) {
-               engine->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
                if (IS_GEN6(dev_priv))
-                       engine->write_tail = gen6_bsd_ring_write_tail;
-               engine->flush = gen6_bsd_ring_flush;
-               if (INTEL_GEN(dev_priv) >= 8)
-                       engine->irq_enable_mask =
-                               GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-               else
+                       engine->submit_request = gen6_bsd_submit_request;
+               engine->emit_flush = gen6_bsd_ring_flush;
+               if (INTEL_GEN(dev_priv) < 8)
                        engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
        } else {
                engine->mmio_base = BSD_RING_BASE;
-               engine->flush = bsd_ring_flush;
+               engine->emit_flush = bsd_ring_flush;
                if (IS_GEN5(dev_priv))
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
        }
 
-       return intel_init_ring_buffer(dev, engine);
+       return intel_init_ring_buffer(engine);
 }
 
 /**
  * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
  */
-int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
-
-       engine->name = "bsd2 ring";
-       engine->id = VCS2;
-       engine->exec_id = I915_EXEC_BSD;
-       engine->hw_id = 4;
-       engine->mmio_base = GEN8_BSD2_RING_BASE;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        intel_ring_default_vfuncs(dev_priv, engine);
 
-       engine->flush = gen6_bsd_ring_flush;
-       engine->irq_enable_mask =
-                       GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+       engine->emit_flush = gen6_bsd_ring_flush;
 
-       return intel_init_ring_buffer(dev, engine);
+       return intel_init_ring_buffer(engine);
 }
 
-int intel_init_blt_ring_buffer(struct drm_device *dev)
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine = &dev_priv->engine[BCS];
-
-       engine->name = "blitter ring";
-       engine->id = BCS;
-       engine->exec_id = I915_EXEC_BLT;
-       engine->hw_id = 2;
-       engine->mmio_base = BLT_RING_BASE;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        intel_ring_default_vfuncs(dev_priv, engine);
 
-       engine->flush = gen6_ring_flush;
-       if (INTEL_GEN(dev_priv) >= 8)
-               engine->irq_enable_mask =
-                       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-       else
+       engine->emit_flush = gen6_ring_flush;
+       if (INTEL_GEN(dev_priv) < 8)
                engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
-       return intel_init_ring_buffer(dev, engine);
+       return intel_init_ring_buffer(engine);
 }
 
-int intel_init_vebox_ring_buffer(struct drm_device *dev)
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_engine_cs *engine = &dev_priv->engine[VECS];
-
-       engine->name = "video enhancement ring";
-       engine->id = VECS;
-       engine->exec_id = I915_EXEC_VEBOX;
-       engine->hw_id = 3;
-       engine->mmio_base = VEBOX_RING_BASE;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        intel_ring_default_vfuncs(dev_priv, engine);
 
-       engine->flush = gen6_ring_flush;
+       engine->emit_flush = gen6_ring_flush;
 
-       if (INTEL_GEN(dev_priv) >= 8) {
-               engine->irq_enable_mask =
-                       GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-       } else {
+       if (INTEL_GEN(dev_priv) < 8) {
                engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
                engine->irq_enable = hsw_vebox_irq_enable;
                engine->irq_disable = hsw_vebox_irq_disable;
        }
 
-       return intel_init_ring_buffer(dev, engine);
-}
-
-int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       int ret;
-
-       if (!engine->gpu_caches_dirty)
-               return 0;
-
-       ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-       if (ret)
-               return ret;
-
-       trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-       engine->gpu_caches_dirty = false;
-       return 0;
-}
-
-int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       uint32_t flush_domains;
-       int ret;
-
-       flush_domains = 0;
-       if (engine->gpu_caches_dirty)
-               flush_domains = I915_GEM_GPU_DOMAINS;
-
-       ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-       if (ret)
-               return ret;
-
-       trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-       engine->gpu_caches_dirty = false;
-       return 0;
-}
-
-void
-intel_stop_engine(struct intel_engine_cs *engine)
-{
-       int ret;
-
-       if (!intel_engine_initialized(engine))
-               return;
-
-       ret = intel_engine_idle(engine);
-       if (ret)
-               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         engine->name, ret);
-
-       stop_ring(engine);
+       return intel_init_ring_buffer(engine);
 }
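
With the drm_device parameter gone, engine bring-up now starts from a pre-populated intel_engine_cs; a hedged sketch of a hypothetical setup path under the new prototypes, using the engine array the removed lines above used to index:

/* Hypothetical bring-up: look the engine up first, then hand it to
 * the engine-centric init function introduced above.
 */
static int setup_render_sketch(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine = &i915->engine[RCS];

	return intel_init_render_ring_buffer(engine);
}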
index 12cb7ed90014bd7a1b974a026b73487d0d3e2d1a..7f64d611159b6e557060d9c44f215296871a0a68 100644
@@ -3,6 +3,7 @@
 
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
+#include "i915_gem_request.h"
 
 #define I915_CMD_HASH_ORDER 9
 
  */
 #define I915_RING_FREE_SPACE 64
 
-struct  intel_hw_status_page {
-       u32             *page_addr;
-       unsigned int    gfx_addr;
-       struct          drm_i915_gem_object *obj;
+struct intel_hw_status_page {
+       struct i915_vma *vma;
+       u32 *page_addr;
+       u32 ggtt_offset;
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
+#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
 
-#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
+#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
 
-#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
+#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
 
-#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
+#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
 
-#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
+#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
 
-#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
-#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
+#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
 
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -56,13 +57,13 @@ struct  intel_hw_status_page {
 #define GEN8_SEMAPHORE_OFFSET(__from, __to)                         \
        (((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
 #define GEN8_SIGNAL_OFFSET(__ring, to)                      \
-       (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+       (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
 #define GEN8_WAIT_OFFSET(__ring, from)                      \
-       (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+       (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
@@ -72,23 +73,22 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
        u64 acthd;
-       unsigned long user_interrupts;
        u32 seqno;
        int score;
-       enum intel_ring_hangcheck_action action;
+       enum intel_engine_hangcheck_action action;
        int deadlock;
        u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
-struct intel_ringbuffer {
-       struct drm_i915_gem_object *obj;
-       void __iomem *virtual_start;
+struct intel_ring {
        struct i915_vma *vma;
+       void *vaddr;
 
        struct intel_engine_cs *engine;
-       struct list_head link;
+
+       struct list_head request_list;
 
        u32 head;
        u32 tail;
@@ -121,12 +121,12 @@ struct drm_i915_reg_table;
  *    an option for future use.
  *  size: size of the batch in DWORDS
  */
-struct  i915_ctx_workarounds {
+struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 };
 
 struct drm_i915_gem_request;
@@ -144,11 +144,18 @@ struct intel_engine_cs {
 #define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
        unsigned int exec_id;
-       unsigned int hw_id;
-       unsigned int guc_id; /* XXX same as hw_id? */
+       enum intel_engine_hw_id {
+               RCS_HW = 0,
+               VCS_HW,
+               BCS_HW,
+               VECS_HW,
+               VCS2_HW
+       } hw_id;
+       enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
+       u64 fence_context;
        u32             mmio_base;
-       struct intel_ringbuffer *buffer;
-       struct list_head buffers;
+       unsigned int irq_shift;
+       struct intel_ring *buffer;
 
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
@@ -167,8 +174,7 @@ struct intel_engine_cs {
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
-               struct task_struct *irq_seqno_bh; /* bh for user interrupts */
-               unsigned long irq_wakeups;
+               struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
                bool irq_posted;
 
                spinlock_t lock; /* protects the lists of requests */
@@ -178,6 +184,9 @@ struct intel_engine_cs {
                struct task_struct *signaler; /* used for fence signalling */
                struct drm_i915_gem_request *first_signal;
                struct timer_list fake_irq; /* used after a missed interrupt */
+               struct timer_list hangcheck; /* detect missed interrupts */
+
+               unsigned long timeout;
 
                bool irq_enabled : 1;
                bool rpm_wakelock : 1;
@@ -192,36 +201,48 @@ struct intel_engine_cs {
 
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
+       struct i915_vma *scratch;
 
        u32             irq_keep_mask; /* always keep these interrupts */
        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
-       void            (*irq_enable)(struct intel_engine_cs *ring);
-       void            (*irq_disable)(struct intel_engine_cs *ring);
+       void            (*irq_enable)(struct intel_engine_cs *engine);
+       void            (*irq_disable)(struct intel_engine_cs *engine);
 
-       int             (*init_hw)(struct intel_engine_cs *ring);
+       int             (*init_hw)(struct intel_engine_cs *engine);
+       void            (*reset_hw)(struct intel_engine_cs *engine,
+                                   struct drm_i915_gem_request *req);
 
        int             (*init_context)(struct drm_i915_gem_request *req);
 
-       void            (*write_tail)(struct intel_engine_cs *ring,
-                                     u32 value);
-       int __must_check (*flush)(struct drm_i915_gem_request *req,
-                                 u32   invalidate_domains,
-                                 u32   flush_domains);
-       int             (*add_request)(struct drm_i915_gem_request *req);
+       int             (*emit_flush)(struct drm_i915_gem_request *request,
+                                     u32 mode);
+#define EMIT_INVALIDATE        BIT(0)
+#define EMIT_FLUSH     BIT(1)
+#define EMIT_BARRIER   (EMIT_INVALIDATE | EMIT_FLUSH)
+       int             (*emit_bb_start)(struct drm_i915_gem_request *req,
+                                        u64 offset, u32 length,
+                                        unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS     BIT(2)
+       int             (*emit_request)(struct drm_i915_gem_request *req);
+
+       /* Pass the request to the hardware queue (e.g. directly into
+        * the legacy ringbuffer or to the end of an execlist).
+        *
+        * This is called from an atomic context with irqs disabled; must
+        * be irq safe.
+        */
+       void            (*submit_request)(struct drm_i915_gem_request *req);
+
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
-       void            (*irq_seqno_barrier)(struct intel_engine_cs *ring);
-       int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
-                                              u64 offset, u32 length,
-                                              unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-       void            (*cleanup)(struct intel_engine_cs *ring);
+       void            (*irq_seqno_barrier)(struct intel_engine_cs *engine);
+       void            (*cleanup)(struct intel_engine_cs *engine);
 
        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to    signal to   signal to      signal to
@@ -264,51 +285,36 @@ struct intel_engine_cs {
                u32     sync_seqno[I915_NUM_ENGINES-1];
 
                union {
+#define GEN6_SEMAPHORE_LAST    VECS_HW
+#define GEN6_NUM_SEMAPHORES    (GEN6_SEMAPHORE_LAST + 1)
+#define GEN6_SEMAPHORES_MASK   GENMASK(GEN6_SEMAPHORE_LAST, 0)
                        struct {
                                /* our mbox written by others */
-                               u32             wait[I915_NUM_ENGINES];
+                               u32             wait[GEN6_NUM_SEMAPHORES];
                                /* mboxes this ring signals to */
-                               i915_reg_t      signal[I915_NUM_ENGINES];
+                               i915_reg_t      signal[GEN6_NUM_SEMAPHORES];
                        } mbox;
                        u64             signal_ggtt[I915_NUM_ENGINES];
                };
 
                /* AKA wait() */
-               int     (*sync_to)(struct drm_i915_gem_request *to_req,
-                                  struct intel_engine_cs *from,
-                                  u32 seqno);
-               int     (*signal)(struct drm_i915_gem_request *signaller_req,
-                                 /* num_dwords needed by caller */
-                                 unsigned int num_dwords);
+               int     (*sync_to)(struct drm_i915_gem_request *req,
+                                  struct drm_i915_gem_request *signal);
+               int     (*signal)(struct drm_i915_gem_request *req);
        } semaphore;
 
        /* Execlists */
        struct tasklet_struct irq_tasklet;
        spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
+       struct execlist_port {
+               struct drm_i915_gem_request *request;
+               unsigned int count;
+       } execlist_port[2];
        struct list_head execlist_queue;
        unsigned int fw_domains;
-       unsigned int next_context_status_buffer;
-       unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
+       bool preempt_wa;
        u32 ctx_desc_template;
-       int             (*emit_request)(struct drm_i915_gem_request *request);
-       int             (*emit_flush)(struct drm_i915_gem_request *request,
-                                     u32 invalidate_domains,
-                                     u32 flush_domains);
-       int             (*emit_bb_start)(struct drm_i915_gem_request *req,
-                                        u64 offset, unsigned dispatch_flags);
-
-       /**
-        * List of objects currently involved in rendering from the
-        * ringbuffer.
-        *
-        * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_read_req
-        * represents when the rendering involved will be completed.
-        *
-        * A reference is held on the buffer while on this list.
-        */
-       struct list_head active_list;
 
        /**
         * List of breadcrumbs associated with GPU requests currently
@@ -323,22 +329,22 @@ struct intel_engine_cs {
         */
        u32 last_submitted_seqno;
 
-       bool gpu_caches_dirty;
+       /* An RCU-guarded pointer to the last request. No reference is
+        * held to the request; users must carefully acquire a reference to
+        * the request using i915_gem_active_get_rcu(), or hold the
+        * struct_mutex.
+        */
+       struct i915_gem_active last_request;
 
        struct i915_gem_context *last_context;
 
-       struct intel_ring_hangcheck hangcheck;
-
-       struct {
-               struct drm_i915_gem_object *obj;
-               u32 gtt_offset;
-       } scratch;
+       struct intel_engine_hangcheck hangcheck;
 
        bool needs_cmd_parser;
 
        /*
         * Table of commands the command parser needs to know about
-        * for this ring.
+        * for this engine.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 
@@ -352,11 +358,11 @@ struct intel_engine_cs {
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
-        * If the command parser finds an entry for a command in the ring's
+        * If the command parser finds an entry for a command in the engine's
         * cmd_tables, it gets the command's length based on the table entry.
-        * If not, it calls this function to determine the per-ring length field
-        * encoding for the command (i.e. certain opcode ranges use certain bits
-        * to encode the command length in the header).
+        * If not, it calls this function to determine the per-engine length
+        * field encoding for the command (i.e. different opcode ranges use
+        * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
@@ -374,8 +380,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
-                     struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+                       struct intel_engine_cs *other)
 {
        int idx;
 
@@ -437,55 +443,77 @@ intel_write_status_page(struct intel_engine_cs *engine,
 #define I915_GEM_HWS_SCRATCH_INDEX     0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-                                    struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
+
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
 
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *engine,
-                                  u32 data)
+
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-       ringbuf->tail += 4;
+       *(uint32_t *)(ring->vaddr + ring->tail) = data;
+       ring->tail += 4;
 }
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
-                                      i915_reg_t reg)
+
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 {
-       intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+       intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+
+static inline void intel_ring_advance(struct intel_ring *ring)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       ringbuf->tail &= ringbuf->size - 1;
+       /* Dummy function.
+        *
+        * This serves as a placeholder in the code so that the reader
+        * can compare against the preceding intel_ring_begin() and
+        * check that the number of dwords emitted matches the space
+        * reserved for the command packet (i.e. the value passed to
+        * intel_ring_begin()).
+        */
+}
+
+static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+{
+       /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+       return value & (ring->size - 1);
 }
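
Taken together, intel_ring_begin()/intel_ring_emit()/intel_ring_advance()
form the emission pattern; a minimal sketch of a caller (reg and value are
placeholders, and the ring is assumed reachable via req->ring):

	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);		/* reserve 4 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, reg);		/* header + reg + value = 3 dwords */
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_NOOP);		/* pad to the reserved count */
	intel_ring_advance(ring);		/* documentation-only marker, see above */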
+
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ring);
 
-int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_reset_irq(struct intel_engine_cs *engine);
 
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
-void intel_fini_pipe_control(struct intel_engine_cs *engine);
+void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_init_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
+void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_init_render_ring_buffer(struct drm_device *dev);
-int intel_init_bsd_ring_buffer(struct drm_device *dev);
-int intel_init_bsd2_ring_buffer(struct drm_device *dev);
-int intel_init_blt_ring_buffer(struct drm_device *dev);
-int intel_init_vebox_ring_buffer(struct drm_device *dev);
+static inline int intel_engine_idle(struct intel_engine_cs *engine,
+                                   unsigned int flags)
+{
+       /* Wait upon the last request to be completed */
+       return i915_gem_active_wait_unlocked(&engine->last_request,
+                                            flags, NULL, NULL);
+}
+
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -493,11 +521,6 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
-{
-       return ringbuf->tail;
-}
-
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
@@ -509,21 +532,10 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 
 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
-       return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+       return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
 }
 
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-struct intel_wait {
-       struct rb_node node;
-       struct task_struct *tsk;
-       u32 seqno;
-};
-
-struct intel_signal_node {
-       struct rb_node node;
-       struct intel_wait wait;
-};
-
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
 static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
@@ -543,31 +555,42 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
 void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
 
-static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 {
-       return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+       return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
 }
 
-static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 {
        bool wakeup = false;
-       struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+
        /* Note that for this not to dangerously chase a dangling pointer,
-        * the caller is responsible for ensure that the task remain valid for
-        * wake_up_process() i.e. that the RCU grace period cannot expire.
+        * we must hold the rcu_read_lock here.
         *
         * Also note that tsk is likely to be in !TASK_RUNNING state so an
         * early test for tsk->state != TASK_RUNNING before wake_up_process()
         * is unlikely to be beneficial.
         */
-       if (tsk)
-               wakeup = wake_up_process(tsk);
+       if (intel_engine_has_waiter(engine)) {
+               struct task_struct *tsk;
+
+               rcu_read_lock();
+               tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
+               if (tsk)
+                       wakeup = wake_up_process(tsk);
+               rcu_read_unlock();
+       }
+
        return wakeup;
 }
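
For context, a hedged sketch of the waiter-side publish/retire pattern this
reader pairs with (locking elided; not the exact breadcrumbs code):

	/* Publish the waiting task so intel_engine_wakeup() can find it. */
	rcu_assign_pointer(engine->breadcrumbs.irq_seqno_bh, current);

	/* ... sleep until the seqno interrupt fires ... */

	/* Retire the waiter. Readers inside rcu_read_lock() may still
	 * dereference the old task until a grace period elapses, which is
	 * what keeps the wake_up_process() call above safe. */
	rcu_assign_pointer(engine->breadcrumbs.irq_seqno_bh, NULL);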
 
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned int intel_kick_waiters(struct drm_i915_private *i915);
 unsigned int intel_kick_signalers(struct drm_i915_private *i915);
 
+static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
+{
+       return i915_gem_active_isset(&engine->last_request);
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */
index 1c603bbe5784fa7e21049e267eab52af25a60360..6c11168facd63c7fd18eeb64e49abfa283e9c87a 100644 (file)
@@ -287,6 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
 
        /*
@@ -299,9 +300,9 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
         * sure vgacon can keep working normally without triggering interrupts
         * and error messages.
         */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
        if (IS_BROADWELL(dev))
                gen8_irq_power_well_post_enable(dev_priv,
@@ -318,7 +319,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       struct drm_device *dev = &dev_priv->drm;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -331,9 +332,9 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
         * and error messages.
         */
        if (power_well->data == SKL_DISP_PW_2) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+               vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
                outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+               vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
                gen8_irq_power_well_post_enable(dev_priv,
                                                1 << PIPE_C | 1 << PIPE_B);
@@ -592,6 +593,8 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv)
        DRM_DEBUG_KMS("Disabling DC9\n");
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       intel_pps_unlock_regs_wa(dev_priv);
 }
 
 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
@@ -854,7 +857,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
        enum skl_disp_power_wells power_well_id = power_well->data;
-       struct i915_power_well *cmn_a_well;
+       struct i915_power_well *cmn_a_well = NULL;
 
        if (power_well_id == BXT_DPIO_CMN_BC) {
                /*
@@ -867,7 +870,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 
        bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
 
-       if (power_well_id == BXT_DPIO_CMN_BC)
+       if (cmn_a_well)
                intel_power_well_put(dev_priv, cmn_a_well);
 }
 
@@ -1121,6 +1124,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
        }
 
        i915_redisable_vga_power_on(&dev_priv->drm);
+
+       intel_pps_unlock_regs_wa(dev_priv);
 }
 
 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -2284,7 +2289,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-       struct device *device = &dev_priv->drm.pdev->dev;
+       struct device *kdev = &dev_priv->drm.pdev->dev;
 
        /*
         * The i915.ko module is still not prepared to be loaded when
@@ -2306,7 +2311,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
         * the platform doesn't support runtime PM.
         */
        if (!HAS_RUNTIME_PM(dev_priv))
-               pm_runtime_put(device);
+               pm_runtime_put(kdev);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2647,10 +2652,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
-       pm_runtime_get_sync(device);
+       pm_runtime_get_sync(kdev);
 
        atomic_inc(&dev_priv->pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);
@@ -2668,11 +2673,11 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        if (IS_ENABLED(CONFIG_PM)) {
-               int ret = pm_runtime_get_if_in_use(device);
+               int ret = pm_runtime_get_if_in_use(kdev);
 
                /*
                 * In cases runtime PM is disabled by the RPM core and we get
@@ -2710,11 +2715,11 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
-       pm_runtime_get_noresume(device);
+       pm_runtime_get_noresume(kdev);
 
        atomic_inc(&dev_priv->pm.wakeref_count);
 }
@@ -2729,15 +2734,15 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
        if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
                atomic_inc(&dev_priv->pm.atomic_seq);
 
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_put_autosuspend(device);
+       pm_runtime_mark_last_busy(kdev);
+       pm_runtime_put_autosuspend(kdev);
 }
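
A minimal usage sketch of the get/put pairing (reg and val are placeholders):

	intel_runtime_pm_get(dev_priv);		/* device guaranteed awake from here */
	val = I915_READ(reg);			/* hardware access is now safe */
	intel_runtime_pm_put(dev_priv);		/* may autosuspend after the delay */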
 
 /**
@@ -2752,11 +2757,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct device *kdev = &pdev->dev;
 
-       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-       pm_runtime_mark_last_busy(device);
+       pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+       pm_runtime_mark_last_busy(kdev);
 
        /*
         * Take a permanent reference to disable the RPM functionality and drop
@@ -2765,10 +2771,10 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         * platforms without RPM support.
         */
        if (!HAS_RUNTIME_PM(dev)) {
-               pm_runtime_dont_use_autosuspend(device);
-               pm_runtime_get_sync(device);
+               pm_runtime_dont_use_autosuspend(kdev);
+               pm_runtime_get_sync(kdev);
        } else {
-               pm_runtime_use_autosuspend(device);
+               pm_runtime_use_autosuspend(kdev);
        }
 
        /*
@@ -2776,6 +2782,5 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         * We drop that here and will reacquire it during unloading in
         * intel_power_domains_fini().
         */
-       pm_runtime_put_autosuspend(device);
+       pm_runtime_put_autosuspend(kdev);
 }
-
index e378f35365a2e5b36dd3c193d0971e0db6f7e1f5..c551024d487141c00e75d7b977909a2a76966327 100644 (file)
@@ -1003,24 +1003,22 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 }
 
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-                                        const struct drm_display_mode *adjusted_mode)
+                                        struct intel_crtc_state *pipe_config)
 {
        uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
-       struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        union hdmi_infoframe frame;
        int ret;
        ssize_t len;
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      adjusted_mode);
+                                                      &pipe_config->base.adjusted_mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return false;
        }
 
        if (intel_sdvo->rgb_quant_range_selectable) {
-               if (intel_crtc->config->limited_color_range)
+               if (pipe_config->limited_color_range)
                        frame.avi.quantization_range =
                                HDMI_QUANTIZATION_RANGE_LIMITED;
                else
@@ -1125,7 +1123,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
 }
 
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
-                                     struct intel_crtc_state *pipe_config)
+                                     struct intel_crtc_state *pipe_config,
+                                     struct drm_connector_state *conn_state)
 {
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1192,22 +1191,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-       struct drm_display_mode *mode = &crtc->config->base.mode;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+       struct drm_display_mode *mode = &crtc_state->base.mode;
        struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
        struct intel_sdvo_dtd input_dtd, output_dtd;
        int rate;
 
-       if (!mode)
-               return;
-
        /* First, set the input mapping for the first input to our controlled
         * output. This is only correct if we're a single-input device, in
         * which case the first input is the output from the appropriate SDVO
@@ -1240,11 +1238,11 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
        if (!intel_sdvo_set_target_input(intel_sdvo))
                return;
 
-       if (crtc->config->has_hdmi_sink) {
+       if (crtc_state->has_hdmi_sink) {
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
                                           SDVO_COLORIMETRY_RGB256);
-               intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+               intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
        } else
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1260,7 +1258,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
                DRM_INFO("Setting input timings on %s failed\n",
                         SDVO_NAME(intel_sdvo));
 
-       switch (crtc->config->pixel_multiplier) {
+       switch (crtc_state->pixel_multiplier) {
        default:
                WARN(1, "unknown pixel multiplier specified\n");
        case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1275,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
                /* The real mode polarity is set by the SDVO commands, using
                 * struct intel_sdvo_dtd. */
                sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-               if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+               if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
                        sdvox |= HDMI_COLOR_RANGE_16_235;
                if (INTEL_INFO(dev)->gen < 5)
                        sdvox |= SDVO_BORDER_ENABLE;
@@ -1301,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                /* done in crtc_mode_set as it lives inside the dpll register */
        } else {
-               sdvox |= (crtc->config->pixel_multiplier - 1)
+               sdvox |= (crtc_state->pixel_multiplier - 1)
                        << SDVO_PORT_MULTIPLY_SHIFT;
        }
 
@@ -1434,7 +1432,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
             pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 }
 
-static void intel_disable_sdvo(struct intel_encoder *encoder)
+static void intel_disable_sdvo(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1477,16 +1477,22 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
        }
 }
 
-static void pch_disable_sdvo(struct intel_encoder *encoder)
+static void pch_disable_sdvo(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_sdvo(encoder);
+       intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_sdvo(struct intel_encoder *encoder)
+static void intel_enable_sdvo(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2930,10 +2936,12 @@ static bool
 intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
                          struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
+
        sdvo->ddc.owner = THIS_MODULE;
        sdvo->ddc.class = I2C_CLASS_DDC;
        snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-       sdvo->ddc.dev.parent = &dev->pdev->dev;
+       sdvo->ddc.dev.parent = &pdev->dev;
        sdvo->ddc.algo_data = sdvo;
        sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
 
index 7c08e4f29032306e3ae98fb158c74f2217982190..73a521fdf1bdf760e7d487f0e632b80020e43e49 100644 (file)
@@ -36,6 +36,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -202,23 +203,24 @@ skl_update_plane(struct drm_plane *drm_plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(drm_plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+       struct drm_crtc *crtc = crtc_state->base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
-       u32 plane_ctl, stride_div, stride;
+       u32 plane_ctl;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 surf_addr;
-       u32 tile_height, plane_offset, plane_size;
+       u32 surf_addr = plane_state->main.offset;
        unsigned int rotation = plane_state->base.rotation;
-       int x_offset, y_offset;
-       int crtc_x = plane_state->dst.x1;
-       int crtc_y = plane_state->dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-       uint32_t x = plane_state->src.x1 >> 16;
-       uint32_t y = plane_state->src.y1 >> 16;
-       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+       u32 stride = skl_plane_stride(fb, 0, rotation);
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       uint32_t x = plane_state->main.x;
+       uint32_t y = plane_state->main.y;
+       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
        plane_ctl = PLANE_CTL_ENABLE |
                PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -229,14 +231,8 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-       stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-                                              fb->pixel_format);
-
-       /* Sizes are 0 based */
-       src_w--;
-       src_h--;
-       crtc_w--;
-       crtc_h--;
+       if (wm->dirty_pipes & drm_crtc_mask(crtc))
+               skl_write_plane_wm(intel_crtc, wm, plane);
 
        if (key->flags) {
                I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
@@ -249,28 +245,15 @@ skl_update_plane(struct drm_plane *drm_plane,
        else if (key->flags & I915_SET_COLORKEY_SOURCE)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
 
-       surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
-
-       if (intel_rotation_90_or_270(rotation)) {
-               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-               /* stride: Surface height in tiles */
-               tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
-               stride = DIV_ROUND_UP(fb->height, tile_height);
-               plane_size = (src_w << 16) | src_h;
-               x_offset = stride * tile_height - y - (src_h + 1);
-               y_offset = x;
-       } else {
-               stride = fb->pitches[0] / stride_div;
-               plane_size = (src_h << 16) | src_w;
-               x_offset = x;
-               y_offset = y;
-       }
-       plane_offset = y_offset << 16 | x_offset;
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
 
-       I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
+       I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
        I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
-       I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+       I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
 
        /* program plane scaler */
        if (plane_state->scaler_id >= 0) {
@@ -295,7 +278,8 @@ skl_update_plane(struct drm_plane *drm_plane,
        }
 
        I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-       I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
+       I915_WRITE(PLANE_SURF(pipe, plane),
+                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -308,6 +292,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
 
+       /*
+        * We only populate skl_results on watermark updates, and if the
+        * plane's visibility isn't actually changing, neither are its
+        * watermarks.
+        */
+       if (!dplane->state->visible)
+               skl_write_plane_wm(to_intel_crtc(crtc),
+                                  &dev_priv->wm.skl_results, plane);
+
        I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
        I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -362,22 +354,20 @@ vlv_update_plane(struct drm_plane *dplane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int pipe = intel_plane->pipe;
        int plane = intel_plane->plane;
        u32 sprctl;
        u32 sprsurf_offset, linear_offset;
        unsigned int rotation = dplane->state->rotation;
-       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->dst.x1;
-       int crtc_y = plane_state->dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-       uint32_t x = plane_state->src.x1 >> 16;
-       uint32_t y = plane_state->src.y1 >> 16;
-       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       uint32_t x = plane_state->base.src.x1 >> 16;
+       uint32_t y = plane_state->base.src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
        sprctl = SP_ENABLE;
 
@@ -430,7 +420,7 @@ vlv_update_plane(struct drm_plane *dplane,
         */
        sprctl |= SP_GAMMA_ENABLE;
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                sprctl |= SP_TILED;
 
        /* Sizes are 0 based */
@@ -439,19 +429,18 @@ vlv_update_plane(struct drm_plane *dplane,
        crtc_w--;
        crtc_h--;
 
-       linear_offset = y * fb->pitches[0] + x * cpp;
-       sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-                                                  fb->pitches[0], rotation);
-       linear_offset -= sprsurf_offset;
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+       sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-       if (rotation == BIT(DRM_ROTATE_180)) {
+       if (rotation == DRM_ROTATE_180) {
                sprctl |= SP_ROTATE_180;
 
                x += src_w;
                y += src_h;
-               linear_offset += src_h * fb->pitches[0] + src_w * cpp;
        }
 
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
        if (key->flags) {
                I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
                I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@@ -467,7 +456,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
        I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
        else
                I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -476,8 +465,8 @@ vlv_update_plane(struct drm_plane *dplane,
 
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
-       I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
-                  sprsurf_offset);
+       I915_WRITE(SPSURF(pipe, plane),
+                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -505,21 +494,19 @@ ivb_update_plane(struct drm_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        enum pipe pipe = intel_plane->pipe;
        u32 sprctl, sprscale = 0;
        u32 sprsurf_offset, linear_offset;
        unsigned int rotation = plane_state->base.rotation;
-       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->dst.x1;
-       int crtc_y = plane_state->dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-       uint32_t x = plane_state->src.x1 >> 16;
-       uint32_t y = plane_state->src.y1 >> 16;
-       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       uint32_t x = plane_state->base.src.x1 >> 16;
+       uint32_t y = plane_state->base.src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
        sprctl = SPRITE_ENABLE;
 
@@ -552,7 +539,7 @@ ivb_update_plane(struct drm_plane *plane,
         */
        sprctl |= SPRITE_GAMMA_ENABLE;
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                sprctl |= SPRITE_TILED;
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -572,22 +559,21 @@ ivb_update_plane(struct drm_plane *plane,
        if (crtc_w != src_w || crtc_h != src_h)
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       linear_offset = y * fb->pitches[0] + x * cpp;
-       sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-                                                  fb->pitches[0], rotation);
-       linear_offset -= sprsurf_offset;
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+       sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-       if (rotation == BIT(DRM_ROTATE_180)) {
+       if (rotation == DRM_ROTATE_180) {
                sprctl |= SPRITE_ROTATE_180;
 
                /* HSW and BDW do this automagically in hardware */
                if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
                        x += src_w;
                        y += src_h;
-                       linear_offset += src_h * fb->pitches[0] + src_w * cpp;
                }
        }
 
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
        if (key->flags) {
                I915_WRITE(SPRKEYVAL(pipe), key->min_value);
                I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@@ -606,7 +592,7 @@ ivb_update_plane(struct drm_plane *plane,
         * register */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
-       else if (obj->tiling_mode != I915_TILING_NONE)
+       else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
        else
                I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -616,7 +602,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -646,21 +632,19 @@ ilk_update_plane(struct drm_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int pipe = intel_plane->pipe;
        u32 dvscntr, dvsscale;
        u32 dvssurf_offset, linear_offset;
        unsigned int rotation = plane_state->base.rotation;
-       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->dst.x1;
-       int crtc_y = plane_state->dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-       uint32_t x = plane_state->src.x1 >> 16;
-       uint32_t y = plane_state->src.y1 >> 16;
-       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       uint32_t x = plane_state->base.src.x1 >> 16;
+       uint32_t y = plane_state->base.src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
        dvscntr = DVS_ENABLE;
 
@@ -693,7 +677,7 @@ ilk_update_plane(struct drm_plane *plane,
         */
        dvscntr |= DVS_GAMMA_ENABLE;
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                dvscntr |= DVS_TILED;
 
        if (IS_GEN6(dev))
@@ -709,19 +693,18 @@ ilk_update_plane(struct drm_plane *plane,
        if (crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       linear_offset = y * fb->pitches[0] + x * cpp;
-       dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-                                                  fb->pitches[0], rotation);
-       linear_offset -= dvssurf_offset;
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+       dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-       if (rotation == BIT(DRM_ROTATE_180)) {
+       if (rotation == DRM_ROTATE_180) {
                dvscntr |= DVS_ROTATE_180;
 
                x += src_w;
                y += src_h;
-               linear_offset += src_h * fb->pitches[0] + src_w * cpp;
        }
 
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
        if (key->flags) {
                I915_WRITE(DVSKEYVAL(pipe), key->min_value);
                I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@@ -736,7 +719,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       if (obj->tiling_mode != I915_TILING_NONE)
+       if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
                I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
        else
                I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -745,7 +728,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
@@ -778,15 +761,26 @@ intel_check_sprite_plane(struct drm_plane *plane,
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        uint32_t src_x, src_y, src_w, src_h;
-       struct drm_rect *src = &state->src;
-       struct drm_rect *dst = &state->dst;
+       struct drm_rect *src = &state->base.src;
+       struct drm_rect *dst = &state->base.dst;
        const struct drm_rect *clip = &state->clip;
        int hscale, vscale;
        int max_scale, min_scale;
        bool can_scale;
+       int ret;
+
+       src->x1 = state->base.src_x;
+       src->y1 = state->base.src_y;
+       src->x2 = state->base.src_x + state->base.src_w;
+       src->y2 = state->base.src_y + state->base.src_h;
+
+       dst->x1 = state->base.crtc_x;
+       dst->y1 = state->base.crtc_y;
+       dst->x2 = state->base.crtc_x + state->base.crtc_w;
+       dst->y2 = state->base.crtc_y + state->base.crtc_h;
 
        if (!fb) {
-               state->visible = false;
+               state->base.visible = false;
                return 0;
        }
 
@@ -834,14 +828,14 @@ intel_check_sprite_plane(struct drm_plane *plane,
        vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(vscale < 0);
 
-       state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+       state->base.visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
        crtc_x = dst->x1;
        crtc_y = dst->y1;
        crtc_w = drm_rect_width(dst);
        crtc_h = drm_rect_height(dst);
 
-       if (state->visible) {
+       if (state->base.visible) {
                /* check again in case clipping clamped the results */
                hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
                if (hscale < 0) {
@@ -898,12 +892,12 @@ intel_check_sprite_plane(struct drm_plane *plane,
                                crtc_w &= ~1;
 
                        if (crtc_w == 0)
-                               state->visible = false;
+                               state->base.visible = false;
                }
        }
 
        /* Check size restrictions when scaling */
-       if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
+       if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
                unsigned int width_bytes;
                int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
@@ -912,10 +906,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
                /* FIXME interlacing min height is 6 */
 
                if (crtc_w < 3 || crtc_h < 3)
-                       state->visible = false;
+                       state->base.visible = false;
 
                if (src_w < 3 || src_h < 3)
-                       state->visible = false;
+                       state->base.visible = false;
 
                width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
@@ -926,7 +920,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
                }
        }
 
-       if (state->visible) {
+       if (state->base.visible) {
                src->x1 = src_x << 16;
                src->x2 = (src_x + src_w) << 16;
                src->y1 = src_y << 16;
@@ -938,6 +932,12 @@ intel_check_sprite_plane(struct drm_plane *plane,
        dst->y1 = crtc_y;
        dst->y2 = crtc_y + crtc_h;
 
+       if (INTEL_GEN(dev) >= 9) {
+               ret = skl_check_plane_surface(state);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
index 49136ad5473ede52222ac7ec383d766a377bab5c..d960e48665954fa4796fbfc11b3f9d8b76e2584d 100644 (file)
@@ -838,7 +838,9 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder)
+intel_enable_tv(struct intel_encoder *encoder,
+               struct intel_crtc_state *pipe_config,
+               struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -851,7 +853,9 @@ intel_enable_tv(struct intel_encoder *encoder)
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder)
+intel_disable_tv(struct intel_encoder *encoder,
+                struct intel_crtc_state *old_crtc_state,
+                struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -908,7 +912,8 @@ intel_tv_get_config(struct intel_encoder *encoder,
 
 static bool
 intel_tv_compute_config(struct intel_encoder *encoder,
-                       struct intel_crtc_state *pipe_config)
+                       struct intel_crtc_state *pipe_config,
+                       struct drm_connector_state *conn_state)
 {
        struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1010,7 +1015,9 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
                   color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder)
+static void intel_tv_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
index ff80a81b1a84f929cf9a7aacda59608a23e0dc8a..a9b6c936aadd6a5875c1dddcee416e48a8217725 100644 (file)
@@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
        i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
 
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
-       intel_disable_gt_powersave(dev_priv);
+       intel_sanitize_gt_powersave(dev_priv);
 }
 
 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1018,11 +1018,9 @@ gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 __gen5_write(8)
 __gen5_write(16)
 __gen5_write(32)
-__gen5_write(64)
 __gen2_write(8)
 __gen2_write(16)
 __gen2_write(32)
-__gen2_write(64)
 
 #undef __gen5_write
 #undef __gen2_write
@@ -1112,23 +1110,18 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
 __gen9_write(8)
 __gen9_write(16)
 __gen9_write(32)
-__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
-__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
-__gen8_write(64)
 __hsw_write(8)
 __hsw_write(16)
 __hsw_write(32)
-__hsw_write(64)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
-__gen6_write(64)
 
 #undef __gen9_write
 #undef __chv_write
@@ -1158,7 +1151,6 @@ static void vgpu_write##x(struct drm_i915_private *dev_priv, \
 __vgpu_write(8)
 __vgpu_write(16)
 __vgpu_write(32)
-__vgpu_write(64)
 
 #undef __vgpu_write
 #undef VGPU_WRITE_FOOTER
@@ -1169,7 +1161,6 @@ do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
-       dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
 } while (0)
 
 #define ASSIGN_READ_MMIO_VFUNCS(x) \
@@ -1597,8 +1588,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
        if (engine_mask == ALL_ENGINES) {
                hw_mask = GEN6_GRDOM_FULL;
        } else {
+               unsigned int tmp;
+
                hw_mask = 0;
-               for_each_engine_masked(engine, dev_priv, engine_mask)
+               for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                        hw_mask |= hw_engine_mask[engine->id];
        }
 
@@ -1618,8 +1611,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
  * @timeout_ms: timeout in millisecond
  *
  * This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- *   (I915_READ_FW(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ *     (I915_READ_FW(reg) & mask) == value
+ *
  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  *
  * Note that this routine assumes the caller holds forcewake asserted, it is
@@ -1652,8 +1647,10 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
  * @timeout_ms: timeout in millisecond
  *
  * This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- *   (I915_READ(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ *     (I915_READ(reg) & mask) == value
+ *
  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
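
The condition being polled is just a masked compare; a simplified sketch of
the semantics, ignoring the forcewake handling and the fast/slow wait split
of the real helper:

	/* Poll until (I915_READ(reg) & mask) == value or timeout_ms expires. */
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((I915_READ(reg) & mask) == value)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}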
@@ -1710,15 +1707,16 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
                              unsigned engine_mask)
 {
        struct intel_engine_cs *engine;
+       unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask)
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                if (gen8_request_engine_reset(engine))
                        goto not_ready;
 
        return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
-       for_each_engine_masked(engine, dev_priv, engine_mask)
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                gen8_unrequest_engine_reset(engine);
 
        return -EIO;
index 9f7dafce3a4cf8aa94e5ac8c64ca210124609fbd..98df09c2b3885b0ad0e5d99434063005a11f6712 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/component.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
-#include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/reservation.h>
@@ -58,12 +57,6 @@ static int legacyfb_depth = 16;
 module_param(legacyfb_depth, int, 0444);
 #endif
 
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
-{
-       return drm_crtc_index(crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
-
 static void imx_drm_driver_lastclose(struct drm_device *drm)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
@@ -71,43 +64,6 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
        drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 }
 
-static int imx_drm_driver_unload(struct drm_device *drm)
-{
-       struct imx_drm_device *imxdrm = drm->dev_private;
-
-       drm_kms_helper_poll_fini(drm);
-
-       if (imxdrm->fbhelper)
-               drm_fbdev_cma_fini(imxdrm->fbhelper);
-
-       component_unbind_all(drm->dev, drm);
-
-       drm_vblank_cleanup(drm);
-       drm_mode_config_cleanup(drm);
-
-       platform_set_drvdata(drm->platformdev, NULL);
-
-       return 0;
-}
-
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
-{
-       return drm_crtc_vblank_get(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
-
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
-{
-       drm_crtc_vblank_put(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
-
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
-{
-       drm_crtc_handle_vblank(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
-
 static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
@@ -171,54 +127,73 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
        drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+                               struct drm_atomic_state *state)
+{
+       int ret;
+
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_planes(dev, state);
+       if (ret)
+               return ret;
+
+       /*
+        * Check modeset again in case crtc_state->mode_changed is
+        * updated in a plane's ->atomic_check callback.
+        */
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static int imx_drm_atomic_commit(struct drm_device *dev,
+                                struct drm_atomic_state *state,
+                                bool nonblock)
+{
+       struct drm_plane_state *plane_state;
+       struct drm_plane *plane;
+       struct dma_buf *dma_buf;
+       int i;
+
+       /*
+        * If the plane fb has a dma-buf attached, fish out the exclusive
+        * fence for the atomic helper to wait on.
+        */
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+                       dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
+                                                        0)->base.dma_buf;
+                       if (!dma_buf)
+                               continue;
+                       plane_state->fence =
+                               reservation_object_get_excl_rcu(dma_buf->resv);
+               }
+       }
+
+       return drm_atomic_helper_commit(dev, state, nonblock);
+}
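
For reference, the fence attached above is consumed during commit before any
hardware is touched; roughly (simplified from the atomic helper's
wait-for-fences step):

	for_each_plane_in_state(state, plane, plane_state, i) {
		if (!plane_state->fence)
			continue;
		fence_wait(plane_state->fence, false);	/* block until rendering done */
		fence_put(plane_state->fence);
		plane_state->fence = NULL;
	}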
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = imx_drm_output_poll_changed,
-       .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = drm_atomic_helper_commit,
+       .atomic_check = imx_drm_atomic_check,
+       .atomic_commit = imx_drm_atomic_commit,
 };
 
 static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       struct drm_plane_state *plane_state;
-       struct drm_gem_cma_object *cma_obj;
-       struct fence *excl;
-       unsigned shared_count;
-       struct fence **shared;
-       unsigned int i, j;
-       int ret;
-
-       /* Wait for fences. */
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               plane_state = crtc->primary->state;
-               if (plane_state->fb) {
-                       cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
-                       if (cma_obj->base.dma_buf) {
-                               ret = reservation_object_get_fences_rcu(
-                                       cma_obj->base.dma_buf->resv, &excl,
-                                       &shared_count, &shared);
-                               if (unlikely(ret))
-                                       DRM_ERROR("failed to get fences "
-                                                 "for buffer\n");
-
-                               if (excl) {
-                                       fence_wait(excl, false);
-                                       fence_put(excl);
-                               }
-                               for (j = 0; j < shared_count; i++) {
-                                       fence_wait(shared[j], false);
-                                       fence_put(shared[j]);
-                               }
-                       }
-               }
-       }
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state, true);
+       drm_atomic_helper_commit_planes(dev, state,
+                               DRM_PLANE_COMMIT_ACTIVE_ONLY |
+                               DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -233,111 +208,6 @@ static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
        .atomic_commit_tail = imx_drm_atomic_commit_tail,
 };
 
-/*
- * Main DRM initialisation. This binds, initialises and registers
- * with DRM the subcomponents of the driver.
- */
-static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
-{
-       struct imx_drm_device *imxdrm;
-       struct drm_connector *connector;
-       int ret;
-
-       imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
-       if (!imxdrm)
-               return -ENOMEM;
-
-       imxdrm->drm = drm;
-
-       drm->dev_private = imxdrm;
-
-       /*
-        * enable drm irq mode.
-        * - with irq_enabled = true, we can use the vblank feature.
-        *
-        * P.S. note that we wouldn't use drm irq handler but
-        *      just specific driver own one instead because
-        *      drm framework supports only one irq handler and
-        *      drivers can well take care of their interrupts
-        */
-       drm->irq_enabled = true;
-
-       /*
-        * set max width and height as default value(4096x4096).
-        * this value would be used to check framebuffer size limitation
-        * at drm_mode_addfb().
-        */
-       drm->mode_config.min_width = 64;
-       drm->mode_config.min_height = 64;
-       drm->mode_config.max_width = 4096;
-       drm->mode_config.max_height = 4096;
-       drm->mode_config.funcs = &imx_drm_mode_config_funcs;
-       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
-
-       drm_mode_config_init(drm);
-
-       ret = drm_vblank_init(drm, MAX_CRTC);
-       if (ret)
-               goto err_kms;
-
-       platform_set_drvdata(drm->platformdev, drm);
-
-       /* Now try and bind all our sub-components */
-       ret = component_bind_all(drm->dev, drm);
-       if (ret)
-               goto err_vblank;
-
-       /*
-        * All components are now added, we can publish the connector sysfs
-        * entries to userspace.  This will generate hotplug events and so
-        * userspace will expect to be able to access DRM at this point.
-        */
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       dev_err(drm->dev,
-                               "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-                               connector->base.id,
-                               connector->name, ret);
-                       goto err_unbind;
-               }
-       }
-
-       drm_mode_config_reset(drm);
-
-       /*
-        * All components are now initialised, so setup the fb helper.
-        * The fb helper takes copies of key hardware information, so the
-        * crtcs/connectors/encoders must not change after this point.
-        */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
-       if (legacyfb_depth != 16 && legacyfb_depth != 32) {
-               dev_warn(drm->dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
-               legacyfb_depth = 16;
-       }
-       imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-                               drm->mode_config.num_crtc, MAX_CRTC);
-       if (IS_ERR(imxdrm->fbhelper)) {
-               ret = PTR_ERR(imxdrm->fbhelper);
-               imxdrm->fbhelper = NULL;
-               goto err_unbind;
-       }
-#endif
-
-       drm_kms_helper_poll_init(drm);
-
-       return 0;
-
-err_unbind:
-       component_unbind_all(drm->dev, drm);
-err_vblank:
-       drm_vblank_cleanup(drm);
-err_kms:
-       drm_mode_config_cleanup(drm);
-
-       return ret;
-}
-
 /*
  * imx_drm_add_crtc - add a new crtc
  */
@@ -430,8 +300,6 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 static struct drm_driver imx_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                                  DRIVER_ATOMIC,
-       .load                   = imx_drm_driver_load,
-       .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
@@ -484,12 +352,122 @@ static int compare_of(struct device *dev, void *data)
 
 static int imx_drm_bind(struct device *dev)
 {
-       return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+       struct drm_device *drm;
+       struct imx_drm_device *imxdrm;
+       int ret;
+
+       drm = drm_dev_alloc(&imx_drm_driver, dev);
+       if (!drm)
+               return -ENOMEM;
+
+       imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
+       if (!imxdrm) {
+               ret = -ENOMEM;
+               goto err_unref;
+       }
+
+       imxdrm->drm = drm;
+       drm->dev_private = imxdrm;
+
+       /*
+        * Enable DRM irq mode: with irq_enabled = true, the vblank
+        * feature can be used.
+        *
+        * Note that we do not use the DRM core irq handler but a
+        * driver-specific one instead, because the DRM framework
+        * supports only a single irq handler and drivers can take
+        * care of their own interrupts.
+        */
+       drm->irq_enabled = true;
+
+       /*
+        * Set the maximum width and height to a default of 4096x4096.
+        * These values are used to check framebuffer size limits in
+        * drm_mode_addfb().
+        */
+       drm->mode_config.min_width = 64;
+       drm->mode_config.min_height = 64;
+       drm->mode_config.max_width = 4096;
+       drm->mode_config.max_height = 4096;
+       drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+
+       drm_mode_config_init(drm);
+
+       ret = drm_vblank_init(drm, MAX_CRTC);
+       if (ret)
+               goto err_kms;
+
+       dev_set_drvdata(dev, drm);
+
+       /* Now try and bind all our sub-components */
+       ret = component_bind_all(dev, drm);
+       if (ret)
+               goto err_vblank;
+
+       drm_mode_config_reset(drm);
+
+       /*
+        * All components are now initialised, so setup the fb helper.
+        * The fb helper takes copies of key hardware information, so the
+        * crtcs/connectors/encoders must not change after this point.
+        */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+       if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+               dev_warn(dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
+               legacyfb_depth = 16;
+       }
+       imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+                               drm->mode_config.num_crtc, MAX_CRTC);
+       if (IS_ERR(imxdrm->fbhelper)) {
+               ret = PTR_ERR(imxdrm->fbhelper);
+               imxdrm->fbhelper = NULL;
+               goto err_unbind;
+       }
+#endif
+
+       drm_kms_helper_poll_init(drm);
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto err_fbhelper;
+
+       return 0;
+
+err_fbhelper:
+       drm_kms_helper_poll_fini(drm);
+       if (imxdrm->fbhelper)
+               drm_fbdev_cma_fini(imxdrm->fbhelper);
+err_unbind:
+       component_unbind_all(drm->dev, drm);
+err_vblank:
+       drm_vblank_cleanup(drm);
+err_kms:
+       drm_mode_config_cleanup(drm);
+err_unref:
+       drm_dev_unref(drm);
+
+       return ret;
 }
 
 static void imx_drm_unbind(struct device *dev)
 {
-       drm_put_dev(dev_get_drvdata(dev));
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct imx_drm_device *imxdrm = drm->dev_private;
+
+       drm_dev_unregister(drm);
+
+       drm_kms_helper_poll_fini(drm);
+
+       if (imxdrm->fbhelper)
+               drm_fbdev_cma_fini(imxdrm->fbhelper);
+
+       drm_mode_config_cleanup(drm);
+
+       component_unbind_all(drm->dev, drm);
+       dev_set_drvdata(dev, NULL);
+
+       drm_dev_unref(drm);
 }
 
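The bind/unbind rewrite above replaces the deprecated drm_platform_init()/.load() path with an explicit device lifecycle. A condensed sketch of the pattern, with hypothetical mydrv_* names and the imx-specific mode-config and fbdev setup elided:

	static int mydrv_bind(struct device *dev)
	{
		struct drm_device *drm;
		int ret;

		/* mydrv_driver is this driver's struct drm_driver */
		drm = drm_dev_alloc(&mydrv_driver, dev);	/* refcounted, not yet visible */
		if (!drm)			/* newer kernels return ERR_PTR instead of NULL */
			return -ENOMEM;

		drm_mode_config_init(drm);	/* KMS state before sub-components bind */

		ret = component_bind_all(dev, drm);
		if (ret)
			goto err_kms;

		ret = drm_dev_register(drm, 0);	/* only now visible to userspace */
		if (ret)
			goto err_unbind;

		dev_set_drvdata(dev, drm);
		return 0;

	err_unbind:
		component_unbind_all(dev, drm);
	err_kms:
		drm_mode_config_cleanup(drm);
		drm_dev_unref(drm);		/* drops the initial reference */
		return ret;
	}

Unbind runs the same steps in reverse, starting with drm_dev_unregister() so userspace loses access before anything is torn down.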
 static const struct component_master_ops imx_drm_ops = {
index 07d33e45f90f5538982697839685f8e0a1adca78..5a91cb16c8fa53d4b84dcbcf332605c78483d171 100644 (file)
@@ -13,8 +13,6 @@ struct drm_plane;
 struct imx_drm_crtc;
 struct platform_device;
 
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
-
 struct imx_crtc_state {
        struct drm_crtc_state                   base;
        u32                                     bus_format;
@@ -44,10 +42,6 @@ int imx_drm_init_drm(struct platform_device *pdev,
                int preferred_bpp);
 int imx_drm_exit_drm(void);
 
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc);
-
 void imx_drm_mode_config_init(struct drm_device *drm);
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
index b03919ed60bade2f2051aeb4fc41389f64843608..3ce391c239b02ace097ab4fbfb89deaaebb24618 100644 (file)
@@ -57,7 +57,11 @@ struct imx_ldb_channel {
        struct imx_ldb *ldb;
        struct drm_connector connector;
        struct drm_encoder encoder;
+
+       /* What is connected to the LDB; only one of these is set at a time */
        struct drm_panel *panel;
+       struct drm_bridge *bridge;
+
        struct device_node *child;
        struct i2c_adapter *ddc;
        int chno;
@@ -66,6 +70,7 @@ struct imx_ldb_channel {
        struct drm_display_mode mode;
        int mode_valid;
        u32 bus_format;
+       u32 bus_flags;
 };
 
 static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
@@ -251,11 +256,13 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
        drm_panel_enable(imx_ldb_ch->panel);
 }
 
-static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
-                        struct drm_display_mode *orig_mode,
-                        struct drm_display_mode *mode)
+static void
+imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_connector_state *connector_state)
 {
        struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
        unsigned long serial_clk;
@@ -297,17 +304,11 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        if (!bus_format) {
-               struct drm_connector *connector;
-
-               drm_for_each_connector(connector, encoder->dev) {
-                       struct drm_display_info *di = &connector->display_info;
+               struct drm_connector *connector = connector_state->connector;
+               struct drm_display_info *di = &connector->display_info;
 
-                       if (connector->encoder == encoder &&
-                           di->num_bus_formats) {
-                               bus_format = di->bus_formats[0];
-                               break;
-                       }
-               }
+               if (di->num_bus_formats)
+                       bus_format = di->bus_formats[0];
        }
        imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
 }
@@ -379,8 +380,13 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
        u32 bus_format = imx_ldb_ch->bus_format;
 
        /* Bus format description in DT overrides connector display info. */
-       if (!bus_format && di->num_bus_formats)
+       if (!bus_format && di->num_bus_formats) {
                bus_format = di->bus_formats[0];
+               imx_crtc_state->bus_flags = di->bus_flags;
+       } else {
+               bus_format = imx_ldb_ch->bus_format;
+               imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags;
+       }
        switch (bus_format) {
        case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
                imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
@@ -420,7 +426,7 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
-       .mode_set = imx_ldb_encoder_mode_set,
+       .atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
        .enable = imx_ldb_encoder_enable,
        .disable = imx_ldb_encoder_disable,
        .atomic_check = imx_ldb_encoder_atomic_check,
@@ -466,10 +472,30 @@ static int imx_ldb_register(struct drm_device *drm,
        drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS, NULL);
 
-       drm_connector_helper_add(&imx_ldb_ch->connector,
-                       &imx_ldb_connector_helper_funcs);
-       drm_connector_init(drm, &imx_ldb_ch->connector,
-                          &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+       if (imx_ldb_ch->bridge) {
+               imx_ldb_ch->bridge->encoder = encoder;
+
+               imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge;
+               ret = drm_bridge_attach(drm, imx_ldb_ch->bridge);
+               if (ret) {
+                       DRM_ERROR("Failed to initialize bridge with drm\n");
+                       return ret;
+               }
+       } else {
+               /*
+                * We want to add the connector whenever there is no bridge
+                * that brings its own, not only when there is a panel. For
+                * historical reasons, the ldb driver can also work without
+                * a panel.
+                */
+               drm_connector_helper_add(&imx_ldb_ch->connector,
+                               &imx_ldb_connector_helper_funcs);
+               drm_connector_init(drm, &imx_ldb_ch->connector,
+                               &imx_ldb_connector_funcs,
+                               DRM_MODE_CONNECTOR_LVDS);
+               drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
+                               encoder);
+       }
 
        if (imx_ldb_ch->panel) {
                ret = drm_panel_attach(imx_ldb_ch->panel,
@@ -478,8 +504,6 @@ static int imx_ldb_register(struct drm_device *drm,
                        return ret;
        }
 
-       drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
-
        return 0;
 }
 
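A condensed sketch of the decision introduced above (hypothetical example_* names, error handling trimmed): when the remote endpoint is a bridge, the bridge supplies the connector; only in the no-bridge case does the driver create its own LVDS connector.

	static int example_output_init(struct drm_device *drm,
				       struct drm_encoder *encoder,
				       struct drm_bridge *bridge,
				       struct drm_connector *connector)
	{
		if (bridge) {
			/* the bridge brings its own connector further downstream */
			bridge->encoder = encoder;
			encoder->bridge = bridge;
			return drm_bridge_attach(drm, bridge);
		}

		/* no bridge: create and attach our own connector */
		drm_connector_helper_add(connector, &example_connector_helper_funcs);
		drm_connector_init(drm, connector, &example_connector_funcs,
				   DRM_MODE_CONNECTOR_LVDS);
		return drm_mode_connector_attach_encoder(connector, encoder);
	}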
@@ -548,6 +572,46 @@ static const struct of_device_id imx_ldb_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
 
+static int imx_ldb_panel_ddc(struct device *dev,
+               struct imx_ldb_channel *channel, struct device_node *child)
+{
+       struct device_node *ddc_node;
+       const u8 *edidp;
+       int ret;
+
+       ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+       if (ddc_node) {
+               channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+               of_node_put(ddc_node);
+               if (!channel->ddc) {
+                       dev_warn(dev, "failed to get ddc i2c adapter\n");
+                       return -EPROBE_DEFER;
+               }
+       }
+
+       if (!channel->ddc) {
+               /* if no DDC is available, fall back to a hardcoded EDID */
+               dev_dbg(dev, "no ddc available\n");
+
+               edidp = of_get_property(child, "edid",
+                                       &channel->edid_len);
+               if (edidp) {
+                       channel->edid = kmemdup(edidp,
+                                               channel->edid_len,
+                                               GFP_KERNEL);
+               } else if (!channel->panel) {
+                       /* fall back to the display-timings node */
+                       ret = of_get_drm_display_mode(child,
+                                                     &channel->mode,
+                                                     &channel->bus_flags,
+                                                     OF_USE_NATIVE_MODE);
+                       if (!ret)
+                               channel->mode_valid = 1;
+               }
+       }
+       return 0;
+}
+
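The helper factored out above encodes a three-step mode-source fallback: a DDC i2c bus if the "ddc-i2c-bus" phandle is present, else a hardcoded "edid" property, else the display-timings node. A hedged sketch of how a connector ->get_modes callback typically consumes the result (assuming a channel struct with the edid, mode and mode_valid members used above; connector_to_channel() is a hypothetical container_of wrapper):

	static int example_connector_get_modes(struct drm_connector *connector)
	{
		struct example_channel *ch = connector_to_channel(connector);
		int num_modes = 0;

		if (ch->edid) {		/* from a DDC probe or the "edid" property */
			drm_mode_connector_update_edid_property(connector, ch->edid);
			num_modes = drm_add_edid_modes(connector, ch->edid);
		} else if (ch->mode_valid) {	/* from the display-timings node */
			struct drm_display_mode *mode = drm_mode_create(connector->dev);

			if (!mode)
				return 0;
			drm_mode_copy(mode, &ch->mode);
			mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}

		return num_modes;
	}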
 static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = data;
@@ -555,7 +619,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
        const struct of_device_id *of_id =
                        of_match_device(imx_ldb_dt_ids, dev);
        struct device_node *child;
-       const u8 *edidp;
        struct imx_ldb *imx_ldb;
        int dual;
        int ret;
@@ -605,7 +668,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 
        for_each_child_of_node(np, child) {
                struct imx_ldb_channel *channel;
-               struct device_node *ddc_node;
                struct device_node *ep;
                int bus_format;
 
@@ -638,46 +700,25 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 
                        remote = of_graph_get_remote_port_parent(ep);
                        of_node_put(ep);
-                       if (remote)
+                       if (remote) {
                                channel->panel = of_drm_find_panel(remote);
-                       else
+                               channel->bridge = of_drm_find_bridge(remote);
+                       } else
                                return -EPROBE_DEFER;
                        of_node_put(remote);
-                       if (!channel->panel) {
-                               dev_err(dev, "panel not found: %s\n",
+
+                       if (!channel->panel && !channel->bridge) {
+                               dev_err(dev, "panel/bridge not found: %s\n",
                                        remote->full_name);
                                return -EPROBE_DEFER;
                        }
                }
 
-               ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
-               if (ddc_node) {
-                       channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
-                       of_node_put(ddc_node);
-                       if (!channel->ddc) {
-                               dev_warn(dev, "failed to get ddc i2c adapter\n");
-                               return -EPROBE_DEFER;
-                       }
-               }
-
-               if (!channel->ddc) {
-                       /* if no DDC available, fallback to hardcoded EDID */
-                       dev_dbg(dev, "no ddc available\n");
-
-                       edidp = of_get_property(child, "edid",
-                                               &channel->edid_len);
-                       if (edidp) {
-                               channel->edid = kmemdup(edidp,
-                                                       channel->edid_len,
-                                                       GFP_KERNEL);
-                       } else if (!channel->panel) {
-                               /* fallback to display-timings node */
-                               ret = of_get_drm_display_mode(child,
-                                                             &channel->mode,
-                                                             OF_USE_NATIVE_MODE);
-                               if (!ret)
-                                       channel->mode_valid = 1;
-                       }
+               /* probe the panel DDC/EDID only if there is no bridge */
+               if (!channel->bridge) {
+                       ret = imx_ldb_panel_ddc(dev, channel, child);
+                       if (ret)
+                               return ret;
                }
 
                bus_format = of_get_bus_format(dev, child);
@@ -716,11 +757,10 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
        for (i = 0; i < 2; i++) {
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
-               if (!channel->connector.funcs)
-                       continue;
-
-               channel->connector.funcs->destroy(&channel->connector);
-               channel->encoder.funcs->destroy(&channel->encoder);
+               if (channel->bridge)
+                       drm_bridge_detach(channel->bridge);
+               if (channel->panel)
+                       drm_panel_detach(channel->panel);
 
                kfree(channel->edid);
                i2c_put_adapter(channel->ddc);
index 5e875944ffa214f48f06d7ed8b687aabd1705ffb..8fc088843e553c2def5353e74bdd47171824478e 100644 (file)
@@ -685,9 +685,6 @@ static void imx_tve_unbind(struct device *dev, struct device *master,
 {
        struct imx_tve *tve = dev_get_drvdata(dev);
 
-       tve->connector.funcs->destroy(&tve->connector);
-       tve->encoder.funcs->destroy(&tve->encoder);
-
        if (!IS_ERR(tve->dac_reg))
                regulator_disable(tve->dac_reg);
 }
index 08e188bc10fc6200501d4f13fccc999970a35566..4e1ae3fc462dc65591d2fa5b3f6dffe3ee8a4ad4 100644 (file)
@@ -21,7 +21,6 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -61,7 +60,8 @@ static void ipu_crtc_enable(struct drm_crtc *crtc)
        ipu_di_enable(ipu_crtc->di);
 }
 
-static void ipu_crtc_disable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -76,6 +76,11 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
+
+       /* always disable planes on the CRTC */
+       drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
+       drm_crtc_vblank_off(crtc);
 }
 
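Together with the atomic_begin hunk below, which adds drm_crtc_vblank_on(), the CRTC now brackets vblank handling itself and disables its planes in ->atomic_disable; this is what makes the DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET flag in the commit_tail above safe. A reduced sketch:

	static void example_crtc_atomic_disable(struct drm_crtc *crtc,
						struct drm_crtc_state *old_crtc_state)
	{
		/* ... stop the scanout hardware, complete any pending event ... */

		/* disable planes here so the commit helper need not do it again */
		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);

		/* reject vblank references until ->atomic_begin turns them back on */
		drm_crtc_vblank_off(crtc);
	}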
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -121,9 +126,14 @@ static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
        kfree(to_imx_crtc_state(state));
 }
 
+static void imx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+       imx_drm_remove_crtc(to_ipu_crtc(crtc)->imx_crtc);
+}
+
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
-       .destroy = drm_crtc_cleanup,
+       .destroy = imx_drm_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = imx_drm_crtc_reset,
        .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -134,7 +144,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 {
        struct ipu_crtc *ipu_crtc = dev_id;
 
-       imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+       drm_crtc_handle_vblank(&ipu_crtc->base);
 
        return IRQ_HANDLED;
 }
@@ -175,6 +185,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
 {
+       drm_crtc_vblank_on(crtc);
+
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc));
@@ -242,7 +254,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
        .mode_set_nofb = ipu_crtc_mode_set_nofb,
        .atomic_check = ipu_crtc_atomic_check,
        .atomic_begin = ipu_crtc_atomic_begin,
-       .disable = ipu_crtc_disable,
+       .atomic_disable = ipu_crtc_atomic_disable,
        .enable = ipu_crtc_enable,
 };
 
@@ -410,8 +422,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 {
        struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
 
-       imx_drm_remove_crtc(ipu_crtc->imx_crtc);
-
        ipu_put_resources(ipu_crtc);
        if (ipu_crtc->plane[1])
                ipu_plane_put_resources(ipu_crtc->plane[1]);
index 4ad67d015ec7fdcb32dc84b17ebb99eb185590d2..ce22d0a0ddc8116e8026571f7bd700f461f5ee08 100644 (file)
@@ -213,8 +213,12 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
                ipu_dp_enable_channel(ipu_plane->dp);
 }
 
-static void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
+       struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
        ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
        if (ipu_plane->dp)
@@ -223,15 +227,6 @@ static void ipu_plane_disable(struct ipu_plane *ipu_plane)
        ipu_dmfc_disable_channel(ipu_plane->dmfc);
        if (ipu_plane->dp)
                ipu_dp_disable(ipu_plane->ipu);
-}
-
-static int ipu_disable_plane(struct drm_plane *plane)
-{
-       struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       ipu_plane_disable(ipu_plane);
 
        return 0;
 }
@@ -242,7 +237,6 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       ipu_disable_plane(plane);
        drm_plane_cleanup(plane);
        kfree(ipu_plane);
 }
@@ -319,13 +313,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        /*
-        * since we cannot touch active IDMAC channels, we do not support
-        * resizing the enabled plane or changing its format
+        * We support resizing active plane or changing its format by
+        * forcing CRTC mode change in plane's ->atomic_check callback
+        * and disabling all affected active planes in CRTC's ->atomic_disable
+        * callback.  The planes will be reenabled in plane's ->atomic_update
+        * callback.
         */
        if (old_fb && (state->src_w != old_state->src_w ||
                              state->src_h != old_state->src_h ||
                              fb->pixel_format != old_fb->pixel_format))
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +333,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_YUV420:
@@ -372,7 +369,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                        return -EINVAL;
 
                if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-                       return -EINVAL;
+                       crtc_state->mode_changed = true;
        }
 
        return 0;
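All three -EINVAL returns replaced in this hunk had the same root cause: an active IDMAC channel cannot be reconfigured. Instead of rejecting the commit, the check now escalates it to a full modeset, so the CRTC is torn down and brought back up around the change. A reduced sketch of the pattern (hypothetical example_* names):

	static int example_plane_atomic_check(struct drm_plane *plane,
					      struct drm_plane_state *state)
	{
		struct drm_plane_state *old_state = plane->state; /* old state here */
		struct drm_crtc_state *crtc_state;

		if (!state->crtc)
			return 0;

		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		/*
		 * A change the scanout engine cannot absorb on the fly forces a
		 * modeset: the CRTC's ->atomic_disable shuts the planes down and
		 * the plane's ->atomic_update re-enables them afterwards.
		 */
		if (old_state->fb &&
		    (state->src_w != old_state->src_w ||
		     state->src_h != old_state->src_h ||
		     state->fb->pixel_format != old_state->fb->pixel_format))
			crtc_state->mode_changed = true;

		return 0;
	}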
@@ -392,8 +389,12 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        enum ipu_color_space ics;
 
        if (old_state->fb) {
-               ipu_plane_atomic_set_base(ipu_plane, old_state);
-               return;
+               struct drm_crtc_state *crtc_state = state->crtc->state;
+
+               if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
+                       ipu_plane_atomic_set_base(ipu_plane, old_state);
+                       return;
+               }
        }
 
        switch (ipu_plane->dp_flow) {
index 1dad297b01fd5a5723b5cba7dd92b352057c7d7a..d796ada2a47aba793ace3968454688ce134fcb3f 100644 (file)
@@ -33,6 +33,7 @@ struct imx_parallel_display {
        void *edid;
        int edid_len;
        u32 bus_format;
+       u32 bus_flags;
        struct drm_display_mode mode;
        struct drm_panel *panel;
        struct drm_bridge *bridge;
@@ -80,6 +81,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                        return -EINVAL;
 
                ret = of_get_drm_display_mode(np, &imxpd->mode,
+                                             &imxpd->bus_flags,
                                              OF_USE_NATIVE_MODE);
                if (ret)
                        return ret;
@@ -125,11 +127,13 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
        struct drm_display_info *di = &conn_state->connector->display_info;
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
-       imx_crtc_state->bus_flags = di->bus_flags;
-       if (!imxpd->bus_format && di->num_bus_formats)
+       if (!imxpd->bus_format && di->num_bus_formats) {
+               imx_crtc_state->bus_flags = di->bus_flags;
                imx_crtc_state->bus_format = di->bus_formats[0];
-       else
+       } else {
+               imx_crtc_state->bus_flags = imxpd->bus_flags;
                imx_crtc_state->bus_format = imxpd->bus_format;
+       }
        imx_crtc_state->di_hsync_pin = 2;
        imx_crtc_state->di_vsync_pin = 3;
 
@@ -289,8 +293,10 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
 {
        struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
 
-       imxpd->encoder.funcs->destroy(&imxpd->encoder);
-       imxpd->connector.funcs->destroy(&imxpd->connector);
+       if (imxpd->bridge)
+               drm_bridge_detach(imxpd->bridge);
+       if (imxpd->panel)
+               drm_panel_detach(imxpd->panel);
 
        kfree(imxpd->edid);
 }
index 23ac8041c562924120b6fdbe82eda351914e0e85..294de4549922a12f85ffe40d8cfd87b85694cd52 100644 (file)
@@ -2,6 +2,9 @@ config DRM_MEDIATEK
        tristate "DRM Support for Mediatek SoCs"
        depends on DRM
        depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+       depends on COMMON_CLK
+       depends on HAVE_ARM_SMCCC
+       depends on OF
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
index 8f62671fcfbf7b25751433ee581f8b91dc32a6ea..019b7ca392d7a49db0ffcb25ccb36f293f9d530e 100644 (file)
@@ -103,7 +103,8 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
 }
 
 static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
-                          unsigned int h, unsigned int vrefresh)
+                          unsigned int h, unsigned int vrefresh,
+                          unsigned int bpc)
 {
        if (w != 0 && h != 0)
                writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
index 5fb80cbe4c5b061e942eb844968844801bde1f1e..0df05f95b9163e5264f144f7ef355cec6a38a329 100644 (file)
@@ -106,7 +106,8 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
 }
 
 static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
-                           unsigned int height, unsigned int vrefresh)
+                           unsigned int height, unsigned int vrefresh,
+                           unsigned int bpc)
 {
        unsigned int threshold;
        unsigned int reg;
index 24aa3bad1e762a228616bc542dbb077f9ebe08c8..01a21dd835b57c865ba685f560aa204d19f7a636 100644 (file)
@@ -31,7 +31,7 @@
  * struct mtk_drm_crtc - MediaTek specific crtc structure.
  * @base: crtc object.
  * @enabled: records whether crtc_enable succeeded
- * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane
+ * @planes: array of 4 drm_plane structures, one for each overlay plane
  * @pending_planes: whether any plane has pending changes to be applied
  * @config_regs: memory mapped mmsys configuration register space
  * @mutex: handle to one of the ten disp_mutex streams
@@ -45,7 +45,7 @@ struct mtk_drm_crtc {
        bool                            pending_needs_vblank;
        struct drm_pending_vblank_event *event;
 
-       struct mtk_drm_plane            planes[OVL_LAYER_NR];
+       struct drm_plane                planes[OVL_LAYER_NR];
        bool                            pending_planes;
 
        void __iomem                    *config_regs;
@@ -112,8 +112,7 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
        struct mtk_crtc_state *state;
 
        if (crtc->state) {
-               if (crtc->state->mode_blob)
-                       drm_property_unreference_blob(crtc->state->mode_blob);
+               __drm_atomic_helper_crtc_destroy_state(crtc->state);
 
                state = to_mtk_crtc_state(crtc->state);
                memset(state, 0, sizeof(*state));
@@ -222,7 +221,9 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 {
        struct drm_crtc *crtc = &mtk_crtc->base;
-       unsigned int width, height, vrefresh;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
        int ret;
        int i;
 
@@ -234,6 +235,19 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        height = crtc->state->adjusted_mode.vdisplay;
        vrefresh = crtc->state->adjusted_mode.vrefresh;
 
+       drm_for_each_encoder(encoder, crtc->dev) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               drm_for_each_connector(connector, crtc->dev) {
+                       if (connector->encoder != encoder)
+                               continue;
+                       if (connector->display_info.bpc != 0 &&
+                           bpc > connector->display_info.bpc)
+                               bpc = connector->display_info.bpc;
+               }
+       }
+
        ret = pm_runtime_get_sync(crtc->dev->dev);
        if (ret < 0) {
                DRM_ERROR("Failed to enable power domain: %d\n", ret);
@@ -266,13 +280,13 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
 
-               mtk_ddp_comp_config(comp, width, height, vrefresh);
+               mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
                mtk_ddp_comp_start(comp);
        }
 
        /* Initially configure all planes */
        for (i = 0; i < OVL_LAYER_NR; i++) {
-               struct drm_plane *plane = &mtk_crtc->planes[i].base;
+               struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
                plane_state = to_mtk_plane_state(plane->state);
@@ -351,7 +365,7 @@ static void mtk_drm_crtc_disable(struct drm_crtc *crtc)
 
        /* Set all pending plane state to disabled */
        for (i = 0; i < OVL_LAYER_NR; i++) {
-               struct drm_plane *plane = &mtk_crtc->planes[i].base;
+               struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
                plane_state = to_mtk_plane_state(plane->state);
@@ -397,7 +411,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
        if (mtk_crtc->event)
                mtk_crtc->pending_needs_vblank = true;
        for (i = 0; i < OVL_LAYER_NR; i++) {
-               struct drm_plane *plane = &mtk_crtc->planes[i].base;
+               struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
                plane_state = to_mtk_plane_state(plane->state);
@@ -409,6 +423,9 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
        }
        if (pending_planes)
                mtk_crtc->pending_planes = true;
+       if (crtc->state->color_mgmt_changed)
+               for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+                       mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
 }
 
 static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -418,6 +435,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
        .reset                  = mtk_drm_crtc_reset,
        .atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
        .atomic_destroy_state   = mtk_drm_crtc_destroy_state,
+       .gamma_set              = drm_atomic_helper_legacy_gamma_set,
 };
 
 static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
@@ -464,14 +482,14 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
        if (state->pending_config) {
                mtk_ddp_comp_config(ovl, state->pending_width,
                                    state->pending_height,
-                                   state->pending_vrefresh);
+                                   state->pending_vrefresh, 0);
 
                state->pending_config = false;
        }
 
        if (mtk_crtc->pending_planes) {
                for (i = 0; i < OVL_LAYER_NR; i++) {
-                       struct drm_plane *plane = &mtk_crtc->planes[i].base;
+                       struct drm_plane *plane = &mtk_crtc->planes[i];
                        struct mtk_plane_state *plane_state;
 
                        plane_state = to_mtk_plane_state(plane->state);
@@ -559,16 +577,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                                (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
                                                DRM_PLANE_TYPE_OVERLAY;
                ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
-                                    BIT(pipe), type, zpos);
+                                    BIT(pipe), type);
                if (ret)
                        goto unprepare;
        }
 
-       ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base,
-                               &mtk_crtc->planes[1].base, pipe);
+       ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
+                               &mtk_crtc->planes[1], pipe);
        if (ret < 0)
                goto unprepare;
-
+       drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
+       drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
        priv->crtc[pipe] = &mtk_crtc->base;
        priv->num_pipes++;
 
index 81e5566ec82f69fc2be9016f24c9139f3a490ab3..a1550fa3c9d2c119f9367eb546084750c4f20cb9 100644 (file)
 #include "mtk_drm_plane.h"
 
 #define OVL_LAYER_NR   4
+#define MTK_LUT_SIZE   512
+#define MTK_MAX_BPC    10
+#define MTK_MIN_BPC    3
 
 int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe);
 void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe);
-void mtk_drm_crtc_check_flush(struct drm_crtc *crtc);
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
 void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
index 3970fcf0f05f7419479b53e59c4d03bfeef17852..df33b3ca6ffd5b2038e3e01d81ada28ffd33462e 100644 (file)
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_crtc.h"
 
 #define DISP_OD_EN                             0x0000
 #define DISP_OD_INTEN                          0x0008
 #define DISP_OD_INTSTA                         0x000c
 #define DISP_OD_CFG                            0x0020
 #define DISP_OD_SIZE                           0x0030
+#define DISP_DITHER_5                          0x0114
+#define DISP_DITHER_7                          0x011c
+#define DISP_DITHER_15                         0x013c
+#define DISP_DITHER_16                         0x0140
 
 #define DISP_REG_UFO_START                     0x0000
 
 #define DISP_COLOR_WIDTH                       0x0c50
 #define DISP_COLOR_HEIGHT                      0x0c54
 
-#define        OD_RELAY_MODE           BIT(0)
+#define DISP_AAL_EN                            0x0000
+#define DISP_AAL_SIZE                          0x0030
 
-#define        UFO_BYPASS              BIT(2)
+#define DISP_GAMMA_EN                          0x0000
+#define DISP_GAMMA_CFG                         0x0020
+#define DISP_GAMMA_SIZE                                0x0030
+#define DISP_GAMMA_LUT                         0x0700
 
-#define        COLOR_BYPASS_ALL        BIT(7)
-#define        COLOR_SEQ_SEL           BIT(13)
+#define LUT_10BIT_MASK                         0x03ff
+
+#define COLOR_BYPASS_ALL                       BIT(7)
+#define COLOR_SEQ_SEL                          BIT(13)
+
+#define OD_RELAYMODE                           BIT(0)
+
+#define UFO_BYPASS                             BIT(2)
+
+#define AAL_EN                                 BIT(0)
+
+#define GAMMA_EN                               BIT(0)
+#define GAMMA_LUT_EN                           BIT(1)
+
+#define DISP_DITHERING                         BIT(2)
+#define DITHER_LSB_ERR_SHIFT_R(x)              (((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_R(x)                  (((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_R(x)                 (((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_R(x)                 (((x) & 0x7) << 16)
+#define DITHER_NEW_BIT_MODE                    BIT(0)
+#define DITHER_LSB_ERR_SHIFT_B(x)              (((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_B(x)                  (((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_B(x)                 (((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_B(x)                 (((x) & 0x7) << 16)
+#define DITHER_LSB_ERR_SHIFT_G(x)              (((x) & 0x7) << 12)
+#define DITHER_OVFLW_BIT_G(x)                  (((x) & 0x7) << 8)
+#define DITHER_ADD_LSHIFT_G(x)                 (((x) & 0x7) << 4)
+#define DITHER_ADD_RSHIFT_G(x)                 (((x) & 0x7) << 0)
+
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+                   unsigned int CFG)
+{
+       /* If bpc is 0, dithering is not enabled */
+       if (bpc == 0)
+               return;
+
+       if (bpc >= MTK_MIN_BPC) {
+               writel(0, comp->regs + DISP_DITHER_5);
+               writel(0, comp->regs + DISP_DITHER_7);
+               writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+                      DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+                      DITHER_NEW_BIT_MODE,
+                      comp->regs + DISP_DITHER_15);
+               writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+                      DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+                      DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+                      DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+                      comp->regs + DISP_DITHER_16);
+               writel(DISP_DITHERING, comp->regs + CFG);
+       }
+}
 
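mtk_dither_set() programs the number of bits to dither away as the difference between the 10-bit internal precision (MTK_MAX_BPC) and the sink's depth. A worked usage sketch for an 8-bit panel:

	/*
	 * With bpc == 8, MTK_MAX_BPC - bpc == 2: the DITHER_ADD_LSHIFT_* and
	 * DITHER_LSB_ERR_SHIFT_* fields are set to 2 so the two LSBs of each
	 * 10-bit channel are error-diffused away, and DISP_DITHERING is set
	 * in the component's CFG register.
	 */
	mtk_dither_set(comp, 8, DISP_GAMMA_CFG);

	/* bpc == 0 means the sink depth is unknown; dithering stays off */
	mtk_dither_set(comp, 0, DISP_GAMMA_CFG);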
 static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
-                            unsigned int h, unsigned int vrefresh)
+                            unsigned int h, unsigned int vrefresh,
+                            unsigned int bpc)
 {
        writel(w, comp->regs + DISP_COLOR_WIDTH);
        writel(h, comp->regs + DISP_COLOR_HEIGHT);
@@ -60,14 +119,16 @@ static void mtk_color_start(struct mtk_ddp_comp *comp)
 }
 
 static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
-                         unsigned int h, unsigned int vrefresh)
+                         unsigned int h, unsigned int vrefresh,
+                         unsigned int bpc)
 {
        writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
+       writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
+       mtk_dither_set(comp, bpc, DISP_OD_CFG);
 }
 
 static void mtk_od_start(struct mtk_ddp_comp *comp)
 {
-       writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG);
        writel(1, comp->regs + DISP_OD_EN);
 }
 
@@ -76,6 +137,78 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
        writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
 }
 
+static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
+                          unsigned int h, unsigned int vrefresh,
+                          unsigned int bpc)
+{
+       writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+}
+
+static void mtk_aal_start(struct mtk_ddp_comp *comp)
+{
+       writel(AAL_EN, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_aal_stop(struct mtk_ddp_comp *comp)
+{
+       writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
+                            unsigned int h, unsigned int vrefresh,
+                            unsigned int bpc)
+{
+       writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
+       mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+}
+
+static void mtk_gamma_start(struct mtk_ddp_comp *comp)
+{
+       writel(GAMMA_EN, comp->regs + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+{
+       writel_relaxed(0x0, comp->regs + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_set(struct mtk_ddp_comp *comp,
+                         struct drm_crtc_state *state)
+{
+       unsigned int i, reg;
+       struct drm_color_lut *lut;
+       void __iomem *lut_base;
+       u32 word;
+
+       if (state->gamma_lut) {
+               reg = readl(comp->regs + DISP_GAMMA_CFG);
+               reg = reg | GAMMA_LUT_EN;
+               writel(reg, comp->regs + DISP_GAMMA_CFG);
+               lut_base = comp->regs + DISP_GAMMA_LUT;
+               lut = (struct drm_color_lut *)state->gamma_lut->data;
+               for (i = 0; i < MTK_LUT_SIZE; i++) {
+                       word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+                               (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+                               ((lut[i].blue >> 6) & LUT_10BIT_MASK);
+                       writel(word, (lut_base + i * 4));
+               }
+       }
+}
+
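Each drm_color_lut channel is a 16-bit value, so the >> 6 above keeps the top 10 bits, and one LUT word packs red into bits 29:20, green into 19:10 and blue into 9:0. A worked example:

	struct drm_color_lut entry = { .red = 0xffff, .green = 0x8000, .blue = 0x0 };
	u32 word = (((entry.red   >> 6) & LUT_10BIT_MASK) << 20) +
		   (((entry.green >> 6) & LUT_10BIT_MASK) << 10) +
		    ((entry.blue  >> 6) & LUT_10BIT_MASK);
	/*
	 * 0xffff >> 6 == 0x3ff and 0x8000 >> 6 == 0x200, so
	 * word == (0x3ff << 20) | (0x200 << 10) | 0x0 == 0x3ff80000.
	 */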
+static const struct mtk_ddp_comp_funcs ddp_aal = {
+       .gamma_set = mtk_gamma_set,
+       .config = mtk_aal_config,
+       .start = mtk_aal_start,
+       .stop = mtk_aal_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_gamma = {
+       .gamma_set = mtk_gamma_set,
+       .config = mtk_gamma_config,
+       .start = mtk_gamma_start,
+       .stop = mtk_gamma_stop,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_color = {
        .config = mtk_color_config,
        .start = mtk_color_start,
@@ -112,13 +245,13 @@ struct mtk_ddp_comp_match {
 };
 
 static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
-       [DDP_COMPONENT_AAL]     = { MTK_DISP_AAL,       0, NULL },
+       [DDP_COMPONENT_AAL]     = { MTK_DISP_AAL,       0, &ddp_aal },
        [DDP_COMPONENT_COLOR0]  = { MTK_DISP_COLOR,     0, &ddp_color },
        [DDP_COMPONENT_COLOR1]  = { MTK_DISP_COLOR,     1, &ddp_color },
        [DDP_COMPONENT_DPI0]    = { MTK_DPI,            0, NULL },
        [DDP_COMPONENT_DSI0]    = { MTK_DSI,            0, NULL },
        [DDP_COMPONENT_DSI1]    = { MTK_DSI,            1, NULL },
-       [DDP_COMPONENT_GAMMA]   = { MTK_DISP_GAMMA,     0, NULL },
+       [DDP_COMPONENT_GAMMA]   = { MTK_DISP_GAMMA,     0, &ddp_gamma },
        [DDP_COMPONENT_OD]      = { MTK_DISP_OD,        0, &ddp_od },
        [DDP_COMPONENT_OVL0]    = { MTK_DISP_OVL,       0, NULL },
        [DDP_COMPONENT_OVL1]    = { MTK_DISP_OVL,       1, NULL },
index 6b13ba97094deb8f5edcab446fa277003329eed7..22a33ee451c4d96ef3f409c08c9f215a75ece97e 100644 (file)
@@ -21,6 +21,7 @@ struct device_node;
 struct drm_crtc;
 struct drm_device;
 struct mtk_plane_state;
+struct drm_crtc_state;
 
 enum mtk_ddp_comp_type {
        MTK_DISP_OVL,
@@ -64,7 +65,7 @@ struct mtk_ddp_comp;
 
 struct mtk_ddp_comp_funcs {
        void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
-                      unsigned int h, unsigned int vrefresh);
+                      unsigned int h, unsigned int vrefresh, unsigned int bpc);
        void (*start)(struct mtk_ddp_comp *comp);
        void (*stop)(struct mtk_ddp_comp *comp);
        void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
@@ -73,6 +74,8 @@ struct mtk_ddp_comp_funcs {
        void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
        void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
                             struct mtk_plane_state *state);
+       void (*gamma_set)(struct mtk_ddp_comp *comp,
+                         struct drm_crtc_state *state);
 };
 
 struct mtk_ddp_comp {
@@ -86,10 +89,10 @@ struct mtk_ddp_comp {
 
 static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
                                       unsigned int w, unsigned int h,
-                                      unsigned int vrefresh)
+                                      unsigned int vrefresh, unsigned int bpc)
 {
        if (comp->funcs && comp->funcs->config)
-               comp->funcs->config(comp, w, h, vrefresh);
+               comp->funcs->config(comp, w, h, vrefresh, bpc);
 }
 
 static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -139,6 +142,13 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
                comp->funcs->layer_config(comp, idx, state);
 }
 
+static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
+                                    struct drm_crtc_state *state)
+{
+       if (comp->funcs && comp->funcs->gamma_set)
+               comp->funcs->gamma_set(comp, state);
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
                        enum mtk_ddp_comp_type comp_type);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -146,5 +156,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
                      const struct mtk_ddp_comp_funcs *funcs);
 int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
 void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+                   unsigned int CFG);
 
 #endif /* MTK_DRM_DDP_COMP_H */
index eebb7d881c2ba88b9b50a309a6c37b7087feb77d..cf83f6507ec8211566f87cbf4878e5d76c3eff0b 100644 (file)
@@ -61,10 +61,27 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
 
        mtk_atomic_wait_for_fences(state);
 
+       /*
+        * Mediatek drm supports runtime PM, so plane registers cannot be
+        * written when their crtc is disabled.
+        *
+        * The comment for drm_atomic_helper_commit states:
+        *     For drivers supporting runtime PM the recommended sequence is
+        *
+        *     drm_atomic_helper_commit_modeset_disables(dev, state);
+        *     drm_atomic_helper_commit_modeset_enables(dev, state);
+        *     drm_atomic_helper_commit_planes(dev, state,
+        *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
+        *
+        * See the kerneldoc entries for these three functions for more details.
+        */
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, false);
        drm_atomic_helper_commit_modeset_enables(drm, state);
+       drm_atomic_helper_commit_planes(drm, state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
        drm_atomic_helper_wait_for_vblanks(drm, state);
+
        drm_atomic_helper_cleanup_planes(drm, state);
        drm_atomic_state_free(state);
 }
@@ -277,8 +294,8 @@ static int mtk_drm_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&mtk_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drm->dev_private = private;
        private->drm = drm;
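This two-line change tracks a DRM core change in the same merge window: drm_dev_alloc() now returns an ERR_PTR-encoded errno instead of NULL on failure, so the old NULL check would treat a failure as success and dereference an error pointer later. The updated idiom:

	drm = drm_dev_alloc(&mtk_drm_driver, dev);
	if (IS_ERR(drm))		/* e.g. ERR_PTR(-ENOMEM) */
		return PTR_ERR(drm);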
index 3995765a90dcd868d124b3248f6fa28cd4554afc..c461a232cbf5b0f5ebf589481f4391e7f27f24e0 100644 (file)
@@ -30,57 +30,12 @@ static const u32 formats[] = {
        DRM_FORMAT_RGB565,
 };
 
-static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable,
-                            dma_addr_t addr, struct drm_rect *dest)
-{
-       struct drm_plane *plane = &mtk_plane->base;
-       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-       unsigned int pitch, format;
-       int x, y;
-
-       if (WARN_ON(!plane->state || (enable && !plane->state->fb)))
-               return;
-
-       if (plane->state->fb) {
-               pitch = plane->state->fb->pitches[0];
-               format = plane->state->fb->pixel_format;
-       } else {
-               pitch = 0;
-               format = DRM_FORMAT_RGBA8888;
-       }
-
-       x = plane->state->crtc_x;
-       y = plane->state->crtc_y;
-
-       if (x < 0) {
-               addr -= x * 4;
-               x = 0;
-       }
-
-       if (y < 0) {
-               addr -= y * pitch;
-               y = 0;
-       }
-
-       state->pending.enable = enable;
-       state->pending.pitch = pitch;
-       state->pending.format = format;
-       state->pending.addr = addr;
-       state->pending.x = x;
-       state->pending.y = y;
-       state->pending.width = dest->x2 - dest->x1;
-       state->pending.height = dest->y2 - dest->y1;
-       wmb(); /* Make sure the above parameters are set before update */
-       state->pending.dirty = true;
-}
-
 static void mtk_plane_reset(struct drm_plane *plane)
 {
        struct mtk_plane_state *state;
 
        if (plane->state) {
-               if (plane->state->fb)
-                       drm_framebuffer_unreference(plane->state->fb);
+               __drm_atomic_helper_plane_destroy_state(plane->state);
 
                state = to_mtk_plane_state(plane->state);
                memset(state, 0, sizeof(*state));
@@ -134,20 +89,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
 {
        struct drm_framebuffer *fb = state->fb;
        struct drm_crtc_state *crtc_state;
-       bool visible;
-       struct drm_rect dest = {
-               .x1 = state->crtc_x,
-               .y1 = state->crtc_y,
-               .x2 = state->crtc_x + state->crtc_w,
-               .y2 = state->crtc_y + state->crtc_h,
-       };
-       struct drm_rect src = {
-               /* 16.16 fixed point */
-               .x1 = state->src_x,
-               .y1 = state->src_y,
-               .x2 = state->src_x + state->src_w,
-               .y2 = state->src_y + state->src_h,
-       };
        struct drm_rect clip = { 0, };
 
        if (!fb)
@@ -168,40 +109,45 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
        clip.x2 = crtc_state->mode.hdisplay;
        clip.y2 = crtc_state->mode.vdisplay;
 
-       return drm_plane_helper_check_update(plane, state->crtc, fb,
-                                            &src, &dest, &clip,
-                                            state->rotation,
-                                            DRM_PLANE_HELPER_NO_SCALING,
-                                            DRM_PLANE_HELPER_NO_SCALING,
-                                            true, true, &visible);
+       return drm_plane_helper_check_state(state, &clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           true, true);
 }
 
 static void mtk_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
 {
        struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-       struct drm_crtc *crtc = state->base.crtc;
+       struct drm_crtc *crtc = plane->state->crtc;
+       struct drm_framebuffer *fb = plane->state->fb;
        struct drm_gem_object *gem;
        struct mtk_drm_gem_obj *mtk_gem;
-       struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane);
-       struct drm_rect dest = {
-               .x1 = state->base.crtc_x,
-               .y1 = state->base.crtc_y,
-               .x2 = state->base.crtc_x + state->base.crtc_w,
-               .y2 = state->base.crtc_y + state->base.crtc_h,
-       };
-       struct drm_rect clip = { 0, };
+       unsigned int pitch, format;
+       dma_addr_t addr;
 
-       if (!crtc)
+       if (!crtc || WARN_ON(!fb))
                return;
 
-       clip.x2 = state->base.crtc->state->mode.hdisplay;
-       clip.y2 = state->base.crtc->state->mode.vdisplay;
-       drm_rect_intersect(&dest, &clip);
-
-       gem = mtk_fb_get_gem_obj(state->base.fb);
+       gem = mtk_fb_get_gem_obj(fb);
        mtk_gem = to_mtk_gem_obj(gem);
-       mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest);
+       addr = mtk_gem->dma_addr;
+       pitch = fb->pitches[0];
+       format = fb->pixel_format;
+
+       addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0);
+       addr += (plane->state->src.y1 >> 16) * pitch;
+
+       state->pending.enable = true;
+       state->pending.pitch = pitch;
+       state->pending.format = format;
+       state->pending.addr = addr;
+       state->pending.x = plane->state->dst.x1;
+       state->pending.y = plane->state->dst.y1;
+       state->pending.width = drm_rect_width(&plane->state->dst);
+       state->pending.height = drm_rect_height(&plane->state->dst);
+       wmb(); /* Make sure the above parameters are set before update */
+       state->pending.dirty = true;
 }
 
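The switch to drm_plane_helper_check_state() in the previous hunk is what makes this rewrite possible: the helper clips and stores the plane's source and destination rectangles in the state, so ->atomic_update can read state->src and state->dst instead of re-deriving them. Source coordinates are 16.16 fixed point, hence the >> 16. A worked example, assuming an XRGB8888 framebuffer (4 bytes per pixel) with src.x1 == 8 << 16 and src.y1 == 2 << 16:

	dma_addr_t addr = mtk_gem->dma_addr;
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, 0);	/* 4 */

	addr += (plane->state->src.x1 >> 16) * cpp;		/* + 8 * 4 = 32 bytes */
	addr += (plane->state->src.y1 >> 16) * fb->pitches[0];	/* + 2 lines of stride */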
 static void mtk_plane_atomic_disable(struct drm_plane *plane,
@@ -220,13 +166,12 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
        .atomic_disable = mtk_plane_atomic_disable,
 };
 
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
-                  unsigned long possible_crtcs, enum drm_plane_type type,
-                  unsigned int zpos)
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs, enum drm_plane_type type)
 {
        int err;
 
-       err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs,
+       err = drm_universal_plane_init(dev, plane, possible_crtcs,
                                       &mtk_plane_funcs, formats,
                                       ARRAY_SIZE(formats), type, NULL);
        if (err) {
@@ -234,8 +179,7 @@ int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
                return err;
        }
 
-       drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs);
-       mtk_plane->idx = zpos;
+       drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
 
        return 0;
 }
index 72a7b3e4c126f8833c29e17775746811c9251533..6a20b49e0f2fec768f4e40381809488655d4bd06 100644 (file)
 #include <drm/drm_crtc.h>
 #include <linux/types.h>
 
-struct mtk_drm_plane {
-       struct drm_plane                base;
-       unsigned int                    idx;
-};
-
 struct mtk_plane_pending_state {
        bool                            config;
        bool                            enable;
@@ -41,19 +36,13 @@ struct mtk_plane_state {
        struct mtk_plane_pending_state  pending;
 };
 
-static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane)
-{
-       return container_of(plane, struct mtk_drm_plane, base);
-}
-
 static inline struct mtk_plane_state *
 to_mtk_plane_state(struct drm_plane_state *state)
 {
        return container_of(state, struct mtk_plane_state, base);
 }
 
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
-                  unsigned long possible_crtcs, enum drm_plane_type type,
-                  unsigned int zpos);
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs, enum drm_plane_type type);
 
 #endif
index 5e2f131a6a72943f4f911c7428d507adae735bfa..25b2a1a424e66e4832ba8f3605f661b013bab783 100644 (file)
@@ -58,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_PCI_DMA |
+           DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY |
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_mga_buf_priv_t),
        .load = mga_driver_load,
index 2b4b125eebc3ef9e5987d3bcc32d429f5b15ff8e..1443b3a34775cbcaca50532482cfa32fd3052671 100644 (file)
@@ -56,7 +56,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
 #ifdef CONFIG_X86
        primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-       remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
        kfree(ap);
 }
 
index d9b04b008feba8487b2765d1f1a5aaadeab7ff97..88dd2214114dcd58d431b6a166ad83fcee5df287 100644 (file)
@@ -15,8 +15,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/fb.h>
-
 #include "mgag200_drv.h"
 
 static void mga_dirty_update(struct mga_fbdev *mfbdev,
@@ -185,8 +183,10 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
        }
 
        sysram = vmalloc(size);
-       if (!sysram)
+       if (!sysram) {
+               ret = -ENOMEM;
                goto err_sysram;
+       }
 
        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
index 13798b3e6bebbf0b8a62b39c60b9494659d5264d..e79cbc25ae3c54048b21c6293ea340b91b0750fc 100644 (file)
@@ -135,7 +135,7 @@ static int mga_vram_init(struct mga_device *mdev)
        aper->ranges[0].base = mdev->mc.vram_base;
        aper->ranges[0].size = mdev->mc.vram_window;
 
-       remove_conflicting_framebuffers(aper, "mgafb", true);
+       drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
        kfree(aper);
 
        if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
index 68268e55d595c343eed84053cb5bc19b4918f529..919b35f2ad2487443c97dc3af28916d67ba6c0fb 100644 (file)
@@ -150,7 +150,8 @@ static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *f
 {
        struct mgag200_bo *mgabo = mgag200_bo(bo);
 
-       return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&mgabo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
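
Here, and in the matching nouveau_bo hunk further down, drm_vma_node_verify_access() now takes the per-open struct drm_file rather than the struct file; for a DRM device node the former sits in filp->private_data. A toy illustration of that indirection (the types are stand-ins, not the real definitions):

#include <stdio.h>

struct drm_file { int authenticated; };	/* toy stand-in */
struct file { void *private_data; };	/* toy stand-in */

static int verify_access(struct drm_file *df)
{
	return df->authenticated ? 0 : -13;	/* -EACCES */
}

int main(void)
{
	struct drm_file df = { .authenticated = 1 };
	struct file filp = { .private_data = &df };

	/* The helper now wants the drm_file, hence filp->private_data. */
	printf("%d\n", verify_access(filp.private_data));
	return 0;
}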
index 7c7a0314a7563f46a62b1ef4f156aba7615ebc03..d96b2b6898a3739b220633b4a711955f2a385e76 100644 (file)
@@ -11,6 +11,7 @@ config DRM_MSM
        select TMPFS
        select QCOM_SCM
        select SND_SOC_HDMI_CODEC if SND_SOC
+       select SYNC_FILE
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index 973720792236fe4dfd68327137dbc2202501058e..a968cad509c2368b0699b184d1af1e58516dc84e 100644 (file)
@@ -422,11 +422,28 @@ static const struct {
 
 static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
 {
-       int gpio = of_get_named_gpio(of_node, name, 0);
+       int gpio;
+
+       /* try with the gpio names as in the table (downstream bindings) */
+       gpio = of_get_named_gpio(of_node, name, 0);
        if (gpio < 0) {
                char name2[32];
-               snprintf(name2, sizeof(name2), "%s-gpio", name);
+
+               /* try with the gpio names as in the upstream bindings */
+               snprintf(name2, sizeof(name2), "%s-gpios", name);
                gpio = of_get_named_gpio(of_node, name2, 0);
+               if (gpio < 0) {
+                       char name3[32];
+
+                       /*
+                        * try again after stripping out the "qcom,hdmi-tx"
+                        * prefix. This is mainly to match "hpd-gpios" used
+                        * in the upstream bindings
+                        */
+                       if (sscanf(name2, "qcom,hdmi-tx-%s", name3))
+                               gpio = of_get_named_gpio(of_node, name3, 0);
+               }
+
                if (gpio < 0) {
                        DBG("failed to get gpio: %s (%d)", name, gpio);
                        gpio = -1;
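
The fallback chain above tries the downstream table name first, then name + "-gpios" per the upstream bindings, and finally strips the "qcom,hdmi-tx-" prefix so a plain "hpd-gpios" property still matches. A userspace sketch of just the string handling (the driver's fixed 32-byte buffers are what keep the unbounded %s safe):

#include <stdio.h>

int main(void)
{
	char name2[32] = "qcom,hdmi-tx-hpd-gpios";
	char name3[32];

	/* Strip the downstream prefix, as the driver does above; the
	 * bare %s is only safe because both buffers are 32 bytes and
	 * the prefix consumes 13 of them. */
	if (sscanf(name2, "qcom,hdmi-tx-%s", name3) == 1)
		printf("upstream name: %s\n", name3);	/* "hpd-gpios" */
	return 0;
}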
index de9007e72f4e852960be7e078af599792edec6b7..73e20219d431a78c4ec9d3f335c430efbf2c49af 100644 (file)
@@ -243,7 +243,6 @@ void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
 
 struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
 {
-       struct drm_device *dev = hdmi->dev;
        struct hdmi_i2c_adapter *hdmi_i2c;
        struct i2c_adapter *i2c = NULL;
        int ret;
@@ -267,10 +266,8 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
        i2c->algo = &msm_hdmi_i2c_algorithm;
 
        ret = i2c_add_adapter(i2c);
-       if (ret) {
-               dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+       if (ret)
                goto fail;
-       }
 
        return i2c;
 
index 7b39e89fbc2b488adcfc90745732539e57669dad..571a91ee9607df6705eb3629d3b156a23e570cc5 100644 (file)
@@ -228,18 +228,21 @@ static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
        struct device_node *endpoint, *panel_node;
        struct device_node *np = dev->dev->of_node;
 
-       endpoint = of_graph_get_next_endpoint(np, NULL);
+       /*
+        * LVDS/LCDC is the first port described in the list of ports in the
+        * MDP4 DT node.
+        */
+       endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
        if (!endpoint) {
-               DBG("no endpoint in MDP4 to fetch LVDS panel\n");
+               DBG("no LVDS remote endpoint\n");
                return NULL;
        }
 
-       /* don't proceed if we have an endpoint but no panel_node tied to it */
        panel_node = of_graph_get_remote_port_parent(endpoint);
        if (!panel_node) {
-               dev_err(dev->dev, "no valid panel node\n");
+               DBG("no valid panel node in LVDS endpoint\n");
                of_node_put(endpoint);
-               return ERR_PTR(-ENODEV);
+               return NULL;
        }
 
        of_node_put(endpoint);
@@ -262,14 +265,12 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
        switch (intf_type) {
        case DRM_MODE_ENCODER_LVDS:
                /*
-                * bail out early if:
-                * - there is no panel node (no need to initialize lcdc
-                *   encoder and lvds connector), or
-                * - panel node is a bad pointer
+                * bail out early if there is no panel node (no need to
+                * initialize LCDC encoder and LVDS connector)
                 */
                panel_node = mdp4_detect_lcdc_panel(dev);
-               if (IS_ERR_OR_NULL(panel_node))
-                       return PTR_ERR(panel_node);
+               if (!panel_node)
+                       return 0;
 
                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
index bc3d8e719c6c2230ca47c8148e798bd67ecb2057..a06b064f86c1974707e67cfa475bda12cad26dcb 100644 (file)
@@ -93,7 +93,7 @@ static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
 };
 
 /* this should probably be a helper: */
-struct drm_connector *get_connector(struct drm_encoder *encoder)
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
index 9f96dfe67769938aedc3a2b561642cb47b7673b2..3903dbcda763a4f63914bd7c765f41b2c63b73fb 100644 (file)
@@ -81,7 +81,7 @@ static void mdp4_plane_install_properties(struct drm_plane *plane,
        // XXX
 }
 
-int mdp4_plane_set_property(struct drm_plane *plane,
+static int mdp4_plane_set_property(struct drm_plane *plane,
                struct drm_property *property, uint64_t val)
 {
        // XXX
@@ -99,7 +99,7 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
 };
 
 static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-               const struct drm_plane_state *new_state)
+                                struct drm_plane_state *new_state)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
        struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -113,7 +113,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
-               const struct drm_plane_state *old_state)
+                                 struct drm_plane_state *old_state)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
        struct mdp4_kms *mdp4_kms = get_kms(plane);
index 432c09836b0e2f02d6e7e64d9980a0f522f5b8a5..951c002b05df2701977eb3f8570aa004f1cddd28 100644 (file)
@@ -78,12 +78,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
        if (!dev->mode_config.rotation_property)
                dev->mode_config.rotation_property =
                        drm_mode_create_rotation_property(dev,
-                       BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+                               DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
 
        if (dev->mode_config.rotation_property)
                drm_object_attach_property(&plane->base,
                        dev->mode_config.rotation_property,
-                       0);
+                       DRM_ROTATE_0);
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -250,7 +250,7 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 };
 
 static int mdp5_plane_prepare_fb(struct drm_plane *plane,
-               const struct drm_plane_state *new_state)
+                                struct drm_plane_state *new_state)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -264,7 +264,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
-               const struct drm_plane_state *old_state)
+                                 struct drm_plane_state *old_state)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -309,8 +309,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
                        return -EINVAL;
                }
 
-               hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
-               vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+               hflip = !!(state->rotation & DRM_REFLECT_X);
+               vflip = !!(state->rotation & DRM_REFLECT_Y);
                if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
                        (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
                        dev_err(plane->dev->dev,
@@ -743,8 +743,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
        config |= get_scale_config(format, src_h, crtc_h, false);
        DBG("scale config = %x", config);
 
-       hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
-       vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
+       hflip = !!(pstate->rotation & DRM_REFLECT_X);
+       vflip = !!(pstate->rotation & DRM_REFLECT_Y);
 
        spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
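This hunk — like the omapdrm ones below — tracks the DRM core change that turned the DRM_ROTATE_*/DRM_REFLECT_* names from bit indices into ready-made masks, so every BIT(DRM_REFLECT_X) becomes a bare DRM_REFLECT_X. A sketch of the two encodings (the bit positions here are illustrative):

#include <stdio.h>

#define BIT(n) (1U << (n))

enum { OLD_ROTATE_0 = 0, OLD_REFLECT_X = 4 };	/* old: bit positions */

#define NEW_ROTATE_0	BIT(0)			/* new: ready-made masks */
#define NEW_REFLECT_X	BIT(4)

int main(void)
{
	unsigned int old_mask = BIT(OLD_ROTATE_0) | BIT(OLD_REFLECT_X);
	unsigned int new_mask = NEW_ROTATE_0 | NEW_REFLECT_X;

	printf("old %#x, new %#x\n", old_mask, new_mask);	/* identical */
	return 0;
}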
index 4a8a6f1f1151a7a824ce0da479e8cd3cb956e9a6..73bae382eac3acad17460a782e6422ec992a7963 100644 (file)
@@ -112,13 +112,13 @@ static void complete_commit(struct msm_commit *c, bool async)
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
 
-       drm_atomic_helper_wait_for_fences(dev, state);
+       drm_atomic_helper_wait_for_fences(dev, state, false);
 
        kms->funcs->prepare_commit(kms, state);
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state, false);
+       drm_atomic_helper_commit_planes(dev, state, 0);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
index 8a0237008f742a95d05c08c29139cfe81f535797..fb5c0b0a7594adcb0f38858cce0fc87d786f4eaa 100644 (file)
  * MSM driver version:
  * - 1.0.0 - initial interface
  * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ * - 1.2.0 - adds explicit fence support for submit ioctl
  */
 #define MSM_VERSION_MAJOR      1
-#define MSM_VERSION_MINOR      1
+#define MSM_VERSION_MINOR      2
 #define MSM_VERSION_PATCHLEVEL 0
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -347,9 +348,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        int ret;
 
        ddev = drm_dev_alloc(drv, dev);
-       if (!ddev) {
+       if (IS_ERR(ddev)) {
                dev_err(dev, "failed to allocate drm_device\n");
-               return -ENOMEM;
+               return PTR_ERR(ddev);
        }
 
        platform_set_drvdata(pdev, ddev);
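
drm_dev_alloc() now reports failure through an ERR_PTR()-encoded pointer rather than NULL, so this caller (and the nouveau one below) switches to IS_ERR()/PTR_ERR(). A userspace re-creation of the encoding, assuming the kernel convention that errno values occupy the top page of the address space:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)		{ return (void *)err; }
static long PTR_ERR(const void *p)	{ return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_dev(int fail)
{
	static int dev;
	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&dev;
}

int main(void)
{
	void *ddev = alloc_dev(1);

	if (IS_ERR(ddev))
		printf("alloc failed: %ld\n", PTR_ERR(ddev));	/* -12 */
	return 0;
}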
index b4bc7f1ef717f19d4708108bc83eb40c864fdccc..d0da52f2a806a2d3b9b46f957a996fa2da0e2133 100644 (file)
@@ -157,6 +157,12 @@ struct msm_drm_private {
        struct shrinker shrinker;
 
        struct msm_vblank_ctrl vblank_ctrl;
+
+       /* task holding struct_mutex.. currently only used in submit path
+        * to detect and reject faults from copy_from_user() for submit
+        * ioctl.
+        */
+       struct task_struct *struct_mutex_task;
 };
 
 struct msm_format {
index 6cd4af443139cbdf858a0ba24baeddb01292b785..b6ac27e3192964cbf2fa1f5a4c139ab282deaa71 100644 (file)
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
+       struct msm_drm_private *priv = dev->dev_private;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;
 
+       /* This should only happen if userspace tries to pass a mmap'd
+        * but unfaulted gem bo vaddr into submit ioctl, triggering
+        * a page fault while struct_mutex is already held.  This is
+        * not a valid use-case so just bail.
+        */
+       if (priv->struct_mutex_task == current)
+               return VM_FAULT_SIGBUS;
+
        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
@@ -584,18 +593,16 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
-
-       if (op & MSM_PREP_NOSYNC) {
-               if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
-                       return -EBUSY;
-       } else {
-               int ret;
-
-               ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
-                               true, timeout_to_jiffies(timeout));
-               if (ret <= 0)
-                       return ret == 0 ? -ETIMEDOUT : ret;
-       }
+       unsigned long remain =
+               op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
+       long ret;
+
+       ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+                                                 true, remain);
+       if (ret == 0)
+               return remain == 0 ? -EBUSY : -ETIMEDOUT;
+       else if (ret < 0)
+               return ret;
 
        /* TODO cache maintenance */
 
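The msm_gem_cpu_prep() rewrite above folds MSM_PREP_NOSYNC into a zero-jiffy wait: reservation_object_wait_timeout_rcu() returns 0 when the fences did not signal in time, and the caller tells "asked not to wait" (-EBUSY) apart from a real -ETIMEDOUT by whether the timeout was zero. A toy model of that mapping, with the Linux errno values defined locally:

#include <stdio.h>

#define EBUSY		16	/* Linux errno values, defined locally */
#define ETIMEDOUT	110

/* Hypothetical wait: >0 signalled, 0 timed out, <0 error. */
static long wait_timeout(long remain)
{
	return remain ? 1 : 0;
}

static int cpu_prep(int nosync)
{
	long remain = nosync ? 0 : 100;		/* jiffies */
	long ret = wait_timeout(remain);

	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	return ret < 0 ? (int)ret : 0;
}

int main(void)
{
	printf("nosync -> %d, sync -> %d\n", cpu_prep(1), cpu_prep(0));
	return 0;
}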
index 9766f9ae4b7d9a2593550bf7d013f988b61de46d..b6a0f37a65f30cad9f85d5773fca802e9ea3bb6b 100644 (file)
@@ -15,6 +15,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/sync_file.h>
+
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_gem.h"
@@ -64,6 +66,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
        kfree(submit);
 }
 
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+       if (access_ok(VERIFY_READ, from, n))
+               return __copy_from_user_inatomic(to, from, n);
+       return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
                struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +81,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
        int ret = 0;
 
        spin_lock(&file->table_lock);
+       pagefault_disable();
 
        for (i = 0; i < args->nr_bos; i++) {
                struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +95,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 */
                submit->bos[i].flags = 0;
 
-               ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-               if (ret) {
-                       ret = -EFAULT;
-                       goto out_unlock;
+               ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+               if (unlikely(ret)) {
+                       pagefault_enable();
+                       spin_unlock(&file->table_lock);
+                       ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+                       if (ret)
+                               goto out;
+                       spin_lock(&file->table_lock);
+                       pagefault_disable();
                }
 
                if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +143,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
        }
 
 out_unlock:
-       submit->nr_bos = i;
+       pagefault_enable();
        spin_unlock(&file->table_lock);
 
+out:
+       submit->nr_bos = i;
+
        return ret;
 }
 
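submit_lookup_objects() now copies the bo table with page faults disabled while the table spinlock is held; on a fault it drops both, retries with the sleeping copy_from_user(), and retakes them. Together with the struct_mutex_task check added to msm_gem_fault(), this avoids a self-deadlock when userspace passes a not-yet-faulted GEM mmap address into the submit ioctl. A userspace-flavoured sketch of the lock-drop-and-retry shape, with a mutex and a fake copy standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int faults_left = 1;

/* Fake copy_from_user_inatomic(): "faults" on its first call. */
static int copy_inatomic(void *dst, const void *src, size_t n)
{
	if (faults_left) {
		faults_left--;
		return -1;
	}
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	int src = 42, dst = 0;

	pthread_mutex_lock(&table_lock);
	if (copy_inatomic(&dst, &src, sizeof(dst))) {
		/* Slow path: drop the lock so the copy may sleep, retry,
		 * then retake it — the same shape as the hunk above. */
		pthread_mutex_unlock(&table_lock);
		memcpy(&dst, &src, sizeof(dst));
		pthread_mutex_lock(&table_lock);
	}
	pthread_mutex_unlock(&table_lock);

	printf("copied %d\n", dst);
	return 0;
}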
@@ -361,6 +380,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_gem_submit *submit;
        struct msm_gpu *gpu = priv->gpu;
+       struct fence *in_fence = NULL;
+       struct sync_file *sync_file = NULL;
+       int out_fence_fd = -1;
        unsigned i;
        int ret;
 
@@ -370,13 +392,25 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
-       if (args->pipe != MSM_PIPE_3D0)
+       if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
+               return -EINVAL;
+
+       if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
                return -EINVAL;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
+       if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+               out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+               if (out_fence_fd < 0) {
+                       ret = out_fence_fd;
+                       goto out_unlock;
+               }
+       }
+       priv->struct_mutex_task = current;
+
        submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
        if (!submit) {
                ret = -ENOMEM;
@@ -391,9 +425,32 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       ret = submit_fence_sync(submit);
-       if (ret)
-               goto out;
+       if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+               in_fence = sync_file_get_fence(args->fence_fd);
+
+               if (!in_fence) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               /* TODO if we get an array-fence due to userspace merging multiple
+                * fences, we need a way to determine if all the backing fences
+                * are from our own context..
+                */
+
+               if (in_fence->context != gpu->fctx->context) {
+                       ret = fence_wait(in_fence, true);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
+               ret = submit_fence_sync(submit);
+               if (ret)
+                       goto out;
+       }
 
        ret = submit_pin_objects(submit);
        if (ret)
@@ -459,15 +516,40 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
        submit->nr_cmds = i;
 
-       ret = msm_gpu_submit(gpu, submit, ctx);
+       submit->fence = msm_fence_alloc(gpu->fctx);
+       if (IS_ERR(submit->fence)) {
+               ret = PTR_ERR(submit->fence);
+               submit->fence = NULL;
+               goto out;
+       }
+
+       if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+               sync_file = sync_file_create(submit->fence);
+               if (!sync_file) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       msm_gpu_submit(gpu, submit, ctx);
 
        args->fence = submit->fence->seqno;
 
+       if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+               fd_install(out_fence_fd, sync_file->file);
+               args->fence_fd = out_fence_fd;
+       }
+
 out:
+       if (in_fence)
+               fence_put(in_fence);
        submit_cleanup(submit);
        if (ret)
                msm_gem_submit_free(submit);
 out_unlock:
+       if (ret && (out_fence_fd >= 0))
+               put_unused_fd(out_fence_fd);
+       priv->struct_mutex_task = NULL;
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
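
The MSM_SUBMIT_FENCE_FD_OUT path follows the usual anonymous-fd ordering: reserve a descriptor with get_unused_fd_flags() before anything can fail, create the sync_file, and fd_install() it only once the submit has succeeded, so the error path can put_unused_fd() without the fd ever having been visible to userspace. Schematically, with toy helpers in place of the fd-table calls:

#include <stdio.h>

/* Toy stand-ins for get_unused_fd_flags()/fd_install()/
 * put_unused_fd(); only the ordering matters here. */
static int reserve_fd(void)		{ return 3; }
static void install_fd(int fd)		{ printf("fd %d visible\n", fd); }
static void unreserve_fd(int fd)	{ printf("fd %d released\n", fd); }
static int do_submit(void)		{ return 0; /* -1 shows the error path */ }

int main(void)
{
	int fd = reserve_fd();		/* 1: reserve before anything can fail */

	if (do_submit() < 0) {
		unreserve_fd(fd);	/* 2: error - fd was never visible */
		return 1;
	}
	install_fd(fd);			/* 3: publish only after success */
	return 0;
}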
index 36ed53e661fe81f86a403a3ff30ee895983510ac..5bb09838b5ae2c2f421ca605a350e4c992751d8b 100644 (file)
@@ -509,22 +509,15 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
 {
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
-       int i, ret;
+       int i;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       submit->fence = msm_fence_alloc(gpu->fctx);
-       if (IS_ERR(submit->fence)) {
-               ret = PTR_ERR(submit->fence);
-               submit->fence = NULL;
-               return ret;
-       }
-
        inactive_cancel(gpu);
 
        list_add_tail(&submit->node, &gpu->submit_list);
@@ -557,8 +550,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        priv->lastctx = ctx;
 
        hangcheck_timer_reset(gpu);
-
-       return 0;
 }
 
 /*
index c9022837a1a47d0674150f04187b3a493ee7c15c..d61d98a6e047fe9575760b64db94dc39aeadf9c0 100644 (file)
@@ -163,7 +163,7 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
index f2ad17aa33f069897dd44013b7ad1a6240f23481..dc57b628e07473ad6e0810085c5fd960ef6b49bc 100644 (file)
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
        if (!parent_pdev)
                return false;
 
+       if (!parent_pdev->bridge_d3) {
+               /*
+                * Parent PCI bridge is currently not power managed.
+                * Since userspace can change these afterwards to be on
+                * the safe side we stick with _DSM and prevent usage of
+                * _PR3 from the bridge.
+                */
+               pci_d3cold_disable(pdev);
+               return false;
+       }
+
        parent_adev = ACPI_COMPANION(&parent_pdev->dev);
        if (!parent_adev)
                return false;
index 528bdeffb339ecaaa887c97887dc80427a153c77..66f31c3eb8ba417ba6b972092c1e7553267587f6 100644 (file)
@@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
@@ -1297,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        /* Fallback to software copy. */
        ret = ttm_bo_wait(bo, intr, no_wait_gpu);
        if (ret == 0)
-               ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
+               ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
 
 out:
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1315,7 +1315,8 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int
index 66c1280c0f1f2854aa501f810c50a11e6149eaaa..3100fd88a015f0e5288aade4c2c0e60027c2daed 100644 (file)
@@ -351,7 +351,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
        if (nouveau_modeset != 2)
-               remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+               drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
        kfree(aper);
 
        ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
@@ -1067,8 +1067,8 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
                goto err_free;
 
        drm = drm_dev_alloc(&driver_platform, &pdev->dev);
-       if (!drm) {
-               err = -ENOMEM;
+       if (IS_ERR(drm)) {
+               err = PTR_ERR(drm);
                goto err_free;
        }
 
index d1f248fd35061993db2529c4b93e557a4331e829..9f5692726c169086ec382a769e1eecb9526f08aa 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/screen_info.h>
 #include <linux/vga_switcheroo.h>
index 0eae8afaed90bb08fe49bf32f3559bb0bec739db..b1f3b818edf43590016225b4d310dc9921154daa 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/backlight.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
index fc4c238c95836f10bd9237745d6f484b7f4e71ad..9f3d6f48f3e16658722065d9afeb3ff8cda6f179 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
index 157c512205d19f18d9fa3eb92e005bac8c438799..3557a4c7dd7b8489c0bab8a76698abdff8aeb453 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/backlight.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
index e256d879b25ccc379cf93201b155f31e00269af2..dfd4e9621e3b1c969b76ac6873a480a163d98959 100644 (file)
@@ -125,16 +125,15 @@ u32 dss_of_port_get_port_number(struct device_node *port)
 
 static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
 {
-       struct device_node *np, *np_parent;
+       struct device_node *np;
 
        np = of_parse_phandle(node, "remote-endpoint", 0);
        if (!np)
                return NULL;
 
-       np_parent = of_get_next_parent(np);
-       of_node_put(np);
+       np = of_get_next_parent(np);
 
-       return np_parent;
+       return np;
 }
 
 struct device_node *
index 26c6134eb744075db77edaa279aa25cb47a56dc3..e1cfba51cff64d11971a8cb0740e6c56faf7b690 100644 (file)
@@ -96,7 +96,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
        dispc_runtime_get();
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state, false);
+       drm_atomic_helper_commit_planes(dev, old_state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
        omap_atomic_wait_for_completion(dev, old_state);
@@ -295,9 +295,9 @@ static int omap_modeset_init_properties(struct drm_device *dev)
        if (priv->has_dmm) {
                dev->mode_config.rotation_property =
                        drm_mode_create_rotation_property(dev,
-                               BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
-                               BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
-                               BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+                               DRM_ROTATE_0 | DRM_ROTATE_90 |
+                               DRM_ROTATE_180 | DRM_ROTATE_270 |
+                               DRM_REFLECT_X | DRM_REFLECT_Y);
                if (!dev->mode_config.rotation_property)
                        return -ENOMEM;
        }
index 31f5178c22c7577136da27e6903dc37391d4a8fa..5f3337f1e9aaf007fcc86fb698e4cc442ab9ba3a 100644 (file)
@@ -179,24 +179,24 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                                        (uint32_t)win->rotation);
                        /* fallthru to default to no rotation */
                case 0:
-               case BIT(DRM_ROTATE_0):
+               case DRM_ROTATE_0:
                        orient = 0;
                        break;
-               case BIT(DRM_ROTATE_90):
+               case DRM_ROTATE_90:
                        orient = MASK_XY_FLIP | MASK_X_INVERT;
                        break;
-               case BIT(DRM_ROTATE_180):
+               case DRM_ROTATE_180:
                        orient = MASK_X_INVERT | MASK_Y_INVERT;
                        break;
-               case BIT(DRM_ROTATE_270):
+               case DRM_ROTATE_270:
                        orient = MASK_XY_FLIP | MASK_Y_INVERT;
                        break;
                }
 
-               if (win->rotation & BIT(DRM_REFLECT_X))
+               if (win->rotation & DRM_REFLECT_X)
                        orient ^= MASK_X_INVERT;
 
-               if (win->rotation & BIT(DRM_REFLECT_Y))
+               if (win->rotation & DRM_REFLECT_Y)
                        orient ^= MASK_Y_INVERT;
 
                /* adjust x,y offset for flip/invert: */
@@ -213,7 +213,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
        } else {
                switch (win->rotation & DRM_ROTATE_MASK) {
                case 0:
-               case BIT(DRM_ROTATE_0):
+               case DRM_ROTATE_0:
                        /* OK */
                        break;
 
index 5252ab720e70043435b0ac7ecad70113948dbaff..66ac8c40db2614966af3992b561feb43dec5ab10 100644 (file)
@@ -60,7 +60,7 @@ to_omap_plane_state(struct drm_plane_state *state)
 }
 
 static int omap_plane_prepare_fb(struct drm_plane *plane,
-                                const struct drm_plane_state *new_state)
+                                struct drm_plane_state *new_state)
 {
        if (!new_state->fb)
                return 0;
@@ -69,7 +69,7 @@ static int omap_plane_prepare_fb(struct drm_plane *plane,
 }
 
 static void omap_plane_cleanup_fb(struct drm_plane *plane,
-                                 const struct drm_plane_state *old_state)
+                                 struct drm_plane_state *old_state)
 {
        if (old_state->fb)
                omap_framebuffer_unpin(old_state->fb);
@@ -109,8 +109,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
        win.src_y = state->src_y >> 16;
 
        switch (state->rotation & DRM_ROTATE_MASK) {
-       case BIT(DRM_ROTATE_90):
-       case BIT(DRM_ROTATE_270):
+       case DRM_ROTATE_90:
+       case DRM_ROTATE_270:
                win.src_w = state->src_h >> 16;
                win.src_h = state->src_w >> 16;
                break;
@@ -149,7 +149,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
        struct omap_plane_state *omap_state = to_omap_plane_state(plane->state);
        struct omap_plane *omap_plane = to_omap_plane(plane);
 
-       plane->state->rotation = BIT(DRM_ROTATE_0);
+       plane->state->rotation = DRM_ROTATE_0;
        omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
                           ? 0 : omap_plane->id;
 
@@ -178,7 +178,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        if (state->fb) {
-               if (state->rotation != BIT(DRM_ROTATE_0) &&
+               if (state->rotation != DRM_ROTATE_0 &&
                    !omap_framebuffer_supports_rotation(state->fb))
                        return -EINVAL;
        }
@@ -269,7 +269,7 @@ static void omap_plane_reset(struct drm_plane *plane)
         */
        omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
                           ? 0 : omap_plane->id;
-       omap_state->base.rotation = BIT(DRM_ROTATE_0);
+       omap_state->base.rotation = DRM_ROTATE_0;
 
        plane->state = &omap_state->base;
        plane->state->plane = plane;
index 1500ab99f5482a1e5e5f358a467d467191367380..62aba976e744c146c26d7fedf44c54cdd480361e 100644 (file)
@@ -18,6 +18,17 @@ config DRM_PANEL_SIMPLE
          that it can be automatically turned off when the panel goes into a
          low power state.
 
+config DRM_PANEL_JDI_LT070ME05000
+       tristate "JDI LT070ME05000 WUXGA DSI panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the JDI DSI video
+         mode panel found in Google Nexus 7 (2013) devices.
+         The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
+         24 bits per pixel.
+
 config DRM_PANEL_SAMSUNG_LD9040
        tristate "Samsung LD9040 RGB/SPI panel"
        depends on OF && SPI
index f277eed933d6f399c96c108937ab207f98f783ca..a5c7ec0236e0174079cce0f07d46372967e9cf3b 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
new file mode 100644 (file)
index 0000000..5b2340e
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2016 InforceComputing
+ * Author: Vinay Simha BN <simhavcs@gmail.com>
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * According to internet archives, the panel in the Nexus 7 (2nd Gen, 2013)
+ * is a JDI model LT070ME05000; its data sheet is at:
+ * http://panelone.net/en/7-0-inch/JDI_LT070ME05000_7.0_inch-datasheet
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+       "vddp",
+       "iovcc"
+};
+
+struct jdi_panel {
+       struct drm_panel base;
+       struct mipi_dsi_device *dsi;
+
+       struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+       struct gpio_desc *enable_gpio;
+       struct gpio_desc *reset_gpio;
+       struct gpio_desc *dcdc_en_gpio;
+       struct backlight_device *backlight;
+
+       bool prepared;
+       bool enabled;
+
+       const struct drm_display_mode *mode;
+};
+
+static inline struct jdi_panel *to_jdi_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct jdi_panel, base);
+}
+
+static int jdi_panel_init(struct jdi_panel *jdi)
+{
+       struct mipi_dsi_device *dsi = jdi->dsi;
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_soft_reset(dsi);
+       if (ret < 0)
+               return ret;
+
+       usleep_range(10000, 20000);
+
+       ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+       if (ret < 0) {
+               dev_err(dev, "failed to set pixel format: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_set_column_address(dsi, 0, jdi->mode->hdisplay - 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to set column address: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_set_page_address(dsi, 0, jdi->mode->vdisplay - 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to set page address: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * BIT(5) BCTRL = 1 Backlight Control Block On, Brightness registers
+        *                  are active
+        * BIT(3) BL = 1    Backlight Control On
+        * BIT(2) DD = 0    Display Dimming is Off
+        */
+       ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+                                (u8[]){ 0x24 }, 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to write control display: %d\n", ret);
+               return ret;
+       }
+
+       /* CABC off */
+       ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE,
+                                (u8[]){ 0x00 }, 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to set cabc off: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
+               return ret;
+       }
+
+       msleep(120);
+
+       ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x00}, 2);
+       if (ret < 0) {
+               dev_err(dev, "failed to set mcap: %d\n", ret);
+               return ret;
+       }
+
+       mdelay(10);
+
+       /* Interface setting, video mode */
+       ret = mipi_dsi_generic_write(dsi, (u8[])
+                                    {0xB3, 0x26, 0x08, 0x00, 0x20, 0x00}, 6);
+       if (ret < 0) {
+               dev_err(dev, "failed to set display interface setting: %d\n",
+                       ret);
+               return ret;
+       }
+
+       mdelay(20);
+
+       ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x03}, 2);
+       if (ret < 0) {
+               dev_err(dev, "failed to set default values for mcap: %d\n",
+                       ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int jdi_panel_on(struct jdi_panel *jdi)
+{
+       struct mipi_dsi_device *dsi = jdi->dsi;
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_on(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to set display on: %d\n", ret);
+
+       return ret;
+}
+
+static void jdi_panel_off(struct jdi_panel *jdi)
+{
+       struct mipi_dsi_device *dsi = jdi->dsi;
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to set display off: %d\n", ret);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to enter sleep mode: %d\n", ret);
+
+       msleep(100);
+}
+
+static int jdi_panel_disable(struct drm_panel *panel)
+{
+       struct jdi_panel *jdi = to_jdi_panel(panel);
+
+       if (!jdi->enabled)
+               return 0;
+
+       jdi->backlight->props.power = FB_BLANK_POWERDOWN;
+       backlight_update_status(jdi->backlight);
+
+       jdi->enabled = false;
+
+       return 0;
+}
+
+static int jdi_panel_unprepare(struct drm_panel *panel)
+{
+       struct jdi_panel *jdi = to_jdi_panel(panel);
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+
+       if (!jdi->prepared)
+               return 0;
+
+       jdi_panel_off(jdi);
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+       if (ret < 0)
+               dev_err(dev, "regulator disable failed, %d\n", ret);
+
+       gpiod_set_value(jdi->enable_gpio, 0);
+
+       gpiod_set_value(jdi->reset_gpio, 1);
+
+       gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+       jdi->prepared = false;
+
+       return 0;
+}
+
+static int jdi_panel_prepare(struct drm_panel *panel)
+{
+       struct jdi_panel *jdi = to_jdi_panel(panel);
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+
+       if (jdi->prepared)
+               return 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+       if (ret < 0) {
+               dev_err(dev, "regulator enable failed, %d\n", ret);
+               return ret;
+       }
+
+       msleep(20);
+
+       gpiod_set_value(jdi->dcdc_en_gpio, 1);
+       usleep_range(10, 20);
+
+       gpiod_set_value(jdi->reset_gpio, 0);
+       usleep_range(10, 20);
+
+       gpiod_set_value(jdi->enable_gpio, 1);
+       usleep_range(10, 20);
+
+       ret = jdi_panel_init(jdi);
+       if (ret < 0) {
+               dev_err(dev, "failed to init panel: %d\n", ret);
+               goto poweroff;
+       }
+
+       ret = jdi_panel_on(jdi);
+       if (ret < 0) {
+               dev_err(dev, "failed to set panel on: %d\n", ret);
+               goto poweroff;
+       }
+
+       jdi->prepared = true;
+
+       return 0;
+
+poweroff:
+       ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+       if (ret < 0)
+               dev_err(dev, "regulator disable failed, %d\n", ret);
+
+       gpiod_set_value(jdi->enable_gpio, 0);
+
+       gpiod_set_value(jdi->reset_gpio, 1);
+
+       gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+       return ret;
+}
+
+static int jdi_panel_enable(struct drm_panel *panel)
+{
+       struct jdi_panel *jdi = to_jdi_panel(panel);
+
+       if (jdi->enabled)
+               return 0;
+
+       jdi->backlight->props.power = FB_BLANK_UNBLANK;
+       backlight_update_status(jdi->backlight);
+
+       jdi->enabled = true;
+
+       return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+               .clock = 155493,
+               .hdisplay = 1200,
+               .hsync_start = 1200 + 48,
+               .hsync_end = 1200 + 48 + 32,
+               .htotal = 1200 + 48 + 32 + 60,
+               .vdisplay = 1920,
+               .vsync_start = 1920 + 3,
+               .vsync_end = 1920 + 3 + 5,
+               .vtotal = 1920 + 3 + 5 + 6,
+               .vrefresh = 60,
+               .flags = 0,
+};
+
+static int jdi_panel_get_modes(struct drm_panel *panel)
+{
+       struct drm_display_mode *mode;
+       struct jdi_panel *jdi = to_jdi_panel(panel);
+       struct device *dev = &jdi->dsi->dev;
+
+       mode = drm_mode_duplicate(panel->drm, &default_mode);
+       if (!mode) {
+               dev_err(dev, "failed to add mode %ux%u@%u\n",
+                       default_mode.hdisplay, default_mode.vdisplay,
+                       default_mode.vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       drm_mode_probed_add(panel->connector, mode);
+
+       panel->connector->display_info.width_mm = 95;
+       panel->connector->display_info.height_mm = 151;
+
+       return 1;
+}
+
+static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       int ret;
+       u16 brightness = bl->props.brightness;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return brightness & 0xff;
+}
+
+static int dsi_dcs_bl_update_status(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return 0;
+}
+
+static const struct backlight_ops dsi_bl_ops = {
+       .update_status = dsi_dcs_bl_update_status,
+       .get_brightness = dsi_dcs_bl_get_brightness,
+};
+
+static struct backlight_device *
+drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_RAW;
+       props.brightness = 255;
+       props.max_brightness = 255;
+
+       return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+                                             &dsi_bl_ops, &props);
+}
+
+static const struct drm_panel_funcs jdi_panel_funcs = {
+       .disable = jdi_panel_disable,
+       .unprepare = jdi_panel_unprepare,
+       .prepare = jdi_panel_prepare,
+       .enable = jdi_panel_enable,
+       .get_modes = jdi_panel_get_modes,
+};
+
+static const struct of_device_id jdi_of_match[] = {
+       { .compatible = "jdi,lt070me05000", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, jdi_of_match);
+
+static int jdi_panel_add(struct jdi_panel *jdi)
+{
+       struct device *dev = &jdi->dsi->dev;
+       int ret;
+       unsigned int i;
+
+       jdi->mode = &default_mode;
+
+       for (i = 0; i < ARRAY_SIZE(jdi->supplies); i++)
+               jdi->supplies[i].supply = regulator_names[i];
+
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
+                                     jdi->supplies);
+       if (ret < 0) {
+               dev_err(dev, "failed to init regulator, ret=%d\n", ret);
+               return ret;
+       }
+
+       jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+       if (IS_ERR(jdi->enable_gpio)) {
+               ret = PTR_ERR(jdi->enable_gpio);
+               dev_err(dev, "cannot get enable-gpio %d\n", ret);
+               return ret;
+       }
+
+       jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(jdi->reset_gpio)) {
+               ret = PTR_ERR(jdi->reset_gpio);
+               dev_err(dev, "cannot get reset-gpios %d\n", ret);
+               return ret;
+       }
+
+       jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
+       if (IS_ERR(jdi->dcdc_en_gpio)) {
+               ret = PTR_ERR(jdi->dcdc_en_gpio);
+               dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
+               return ret;
+       }
+
+       jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
+       if (IS_ERR(jdi->backlight)) {
+               ret = PTR_ERR(jdi->backlight);
+               dev_err(dev, "failed to register backlight %d\n", ret);
+               return ret;
+       }
+
+       drm_panel_init(&jdi->base);
+       jdi->base.funcs = &jdi_panel_funcs;
+       jdi->base.dev = &jdi->dsi->dev;
+
+       return drm_panel_add(&jdi->base);
+}
+
+static void jdi_panel_del(struct jdi_panel *jdi)
+{
+       if (jdi->base.dev)
+               drm_panel_remove(&jdi->base);
+}
+
+static int jdi_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct jdi_panel *jdi;
+       int ret;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags =  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+                          MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+       jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
+       if (!jdi)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, jdi);
+
+       jdi->dsi = dsi;
+
+       ret = jdi_panel_add(jdi);
+       if (ret < 0)
+               return ret;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int jdi_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = jdi_panel_disable(&jdi->base);
+       if (ret < 0)
+               dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+                       ret);
+
+       drm_panel_detach(&jdi->base);
+       jdi_panel_del(jdi);
+
+       return 0;
+}
+
+static void jdi_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+
+       jdi_panel_disable(&jdi->base);
+}
+
+static struct mipi_dsi_driver jdi_panel_driver = {
+       .driver = {
+               .name = "panel-jdi-lt070me05000",
+               .of_match_table = jdi_of_match,
+       },
+       .probe = jdi_panel_probe,
+       .remove = jdi_panel_remove,
+       .shutdown = jdi_panel_shutdown,
+};
+module_mipi_dsi_driver(jdi_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_AUTHOR("Vinay Simha BN <simhavcs@gmail.com>");
+MODULE_DESCRIPTION("JDI LT070ME05000 WUXGA");
+MODULE_LICENSE("GPL v2");
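
As a sanity check on default_mode in the new driver: htotal = 1200 + 48 + 32 + 60 = 1340 and vtotal = 1920 + 3 + 5 + 6 = 1934, so a 60 Hz refresh needs 1340 × 1934 × 60 ≈ 155.49 MHz, matching .clock = 155493 (kHz):

#include <stdio.h>

int main(void)
{
	int htotal = 1200 + 48 + 32 + 60;	/* 1340 */
	int vtotal = 1920 + 3 + 5 + 6;		/* 1934 */
	long clock_khz = (long)htotal * vtotal * 60 / 1000;

	printf("%ld kHz\n", clock_khz);		/* 155493 */
	return 0;
}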
index 85143d1b9b3168062aef554e7041956cf4f62ec0..113db3c4a633fc6a1ce35e5ec86d217b99e7851f 100644 (file)
@@ -849,6 +849,34 @@ static const struct panel_desc innolux_at070tn92 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct display_timing innolux_g101ice_l01_timing = {
+       .pixelclock = { 60400000, 71100000, 74700000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 41, 80, 100 },
+       .hback_porch = { 40, 79, 99 },
+       .hsync_len = { 1, 1, 1 },
+       .vactive = { 800, 800, 800 },
+       .vfront_porch = { 5, 11, 14 },
+       .vback_porch = { 4, 11, 14 },
+       .vsync_len = { 1, 1, 1 },
+       .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g101ice_l01 = {
+       .timings = &innolux_g101ice_l01_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 217,
+               .height = 135,
+       },
+       .delay = {
+               .enable = 200,
+               .disable = 200,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
 static const struct drm_display_mode innolux_g121i1_l01_mode = {
        .clock = 71000,
        .hdisplay = 1280,
@@ -1186,7 +1214,7 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
                .width = 105,
                .height = 67,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
 /*
@@ -1245,6 +1273,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
                .height = 93,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
 };
 
 static const struct drm_display_mode qd43003c0_40_mode = {
@@ -1384,6 +1413,11 @@ static const struct panel_desc sharp_lq123p1jx31 = {
                .width = 259,
                .height = 173,
        },
+       .delay = {
+               .prepare = 110,
+               .enable = 50,
+               .unprepare = 550,
+       },
 };
 
 static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
@@ -1430,6 +1464,11 @@ static const struct panel_desc starry_kr122ea0sra = {
                .width = 263,
                .height = 164,
        },
+       .delay = {
+               .prepare = 10 + 200,
+               .enable = 50,
+               .unprepare = 10 + 500,
+       },
 };
 
 static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -1574,6 +1613,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "innolux,at070tn92",
                .data = &innolux_at070tn92,
+       }, {
+               .compatible = "innolux,g101ice-l01",
+               .data = &innolux_g101ice_l01,
        }, {
                .compatible ="innolux,g121i1-l01",
                .data = &innolux_g121i1_l01
index 3aef12742a53b895fac7be63651643d5db9325e2..a61c0d460ec2dd018463389d63c243b532ad55db 100644 (file)
@@ -211,6 +211,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
        struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
 
        drm_crtc_cleanup(crtc);
+       qxl_bo_unref(&qxl_crtc->cursor_bo);
        kfree(qxl_crtc);
 }
 
@@ -296,6 +297,52 @@ qxl_hide_cursor(struct qxl_device *qdev)
        return 0;
 }
 
+static int qxl_crtc_apply_cursor(struct drm_crtc *crtc)
+{
+       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_cursor_cmd *cmd;
+       struct qxl_release *release;
+       int ret = 0;
+
+       if (!qcrtc->cursor_bo)
+               return 0;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+                                        QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+       if (ret)
+               return ret;
+
+       ret = qxl_release_list_add(release, qcrtc->cursor_bo);
+       if (ret)
+               goto out_free_release;
+
+       ret = qxl_release_reserve_list(release, false);
+       if (ret)
+               goto out_free_release;
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_SET;
+       cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+       cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+
+       cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
+
+       cmd->u.set.visible = 1;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_fence_buffer_objects(release);
+
+       return ret;
+
+out_free_release:
+       qxl_release_free(qdev, release);
+       return ret;
+}
+
 static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
                                struct drm_file *file_priv,
                                uint32_t handle,
@@ -400,7 +447,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
        }
        drm_gem_object_unreference_unlocked(obj);
 
-       qxl_bo_unref(&cursor_bo);
+       qxl_bo_unref(&qcrtc->cursor_bo);
+       qcrtc->cursor_bo = cursor_bo;
 
        return ret;
 
@@ -655,6 +703,12 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                           bo->surf.stride, bo->surf.format);
                qxl_io_create_primary(qdev, 0, bo);
                bo->is_primary = true;
+
+               ret = qxl_crtc_apply_cursor(crtc);
+               if (ret) {
+                       DRM_ERROR("could not set cursor after modeset\n");
+                       ret = 0;
+               }
        }
 
        if (bo->is_primary) {
index ffe8853951452fa4e087e65820d21df407c7f605..9b728edf1b49453b0a2e70bd06260822ae820525 100644 (file)
@@ -57,11 +57,8 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
 static int
 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
 {
-       int ret;
-       ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
-                                        QXL_RELEASE_DRAWABLE, release,
-                                        NULL);
-       return ret;
+       return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+                                         QXL_RELEASE_DRAWABLE, release, NULL);
 }
 
 static void
index 8e633caa40787d48192292c8620272dd843946cd..5f3e5ad99de7912843026abdafa9b93fa0c1e20a 100644 (file)
@@ -137,6 +137,7 @@ struct qxl_crtc {
        int cur_y;
        int hot_spot_x;
        int hot_spot_y;
+       struct qxl_bo *cursor_bo;
 };
 
 struct qxl_output {
index df2657051afd7c7eaa6de39683b99f6190f214db..2cd879a4ae15643cd7a1f72e1644d87a7a50c6d6 100644 (file)
@@ -24,7 +24,6 @@
  *     David Airlie
  */
 #include <linux/module.h>
-#include <linux/fb.h>
 
 #include "drmP.h"
 #include "drm/drm.h"
@@ -73,10 +72,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
        }
 }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
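+/* deferred-I/O descriptor, referenced only by the fbdev emulation path */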
 static struct fb_deferred_io qxl_defio = {
        .delay          = QXL_DIRTY_DELAY,
        .deferred_io    = drm_fb_helper_deferred_io,
 };
+#endif
 
 static struct fb_ops qxlfb_ops = {
        .owner = THIS_MODULE,
@@ -313,8 +314,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
                goto out_destroy_fbi;
        }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
        info->fbdefio = &qxl_defio;
        fb_deferred_io_init(info);
+#endif
 
        qdev->fbdev_info = info;
        qdev->fbdev_qfb = &qfbdev->qfb;
index 5e1d7899dd7234fcac0bcf811ed6ac37f1cd9eff..fa5440dc9a19764af3051c401b630cafd15d2314 100644 (file)
@@ -61,7 +61,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
        if (domain == QXL_GEM_DOMAIN_SURFACE)
-               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
@@ -151,7 +151,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
-       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;
@@ -191,7 +191,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
-       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;
@@ -311,7 +311,7 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
index f599cd073b722cd4c9db4c2634582dbe901120d3..cd83f050cf3e7fc0a0ee2c8cdf88fed97580eb2f 100644 (file)
@@ -203,12 +203,9 @@ qxl_release_free(struct qxl_device *qdev,
 static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
 {
-       int ret;
        /* pin releases bo's they are too messy to evict */
-       ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
-                           QXL_GEM_DOMAIN_VRAM, NULL,
-                           bo);
-       return ret;
+       return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
 }
 
 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
index d50c9679e631a8e8fc226c382cdb9f88fee64fc9..e26c82db948b9f8ef9da2c14cd671c3b4782e1a4 100644 (file)
@@ -168,7 +168,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
-       case TTM_PL_PRIV0:
+       case TTM_PL_PRIV:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
@@ -210,7 +210,8 @@ static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct qxl_bo *qbo = to_qxl_bo(bo);
 
-       return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -235,7 +236,7 @@ static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                mem->bus.base = qdev->vram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
                break;
-       case TTM_PL_PRIV0:
+       case TTM_PL_PRIV:
                mem->bus.is_iomem = true;
                mem->bus.base = qdev->surfaceram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -361,8 +362,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
                qxl_move_null(bo, new_mem);
                return 0;
        }
-       return ttm_bo_move_memcpy(bo, evict, interruptible,
-                                 no_wait_gpu, new_mem);
+       return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
+                                 new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -376,7 +377,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
        qbo = to_qxl_bo(bo);
        qdev = qbo->gem_base.dev->dev_private;
 
-       if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+       if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -422,7 +423,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
-       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
                           qdev->surfaceram_size / PAGE_SIZE);
        if (r) {
                DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +446,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
-       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
        ttm_bo_device_release(&qdev->mman.bdev);
        qxl_ttm_global_fini(qdev);
        DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +490,7 @@ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
                if (i == 0)
                        qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
-                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
        }
        return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
index c57b4de63caf5f633edda167a83009f6fb6a8484..a982be57d1efb44b0638f55faa18621dcdc14fce 100644 (file)
@@ -56,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+           DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY |
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_r128_buf_priv_t),
        .load = r128_driver_load,
index a97abc8af657e879dc0f7ee3b29334b1352cffb0..a4e9f35da3a22e87a2c35ad18b99f359999f4e9a 100644 (file)
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (radeon_crtc->ss.refdiv) {
                                radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
                                radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-                               if (rdev->family >= CHIP_RV770)
+                               if (ASIC_IS_AVIVO(rdev) &&
+                                   rdev->family != CHIP_RS780 &&
+                                   rdev->family != CHIP_RS880)
                                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                        }
                }
@@ -1154,6 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        u32 tmp, viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
+       char *format_name;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1257,8 +1260,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                bypass_lut = true;
                break;
        default:
-               DRM_ERROR("Unsupported screen format %s\n",
-                         drm_get_format_name(target_fb->pixel_format));
+               format_name = drm_get_format_name(target_fb->pixel_format);
+               DRM_ERROR("Unsupported screen format %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
 
@@ -1433,8 +1437,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
-       /* set pageflip to happen only at start of vblank interval (front porch) */
-       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
@@ -1469,6 +1473,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        u32 viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
+       char *format_name;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1558,8 +1563,9 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                bypass_lut = true;
                break;
        default:
-               DRM_ERROR("Unsupported screen format %s\n",
-                         drm_get_format_name(target_fb->pixel_format));
+               format_name = drm_get_format_name(target_fb->pixel_format);
+               DRM_ERROR("Unsupported screen format %s\n", format_name);
+               kfree(format_name);
                return -EINVAL;
        }
 
@@ -1632,8 +1638,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
-       /* set pageflip to happen only at start of vblank interval (front porch) */
-       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
index cead089a9e7d2ea1bc78046aeed4d6cb7e8a02d6..432cb46f6a34a10753e30317562ce2df5315a890 100644 (file)
@@ -389,22 +389,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret, i;
+       int ret;
 
-       for (i = 0; i < 7; i++) {
-               ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-                                      DP_DPCD_SIZE);
-               if (ret == DP_DPCD_SIZE) {
-                       memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+       ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+                              DP_DPCD_SIZE);
+       if (ret == DP_DPCD_SIZE) {
+               memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-                       DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-                                     dig_connector->dpcd);
+               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                             dig_connector->dpcd);
 
-                       radeon_dp_probe_oui(radeon_connector);
+               radeon_dp_probe_oui(radeon_connector);
 
-                       return true;
-               }
+               return true;
        }
+
        dig_connector->dpcd[0] = 0;
        return false;
 }
index 0c1b9ff433af706c15347467a756a7f71e36746a..f6ff41a0eed63698b78ca6ed0532e56ddb5714a3 100644 (file)
@@ -1871,7 +1871,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data = NULL;
        const __le32 *new_fw_data = NULL;
-       u32 running, blackout = 0, tmp;
+       u32 running, tmp;
        u32 *io_mc_regs = NULL;
        const __le32 *new_io_mc_regs = NULL;
        int i, regs_size, ucode_size;
@@ -1912,11 +1912,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
        if (running == 0) {
-               if (running) {
-                       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-               }
-
                /* reset the engine and set to writable */
                WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1964,9 +1959,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
                                break;
                        udelay(1);
                }
-
-               if (running)
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
        }
 
        return 0;
@@ -4201,11 +4193,7 @@ u32 cik_gfx_get_rptr(struct radeon_device *rdev,
 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
                     struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(CP_RB0_WPTR);
-
-       return wptr;
+       return RREG32(CP_RB0_WPTR);
 }
 
 void cik_gfx_set_wptr(struct radeon_device *rdev,
@@ -8215,7 +8203,7 @@ static void cik_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index cead2284fd796790fba849343b8891aa86134031..48db93577c1dacadbf9804d7b21efedcb8023bd0 100644 (file)
 #define UVD_UDEC_ADDR_CONFIG           0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG                0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG       0xef54
+#define UVD_NO_OP                      0xeffc
 
 #define UVD_LMI_EXT40_ADDR             0xf498
 #define UVD_GP_SCRATCH4                        0xf4e0
index db275b7ed34abd5efa2c6f0e5bd19d7c54bb3177..0b6b5766216faa525728abcc31b157ca95e5a6ee 100644 (file)
@@ -2878,9 +2878,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x7) != 3) {
+                       if ((tmp & 0x7) != 0) {
                                tmp &= ~0x7;
-                               tmp |= 0x3;
                                WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -5580,7 +5579,7 @@ static void evergreen_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index c8e3d394cde70033798b696305da88c38a9c0c9c..f3d88ca2aa8fc5b09b0330d32e31baa75cca1a65 100644 (file)
 #define UVD_UDEC_ADDR_CONFIG                           0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG                                0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG                       0xef54
+#define UVD_NO_OP                                      0xeffc
 #define UVD_RBC_RB_RPTR                                        0xf690
 #define UVD_RBC_RB_WPTR                                        0xf694
 #define UVD_STATUS                                     0xf6bc
index 4a3d7cab83f7a554aea1a21b309815bc83f1900c..103fc8650197bfe8efd6903ba29ba1c3906ccd2a 100644 (file)
@@ -2062,7 +2062,7 @@ static void cayman_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index 47eb49b77d326ac88d2cfb3ec1de0c4632412257..3c9fec88ea4463e072173025431d0ef2c7bbf74a 100644 (file)
 #define UVD_UDEC_ADDR_CONFIG                           0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG                                0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG                       0xEF54
+#define UVD_NO_OP                                      0xEFFC
 #define UVD_RBC_RB_RPTR                                        0xF690
 #define UVD_RBC_RB_WPTR                                        0xF694
 #define UVD_STATUS                                     0xf6bc
index f25994b3afa633db491737d0afa9684b6a8eda67..f5e84f4b58e60e2e7a5c8cacbd459a5c0567866f 100644 (file)
@@ -1071,11 +1071,7 @@ u32 r100_gfx_get_rptr(struct radeon_device *rdev,
 u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(RADEON_CP_RB_WPTR);
-
-       return wptr;
+       return RREG32(RADEON_CP_RB_WPTR);
 }
 
 void r100_gfx_set_wptr(struct radeon_device *rdev,
index 9247e7d207fe2e3079819f7511e43f9895b31cb9..a951881c2a504050c001b9c9e9830ec691b13424 100644 (file)
@@ -2631,11 +2631,7 @@ u32 r600_gfx_get_rptr(struct radeon_device *rdev,
 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(R600_CP_RB_WPTR);
-
-       return wptr;
+       return RREG32(R600_CP_RB_WPTR);
 }
 
 void r600_gfx_set_wptr(struct radeon_device *rdev,
@@ -3097,7 +3093,7 @@ static void r600_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index 1e8495cca41e9f5f611dfc3b84726104427969c7..2e00a5287bd2dbbe01cb5799e8541e49bb201f56 100644 (file)
 #define UVD_GPCOM_VCPU_DATA0                           0xef10
 #define UVD_GPCOM_VCPU_DATA1                           0xef14
 #define UVD_ENGINE_CNTL                                        0xef18
+#define UVD_NO_OP                                      0xeffc
 
 #define UVD_SEMA_CNTL                                  0xf400
 #define UVD_RB_ARB_CTRL                                        0xf480
index 5633ee3eb46e7d3d9a7197b2c5c909db326ba03b..1b0dcad916b0a3559909bca4c77592c1e37d12cd 100644 (file)
@@ -742,6 +742,7 @@ struct radeon_flip_work {
        struct work_struct              unpin_work;
        struct radeon_device            *rdev;
        int                             crtc_id;
+       u32                             target_vblank;
        uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct radeon_bo                *old_rbo;
index 31c9a92d6a1b89da90863e761fa4701f15d3ad3f..6efbd65c929efc28135056c4077ed979e18017c3 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "radeon_acpi.h"
 #include "atom.h"
 
+#if defined(CONFIG_VGA_SWITCHEROO)
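+/* resolved by the ATPX handler when VGA switcheroo is enabled */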
+bool radeon_atpx_dgpu_req_power_for_displays(void);
+#else
+static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; }
+#endif
+
 #define ACPI_AC_CLASS           "ac_adapter"
 
 extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
@@ -394,6 +401,16 @@ int radeon_atif_handler(struct radeon_device *rdev,
 #endif
                }
        }
+       if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+               if ((rdev->flags & RADEON_IS_PX) &&
+                   radeon_atpx_dgpu_req_power_for_displays()) {
+                       pm_runtime_get_sync(rdev->ddev->dev);
+                       /* Just fire off a uevent and let userspace tell us what to do */
+                       drm_helper_hpd_irq_event(rdev->ddev);
+                       pm_runtime_mark_last_busy(rdev->ddev->dev);
+                       pm_runtime_put_autosuspend(rdev->ddev->dev);
+               }
+       }
        /* TODO: check other events */
 
        /* We've handled the event, stop the notifier chain. The ACPI interface
index 6de3428612027f5da11ccec1e1827bc828e4e341..2fdcd04bc93f7b9c6abf5d84752836e154d566b0 100644 (file)
@@ -29,6 +29,7 @@ struct radeon_atpx {
        acpi_handle handle;
        struct radeon_atpx_functions functions;
        bool is_hybrid;
+       bool dgpu_req_power_for_displays;
 };
 
 static struct radeon_atpx_priv {
@@ -72,6 +73,10 @@ bool radeon_is_atpx_hybrid(void) {
        return radeon_atpx_priv.atpx.is_hybrid;
 }
 
+bool radeon_atpx_dgpu_req_power_for_displays(void) {
+       return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -198,16 +203,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
                printk("ATPX Hybrid Graphics\n");
-#if 1
-               /* This is a temporary hack until the D3 cold support
-                * makes it upstream.  The ATPX power_control method seems
-                * to still work on even if the system should be using
-                * the new standardized hybrid D3 cold ACPI interface.
-                */
-               atpx->functions.power_cntl = true;
-#else
                atpx->functions.power_cntl = false;
-#endif
                atpx->is_hybrid = true;
        }
 
index a00dd2f745271ec760bff14d5455571388313ebd..eb92aef46e3cfcf99a07d26272fac7725d0cfaa8 100644 (file)
@@ -639,7 +639,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
  * Used at driver startup.
  * Returns true if virtual or false if not.
  */
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
 {
 #ifdef CONFIG_X86
        return boot_cpu_has(X86_FEATURE_HYPERVISOR);
@@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
-       /* for pass through, always force asic_init */
-       if (radeon_device_is_virtual())
+       /* for pass through, always force asic_init for CI */
+       if (rdev->family >= CHIP_BONAIRE &&
+           radeon_device_is_virtual())
                return false;
 
        /* required for EFI mode on macbook2,1 which uses an r5xx asic */
@@ -1956,14 +1957,3 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
        }
 #endif
 }
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
-       return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
index c3206fb8f4cf773b9e378e2ad79f710d0b45d38d..890171f089875c156773bfc008c1aa514b9ca67f 100644 (file)
@@ -400,14 +400,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct radeon_flip_work *work =
                container_of(__work, struct radeon_flip_work, flip_work);
        struct radeon_device *rdev = work->rdev;
+       struct drm_device *dev = rdev->ddev;
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
        struct drm_crtc *crtc = &radeon_crtc->base;
        unsigned long flags;
        int r;
-       int vpos, hpos, stat, min_udelay = 0;
-       unsigned repcnt = 4;
-       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+       int vpos, hpos;
 
        down_read(&rdev->exclusive_lock);
        if (work->fence) {
@@ -438,59 +437,25 @@ static void radeon_flip_work_func(struct work_struct *__work)
                work->fence = NULL;
        }
 
+       /* Wait until we're out of the vertical blank period before the one
+        * targeted by the flip
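+        * (the signed counter difference keeps the comparison wraparound-safe)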
+        */
+       while (radeon_crtc->enabled &&
+              (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
+                                          &vpos, &hpos, NULL, NULL,
+                                          &crtc->hwmode)
+               & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+              (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+              (int)(work->target_vblank -
+                    dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)
+               usleep_range(1000, 2000);
+
        /* We borrow the event spin lock for protecting flip_status */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
        /* set the proper interrupt */
        radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
-       /* If this happens to execute within the "virtually extended" vblank
-        * interval before the start of the real vblank interval then it needs
-        * to delay programming the mmio flip until the real vblank is entered.
-        * This prevents completing a flip too early due to the way we fudge
-        * our vblank counter and vblank timestamps in order to work around the
-        * problem that the hw fires vblank interrupts before actual start of
-        * vblank (when line buffer refilling is done for a frame). It
-        * complements the fudging logic in radeon_get_crtc_scanoutpos() for
-        * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
-        *
-        * In practice this won't execute very often unless on very fast
-        * machines because the time window for this to happen is very small.
-        */
-       while (radeon_crtc->enabled && --repcnt) {
-               /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-                * start in hpos, and to the "fudged earlier" vblank start in
-                * vpos.
-                */
-               stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
-                                                 GET_DISTANCE_TO_VBLANKSTART,
-                                                 &vpos, &hpos, NULL, NULL,
-                                                 &crtc->hwmode);
-
-               if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-                   !(vpos >= 0 && hpos <= 0))
-                       break;
-
-               /* Sleep at least until estimated real start of hw vblank */
-               min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-               if (min_udelay > vblank->framedur_ns / 2000) {
-                       /* Don't wait ridiculously long - something is wrong */
-                       repcnt = 0;
-                       break;
-               }
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-               usleep_range(min_udelay, 2 * min_udelay);
-               spin_lock_irqsave(&crtc->dev->event_lock, flags);
-       };
-
-       if (!repcnt)
-               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-                                "framedur %d, linedur %d, stat %d, vpos %d, "
-                                "hpos %d\n", work->crtc_id, min_udelay,
-                                vblank->framedur_ns / 1000,
-                                vblank->linedur_ns / 1000, stat, vpos, hpos);
-
        /* do the flip (mmio) */
        radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
 
@@ -499,10 +464,11 @@ static void radeon_flip_work_func(struct work_struct *__work)
        up_read(&rdev->exclusive_lock);
 }
 
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-                                struct drm_framebuffer *fb,
-                                struct drm_pending_vblank_event *event,
-                                uint32_t page_flip_flags)
+static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
+                                       struct drm_framebuffer *fb,
+                                       struct drm_pending_vblank_event *event,
+                                       uint32_t page_flip_flags,
+                                       uint32_t target)
 {
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -599,12 +565,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
                base &= ~7;
        }
        work->base = base;
-
-       r = drm_crtc_vblank_get(crtc);
-       if (r) {
-               DRM_ERROR("failed to get vblank before flip\n");
-               goto pflip_cleanup;
-       }
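+       /* map the target from the drm vblank count to the hw counter domain */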
+       work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+               dev->driver->get_vblank_counter(dev, work->crtc_id);
 
        /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -613,7 +575,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                r = -EBUSY;
-               goto vblank_cleanup;
+               goto pflip_cleanup;
        }
        radeon_crtc->flip_status = RADEON_FLIP_PENDING;
        radeon_crtc->flip_work = work;
@@ -626,9 +588,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        queue_work(radeon_crtc->flip_queue, &work->flip_work);
        return 0;
 
-vblank_cleanup:
-       drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
        if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
                DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -697,7 +656,7 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
        .gamma_set = radeon_crtc_gamma_set,
        .set_config = radeon_crtc_set_config,
        .destroy = radeon_crtc_destroy,
-       .page_flip = radeon_crtc_page_flip,
+       .page_flip_target = radeon_crtc_page_flip_target,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
index db64e0062689b076842b9710c33e4660c96e9985..2d465648856a03156c878993ab2cc24755aec74f 100644 (file)
@@ -164,7 +164,6 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
        }
 
        if (tmp & AUX_SW_RX_TIMEOUT) {
-               DRM_DEBUG_KMS("dp_aux_ch timed out\n");
                ret = -ETIMEDOUT;
                goto done;
        }
index c01a7c6abb491c4227ad02fd6525813a7a9d3d55..78367ba8bb7dc5b87aec81215793a9441e559c58 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_fb_helper.h>
 
 #include "drm_crtc_helper.h"
 #include "radeon_kfd.h"
  *   2.44.0 - SET_APPEND_CNT packet3 support
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
+ *   2.47.0 - Add UVD_NO_OP register support
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       46
+#define KMS_DRIVER_MINOR       47
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -154,11 +156,6 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                                    unsigned long arg);
 
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor);
-void radeon_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
@@ -309,6 +306,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static struct drm_driver kms_driver;
 
+bool radeon_device_is_virtual(void);
+
 static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
        struct apertures_struct *ap;
@@ -324,7 +323,7 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 #ifdef CONFIG_X86
        primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-       remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
        kfree(ap);
 
        return 0;
@@ -362,6 +361,16 @@ radeon_pci_remove(struct pci_dev *pdev)
        drm_put_dev(dev);
 }
 
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+       /* if we are running in a VM, make sure the device
+        * is torn down properly on reboot/shutdown
+        */
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
+}
+
 static int radeon_pmops_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -373,6 +382,14 @@ static int radeon_pmops_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       /* GPU comes up enabled by the bios on resume */
+       if (radeon_is_px(drm_dev)) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
        return radeon_resume_kms(drm_dev, true, true);
 }
 
@@ -529,10 +546,6 @@ static struct drm_driver kms_driver = {
        .disable_vblank = radeon_disable_vblank_kms,
        .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
        .get_scanout_position = radeon_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
-       .debugfs_init = radeon_debugfs_init,
-       .debugfs_cleanup = radeon_debugfs_cleanup,
-#endif
        .irq_preinstall = radeon_driver_irq_preinstall_kms,
        .irq_postinstall = radeon_driver_irq_postinstall_kms,
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
@@ -574,6 +587,7 @@ static struct pci_driver radeon_kms_pci_driver = {
        .id_table = pciidlist,
        .probe = radeon_pci_probe,
        .remove = radeon_pci_remove,
+       .shutdown = radeon_pci_shutdown,
        .driver.pm = &radeon_pm_ops,
 };
 
index 0e3143acb565dc11572b1f47b52c286bb1ec79b3..0daad446d2c754768f501604ff3bc8fe623731aa 100644 (file)
@@ -25,7 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -47,8 +47,35 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+       struct radeon_fbdev *rfbdev = info->par;
+       struct radeon_device *rdev = rfbdev->rdev;
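+       /* hold a runtime PM reference while the fbdev is open */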
+       int ret = pm_runtime_get_sync(rdev->ddev->dev);
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_mark_last_busy(rdev->ddev->dev);
+               pm_runtime_put_autosuspend(rdev->ddev->dev);
+               return ret;
+       }
+       return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+       struct radeon_fbdev *rfbdev = info->par;
+       struct radeon_device *rdev = rfbdev->rdev;
+
+       pm_runtime_mark_last_busy(rdev->ddev->dev);
+       pm_runtime_put_autosuspend(rdev->ddev->dev);
+       return 0;
+}
+
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
+       .fb_open = radeonfb_open,
+       .fb_release = radeonfb_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -383,7 +410,7 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
 {
        if (rdev->mode_info.rfbdev)
-               fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+               drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
 }
 
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
index 9590bcd321c09a6cc5246dd147484f4767d7a3f8..021aa005623f804be130179398a617291032c92b 100644 (file)
@@ -938,10 +938,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
                         "Radeon i2c hw bus %s", name);
                i2c->adapter.algo = &radeon_i2c_algo;
                ret = i2c_add_adapter(&i2c->adapter);
-               if (ret) {
-                       DRM_ERROR("Failed to register hw i2c %s\n", name);
+               if (ret)
                        goto out_free;
-               }
        } else if (rec->hw_capable &&
                   radeon_hw_i2c &&
                   ASIC_IS_DCE3(rdev)) {
@@ -950,10 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
                         "Radeon i2c hw bus %s", name);
                i2c->adapter.algo = &radeon_atom_i2c_algo;
                ret = i2c_add_adapter(&i2c->adapter);
-               if (ret) {
-                       DRM_ERROR("Failed to register hw i2c %s\n", name);
+               if (ret)
                        goto out_free;
-               }
        } else {
                /* set the radeon bit adapter */
                snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
index 835563c1f0edf65e5fbf5545aa039738046d48f4..4388ddeec8d243928cbff7ecedbdfd5a78f80023 100644 (file)
@@ -641,11 +641,11 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
                struct radeon_vm *vm;
-               int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
                if (unlikely(!fpriv)) {
-                       return -ENOMEM;
+                       r = -ENOMEM;
+                       goto out_suspend;
                }
 
                if (rdev->accel_working) {
@@ -653,14 +653,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        r = radeon_vm_init(rdev, vm);
                        if (r) {
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
 
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
 
                        /* map the ib pool buffer read only into
@@ -674,15 +674,16 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        if (r) {
                                radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
                }
                file_priv->driver_priv = fpriv;
        }
 
+out_suspend:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
-       return 0;
+       return r;
 }
 
 /**
@@ -717,6 +718,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -733,6 +736,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
 {
        struct radeon_device *rdev = dev->dev_private;
 
+       pm_runtime_get_sync(dev->dev);
+
        mutex_lock(&rdev->gem.mutex);
        if (rdev->hyperz_filp == file_priv)
                rdev->hyperz_filp = NULL;
index ffdad81ef9647cbc96612e3d0dc4d41b85009dff..455268214b893eac36e8bbd65d5e2b18d2735483 100644 (file)
@@ -237,7 +237,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
        if (radeon_ttm_tt_has_userptr(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
@@ -263,8 +264,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
        rdev = radeon_get_rdev(bo->bdev);
        ridx = radeon_copy_ring_index(rdev);
-       old_start = old_mem->start << PAGE_SHIFT;
-       new_start = new_mem->start << PAGE_SHIFT;
+       old_start = (u64)old_mem->start << PAGE_SHIFT;
+       new_start = (u64)new_mem->start << PAGE_SHIFT;
 
        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
@@ -346,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -379,7 +380,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -444,8 +445,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, interruptible,
-                                      no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
index 73dfe01435eaedcf0831ce8eb412057fb6f5ac64..0cd0e7bdee5594dbc30f375dacc773d84acfeb29 100644 (file)
@@ -669,6 +669,7 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                                return r;
                        break;
                case UVD_ENGINE_CNTL:
+               case UVD_NO_OP:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n",
@@ -753,8 +754,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
        ib.ptr[3] = addr >> 32;
        ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
        ib.ptr[5] = 0;
-       for (i = 6; i < 16; ++i)
-               ib.ptr[i] = PACKET2(0);
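+       /* pad the IB with NOP register writes: reg header plus one data word */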
+       for (i = 6; i < 16; i += 2) {
+               ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
+               ib.ptr[i+1] = 0;
+       }
        ib.length_dw = 16;
 
        r = radeon_ib_schedule(rdev, &ib, NULL, false);
index c55d653aaf5f6bcfb0b80d24ad1c42025d36e267..76c55c5d11ec62c8630d7162a7fc5d72e49df6a0 100644 (file)
@@ -406,9 +406,8 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x7) != 3) {
+                       if ((tmp & 0x7) != 0) {
                                tmp &= ~0x7;
-                               tmp |= 0x3;
                                WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
index 1c120a4c3c9781f6f2892318cf1a968197c51c0b..729ae588c970d4861c1b748d72a451c33eb2187a 100644 (file)
@@ -1738,7 +1738,7 @@ static void rv770_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index 9ef2064b1c9cdfc0392c6154258366387a1bf95a..0271f4c559aecf7b24283756e3cd42d702b1adfa 100644 (file)
 #define UVD_UDEC_TILING_CONFIG                          0xef40
 #define UVD_UDEC_DB_TILING_CONFIG                       0xef44
 #define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+#define UVD_NO_OP                                      0xeffc
 
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
index 2523ca96c6c7d5e36fd9633cab723f2656d241fb..7ee9aafbdf744bc0a97f358767e5b2178f24c41a 100644 (file)
@@ -1547,7 +1547,7 @@ int si_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data = NULL;
        const __le32 *new_fw_data = NULL;
-       u32 running, blackout = 0;
+       u32 running;
        u32 *io_mc_regs = NULL;
        const __le32 *new_io_mc_regs = NULL;
        int i, regs_size, ucode_size;
@@ -1598,11 +1598,6 @@ int si_mc_load_microcode(struct radeon_device *rdev)
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
        if (running == 0) {
-               if (running) {
-                       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-               }
-
                /* reset the engine and set to writable */
                WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1641,9 +1636,6 @@ int si_mc_load_microcode(struct radeon_device *rdev)
                                break;
                        udelay(1);
                }
-
-               if (running)
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
        }
 
        return 0;
@@ -6928,7 +6920,7 @@ static void si_uvd_resume(struct radeon_device *rdev)
                return;
 
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
        if (r) {
                dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
                return;
index d1a7b58dd291f8798666276acded0290aae805e2..eb220eecba789aaf40ced21852a8d98441e55a3b 100644 (file)
 #define UVD_UDEC_ADDR_CONFIG                           0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG                                0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG                       0xEF54
+#define UVD_NO_OP                                      0xEFFC
 #define UVD_RBC_RB_RPTR                                        0xF690
 #define UVD_RBC_RB_WPTR                                        0xF694
 #define UVD_STATUS                                     0xf6bc
index 899ef7a2a7b4eaff0748c41e7a8e03cee201a2ee..73c971e39b1c723f5e712dd85eb491a85ced2ac4 100644 (file)
@@ -316,8 +316,8 @@ static int rcar_du_probe(struct platform_device *pdev)
        rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
 
        ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        rcdu->ddev = ddev;
        ddev->dev_private = rcdu;
index 4de3ff0dbebd8cba5bf4a761ae9d97bb37f86c00..e03004f4588deb2bb360fa78b9d0540b6eb43ca4 100644 (file)
@@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
 
        /* Link drm_bridge to encoder */
        bridge->encoder = encoder;
+       encoder->bridge = bridge;
 
        ret = drm_bridge_attach(rcdu->ddev, bridge);
        if (ret) {
index f03eb55318c1cfb3fc3914fd9184f251f245bd41..bd9c3bb9252c68520af8233412c69253bfd04838 100644 (file)
@@ -257,7 +257,8 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state, true);
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
 
index 05d07138a2b2a144da5361d26321c8dc2b50eae7..9746365694ba6ed050fb7901686b2484d664bf94 100644 (file)
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
-               rockchip_drm_gem.o rockchip_drm_vop.o
+               rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
index 89aadbf465f8563cdefb8174ff5cfca608876d09..e83be157cc2a5fd049b440a7cec830867096ccb9 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
 #define RK3288_GRF_SOC_CON6            0x25c
@@ -41,6 +42,8 @@
 
 #define HIWORD_UPDATE(val, mask)       (val | (mask) << 16)
 
+#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS  100
+
 #define to_dp(nm)      container_of(nm, struct rockchip_dp_device, nm)
 
 /**
@@ -68,11 +71,62 @@ struct rockchip_dp_device {
        struct regmap            *grf;
        struct reset_control     *rst;
 
+       struct work_struct       psr_work;
+       spinlock_t               psr_lock;
+       unsigned int             psr_state;
+
        const struct rockchip_dp_chip_data *data;
 
        struct analogix_dp_plat_data plat_data;
 };
 
+static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+{
+       struct rockchip_dp_device *dp = to_dp(encoder);
+       unsigned long flags;
+
+       dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+
+       spin_lock_irqsave(&dp->psr_lock, flags);
+       if (enabled)
+               dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
+       else
+               dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+
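+       /* defer the actual PSR transition to psr_work, which may sleep */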
+       schedule_work(&dp->psr_work);
+       spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
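+/* wait for the scanout to pass the end of the active region, then toggle PSR */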
+static void analogix_dp_psr_work(struct work_struct *work)
+{
+       struct rockchip_dp_device *dp =
+                               container_of(work, typeof(*dp), psr_work);
+       struct drm_crtc *crtc = dp->encoder.crtc;
+       int psr_state = dp->psr_state;
+       int vact_end;
+       int ret;
+       unsigned long flags;
+
+       if (!crtc)
+               return;
+
+       vact_end = crtc->mode.vtotal - crtc->mode.vsync_start +
+                  crtc->mode.vdisplay;
+
+       ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end,
+                                         PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+       if (ret) {
+               dev_err(dp->dev, "line flag interrupt did not arrive\n");
+               return;
+       }
+
+       spin_lock_irqsave(&dp->psr_lock, flags);
+       if (psr_state == EDP_VSC_PSR_STATE_ACTIVE)
+               analogix_dp_enable_psr(dp->dev);
+       else
+               analogix_dp_disable_psr(dp->dev);
+       spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
 {
        reset_control_assert(dp->rst);
@@ -87,6 +141,8 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
        struct rockchip_dp_device *dp = to_dp(plat_data);
        int ret;
 
+       cancel_work_sync(&dp->psr_work);
+
        ret = clk_prepare_enable(dp->pclk);
        if (ret < 0) {
                dev_err(dp->dev, "failed to enable pclk %d\n", ret);
@@ -342,12 +398,22 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
        dp->plat_data.power_off = rockchip_dp_powerdown;
        dp->plat_data.get_modes = rockchip_dp_get_modes;
 
+       spin_lock_init(&dp->psr_lock);
+       dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+       INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
+
+       rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+
        return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
 }
 
 static void rockchip_dp_unbind(struct device *dev, struct device *master,
                               void *data)
 {
+       struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+       rockchip_drm_psr_unregister(&dp->encoder);
+
        return analogix_dp_unbind(dev, master, data);
 }
 
@@ -381,10 +447,8 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 
                panel = of_drm_find_panel(panel_node);
                of_node_put(panel_node);
-               if (!panel) {
-                       DRM_ERROR("failed to find panel\n");
+               if (!panel)
                        return -EPROBE_DEFER;
-               }
        }
 
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -445,7 +509,6 @@ static struct platform_driver rockchip_dp_driver = {
        .remove = rockchip_dp_remove,
        .driver = {
                   .name = "rockchip-dp",
-                  .owner = THIS_MODULE,
                   .pm = &rockchip_dp_pm_ops,
                   .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
        },
index a822d49a255ad898e57cafdd8a7d9be5aae68c5e..446b5d7e85f705404e73e8262c8c9d21f505f827 100644 (file)
@@ -143,8 +143,8 @@ static int rockchip_drm_bind(struct device *dev)
        int ret;
 
        drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
-       if (!drm_dev)
-               return -ENOMEM;
+       if (IS_ERR(drm_dev))
+               return PTR_ERR(drm_dev);
 
        dev_set_drvdata(dev, drm_dev);
 
@@ -156,6 +156,9 @@ static int rockchip_drm_bind(struct device *dev)
 
        drm_dev->dev_private = private;
 
+       INIT_LIST_HEAD(&private->psr_list);
+       spin_lock_init(&private->psr_list_lock);
+
        drm_mode_config_init(drm_dev);
 
        rockchip_drm_mode_config_init(drm_dev);
index ea393294006151f6ffb6aa816a5b802879168e21..fb6226cf84b7ab27dd053125cfbc6a37845cbe7d 100644 (file)
@@ -39,7 +39,6 @@ struct drm_connector;
 struct rockchip_crtc_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
-       void (*wait_for_update)(struct drm_crtc *crtc);
 };
 
 struct rockchip_crtc_state {
@@ -61,6 +60,9 @@ struct rockchip_drm_private {
        struct drm_gem_object *fbdev_bo;
        const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
        struct drm_atomic_state *state;
+
+       struct list_head psr_list;
+       spinlock_t psr_list_lock;
 };
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -70,4 +72,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
                                   struct device *dev);
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                    struct device *dev);
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+                               unsigned int mstimeout);
+
 #endif /* _ROCKCHIP_DRM_DRV_H_ */
index 55c52734c52d3f63895f1cac04c639df0a41f503..0f6eda023bd0cc7eb4611f2ec259172cac8dd14e 100644 (file)
@@ -22,6 +22,7 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
+#include "rockchip_drm_psr.h"
 
 #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
 
@@ -63,9 +64,20 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
                                     rockchip_fb->obj[0], handle);
 }
 
+static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
+                                struct drm_file *file,
+                                unsigned int flags, unsigned int color,
+                                struct drm_clip_rect *clips,
+                                unsigned int num_clips)
+{
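+       /* new content was drawn; force panels out of self-refresh */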
+       rockchip_drm_psr_flush_all(fb->dev);
+       return 0;
+}
+
 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
        .destroy        = rockchip_drm_fb_destroy,
        .create_handle  = rockchip_drm_fb_create_handle,
+       .dirty          = rockchip_drm_fb_dirty,
 };
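
The new .dirty hook is what ties userspace front-buffer rendering to PSR exit: any DIRTYFB ioctl on one of the driver's framebuffers flushes every registered PSR encoder, regardless of the clip rectangles passed in. A minimal userspace trigger might look like the following sketch (illustrative only; the DRM file descriptor and framebuffer id are assumed to come from elsewhere):

#include <stdint.h>
#include <xf86drmMode.h>

/*
 * Illustrative sketch: nudge the kernel to exit PSR after rendering
 * directly into a front buffer. rockchip_drm_fb_dirty() flushes all PSR
 * state regardless of the clip list, so no clip rectangles are needed.
 */
static int flush_frontbuffer(int drm_fd, uint32_t fb_id)
{
        return drmModeDirtyFB(drm_fd, fb_id, NULL, 0);
}
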
 
 static struct rockchip_drm_fb *
@@ -162,68 +174,6 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
                drm_fb_helper_hotplug_event(fb_helper);
 }
 
-static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
-{
-       struct rockchip_drm_private *priv = crtc->dev->dev_private;
-       int pipe = drm_crtc_index(crtc);
-       const struct rockchip_crtc_funcs *crtc_funcs = priv->crtc_funcs[pipe];
-
-       if (crtc_funcs && crtc_funcs->wait_for_update)
-               crtc_funcs->wait_for_update(crtc);
-}
-
-/*
- * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
- * have hardware counters for neither vblanks nor scanlines, which results in
- * a race where:
- *                             | <-- HW vsync irq and reg take effect
- *            plane_commit --> |
- *     get_vblank and wait --> |
- *                             | <-- handle_vblank, vblank->count + 1
- *              cleanup_fb --> |
- *             iommu crash --> |
- *                             | <-- HW vsync irq and reg take effect
- *
- * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
- * of waiting for vblank_count to change.
- */
-static void
-rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
-{
-       struct drm_crtc_state *old_crtc_state;
-       struct drm_crtc *crtc;
-       int i, ret;
-
-       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-               /* No one cares about the old state, so abuse it for tracking
-                * and store whether we hold a vblank reference (and should do a
-                * vblank wait) in the ->enable boolean.
-                */
-               old_crtc_state->enable = false;
-
-               if (!crtc->state->active)
-                       continue;
-
-               if (!drm_atomic_helper_framebuffer_changed(dev,
-                               old_state, crtc))
-                       continue;
-
-               ret = drm_crtc_vblank_get(crtc);
-               if (ret != 0)
-                       continue;
-
-               old_crtc_state->enable = true;
-       }
-
-       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-               if (!old_crtc_state->enable)
-                       continue;
-
-               rockchip_crtc_wait_for_update(crtc);
-               drm_crtc_vblank_put(crtc);
-       }
-}
-
 static void
 rockchip_atomic_commit_tail(struct drm_atomic_state *state)
 {
@@ -233,11 +183,12 @@ rockchip_atomic_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state, true);
+       drm_atomic_helper_commit_planes(dev, state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_commit_hw_done(state);
 
-       rockchip_atomic_wait_for_complete(dev, state);
+       drm_atomic_helper_wait_for_vblanks(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
new file mode 100644 (file)
index 0000000..a553e18
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
+
+#define PSR_FLUSH_TIMEOUT      msecs_to_jiffies(100)
+
+enum psr_state {
+       PSR_FLUSH,
+       PSR_ENABLE,
+       PSR_DISABLE,
+};
+
+struct psr_drv {
+       struct list_head        list;
+       struct drm_encoder      *encoder;
+
+       spinlock_t              lock;
+       bool                    active;
+       enum psr_state          state;
+
+       struct timer_list       flush_timer;
+
+       void (*set)(struct drm_encoder *encoder, bool enable);
+};
+
+static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
+{
+       struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
+       struct psr_drv *psr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+       list_for_each_entry(psr, &drm_drv->psr_list, list) {
+               if (psr->encoder->crtc == crtc)
+                       goto out;
+       }
+       psr = ERR_PTR(-ENODEV);
+
+out:
+       spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+       return psr;
+}
+
+static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+{
+       /*
+        * Allowed finite state machine:
+        *
+        *   PSR_ENABLE  < = = = = = >  PSR_FLUSH
+        *       | ^                        |
+        *       | |                        |
+        *       v |                        |
+        *   PSR_DISABLE < - - - - - - - - -
+        */
+       if (state == psr->state || !psr->active)
+               return;
+
+       /* Already disabled in flush, change the state, but not the hardware */
+       if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
+               psr->state = state;
+               return;
+       }
+
+       psr->state = state;
+
+       /* Actually commit the state change to hardware */
+       switch (psr->state) {
+       case PSR_ENABLE:
+               psr->set(psr->encoder, true);
+               break;
+
+       case PSR_DISABLE:
+       case PSR_FLUSH:
+               psr->set(psr->encoder, false);
+               break;
+       }
+}
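
Read as a transition table, the diagram above can be encoded in a few lines; a standalone sketch of the same relation (illustration only, not part of the patch):

#include <stdbool.h>

enum psr_state { PSR_FLUSH, PSR_ENABLE, PSR_DISABLE };

/* Encodes the allowed transitions shown in the diagram above. */
static bool psr_transition_allowed(enum psr_state from, enum psr_state to)
{
        switch (from) {
        case PSR_ENABLE:
                return to == PSR_FLUSH || to == PSR_DISABLE;
        case PSR_FLUSH:
                return to == PSR_ENABLE || to == PSR_DISABLE;
        case PSR_DISABLE:
                return to == PSR_ENABLE;
        }
        return false;
}
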
+
+static void psr_set_state(struct psr_drv *psr, enum psr_state state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&psr->lock, flags);
+       psr_set_state_locked(psr, state);
+       spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+static void psr_flush_handler(unsigned long data)
+{
+       struct psr_drv *psr = (struct psr_drv *)data;
+       unsigned long flags;
+
+       /* If the state has changed since we initiated the flush, do nothing */
+       spin_lock_irqsave(&psr->lock, flags);
+       if (psr->state == PSR_FLUSH)
+               psr_set_state_locked(psr, PSR_ENABLE);
+       spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+/**
+ * rockchip_drm_psr_activate - activate PSR on the given pipe
+ * @crtc: CRTC to look up the PSR encoder for
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_activate(struct drm_crtc *crtc)
+{
+       struct psr_drv *psr = find_psr_by_crtc(crtc);
+       unsigned long flags;
+
+       if (IS_ERR(psr))
+               return PTR_ERR(psr);
+
+       spin_lock_irqsave(&psr->lock, flags);
+       psr->active = true;
+       spin_unlock_irqrestore(&psr->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_activate);
+
+/**
+ * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
+ * @crtc: CRTC to look up the PSR encoder for
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc)
+{
+       struct psr_drv *psr = find_psr_by_crtc(crtc);
+       unsigned long flags;
+
+       if (IS_ERR(psr))
+               return PTR_ERR(psr);
+
+       spin_lock_irqsave(&psr->lock, flags);
+       psr->active = false;
+       spin_unlock_irqrestore(&psr->lock, flags);
+       del_timer_sync(&psr->flush_timer);
+
+       return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
+
+static void rockchip_drm_do_flush(struct psr_drv *psr)
+{
+       mod_timer(&psr->flush_timer,
+                 round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
+       psr_set_state(psr, PSR_FLUSH);
+}
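
Note that rockchip_drm_do_flush() re-arms the timer with mod_timer() on every call, pushing the expiry forward, so PSR is only re-enabled once no flush has been requested for a full PSR_FLUSH_TIMEOUT (100 ms) window.
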
+
+/**
+ * rockchip_drm_psr_flush - flush a single pipe
+ * @crtc: CRTC of the pipe to flush
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_flush(struct drm_crtc *crtc)
+{
+       struct psr_drv *psr = find_psr_by_crtc(crtc);
+
+       if (IS_ERR(psr))
+               return PTR_ERR(psr);
+
+       rockchip_drm_do_flush(psr);
+       return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush);
+
+/**
+ * rockchip_drm_psr_flush_all - force a flush of all registered PSR encoders
+ * @dev: drm device
+ *
+ * Disable PSR on all registered encoders, then re-enable it after
+ * PSR_FLUSH_TIMEOUT. If an encoder's PSR state changes while the flush is
+ * pending, that state is left unchanged once the timeout expires.
+ */
+void rockchip_drm_psr_flush_all(struct drm_device *dev)
+{
+       struct rockchip_drm_private *drm_drv = dev->dev_private;
+       struct psr_drv *psr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+       list_for_each_entry(psr, &drm_drv->psr_list, list)
+               rockchip_drm_do_flush(psr);
+       spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
+
+/**
+ * rockchip_drm_psr_register - register an encoder with the PSR driver
+ * @encoder: encoder that supports the PSR function
+ * @psr_set: callback used to switch the PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+                       void (*psr_set)(struct drm_encoder *, bool enable))
+{
+       struct rockchip_drm_private *drm_drv;
+       struct psr_drv *psr;
+       unsigned long flags;
+
+       if (!encoder || !psr_set)
+               return -EINVAL;
+
+       drm_drv = encoder->dev->dev_private;
+
+       psr = kzalloc(sizeof(*psr), GFP_KERNEL);
+       if (!psr)
+               return -ENOMEM;
+
+       setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+       spin_lock_init(&psr->lock);
+
+       psr->active = true;
+       psr->state = PSR_DISABLE;
+       psr->encoder = encoder;
+       psr->set = psr_set;
+
+       spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+       list_add_tail(&psr->list, &drm_drv->psr_list);
+       spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_register);
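
A hypothetical encoder-side usage sketch (the my_edp_* names and types are invented for illustration): register a set callback when the encoder is brought up and unregister it on teardown; the PSR core then invokes the callback as the state machine above switches between PSR_ENABLE and PSR_DISABLE/PSR_FLUSH:

static void my_edp_psr_set(struct drm_encoder *encoder, bool enable)
{
        struct my_edp *edp = to_my_edp(encoder);

        /* Program PSR entry or exit on the eDP link. */
        my_edp_program_psr(edp, enable);
}

static int my_edp_bind(struct my_edp *edp)
{
        return rockchip_drm_psr_register(&edp->encoder, my_edp_psr_set);
}

static void my_edp_unbind(struct my_edp *edp)
{
        rockchip_drm_psr_unregister(&edp->encoder);
}
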
+
+/**
+ * rockchip_drm_psr_unregister - unregister an encoder from the PSR driver
+ * @encoder: encoder to unregister
+ */
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
+{
+       struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+       struct psr_drv *psr, *n;
+       unsigned long flags;
+
+       spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+       list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
+               if (psr->encoder == encoder) {
+                       del_timer(&psr->flush_timer);
+                       list_del(&psr->list);
+                       kfree(psr);
+               }
+       }
+       spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_unregister);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
new file mode 100644 (file)
index 0000000..b420cf1
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ROCKCHIP_DRM_PSR__
+#define __ROCKCHIP_DRM_PSR__
+
+void rockchip_drm_psr_flush_all(struct drm_device *dev);
+int rockchip_drm_psr_flush(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_activate(struct drm_crtc *crtc);
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+                       void (*psr_set)(struct drm_encoder *, bool enable));
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
+
+#endif /* __ROCKCHIP_DRM_PSR__ */
index 91305eb7d312ade85f05e3ce303c4136c48c3ccc..c7eba305c48830b225d3927185ad849dac193d07 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
 #include <drm/drm_plane_helper.h>
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/iopoll.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_fb.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
-#define __REG_SET_RELAXED(x, off, mask, shift, v) \
-               vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
-#define __REG_SET_NORMAL(x, off, mask, shift, v) \
-               vop_mask_write(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
+               vop_mask_write(x, off, mask, shift, v, write_mask, true)
+
+#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
+               vop_mask_write(x, off, mask, shift, v, write_mask, false)
 
 #define REG_SET(x, base, reg, v, mode) \
-               __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+               __REG_SET_##mode(x, base + reg.offset, \
+                                reg.mask, reg.shift, v, reg.write_mask)
 #define REG_SET_MASK(x, base, reg, mask, v, mode) \
-               __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
+               __REG_SET_##mode(x, base + reg.offset, \
+                                mask, reg.shift, v, reg.write_mask)
 
 #define VOP_WIN_SET(x, win, name, v) \
                REG_SET(x, win->base, win->phy->name, v, RELAXED)
 
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
-#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
 
-struct vop_plane_state {
-       struct drm_plane_state base;
-       int format;
-       struct drm_rect src;
-       struct drm_rect dest;
-       dma_addr_t yrgb_mst;
-       bool enable;
+enum vop_pending {
+       VOP_PENDING_FB_UNREF,
 };
 
 struct vop_win {
        struct drm_plane base;
        const struct vop_win_data *data;
        struct vop *vop;
-
-       /* protected by dev->event_lock */
-       bool enable;
-       dma_addr_t yrgb_mst;
 };
 
 struct vop {
@@ -113,11 +109,15 @@ struct vop {
        struct mutex vsync_mutex;
        bool vsync_work_pending;
        struct completion dsp_hold_completion;
-       struct completion wait_update_complete;
 
        /* protected by dev->event_lock */
        struct drm_pending_vblank_event *event;
 
+       struct drm_flip_work fb_unref_work;
+       unsigned long pending;
+
+       struct completion line_flag_completion;
+
        const struct vop_data *data;
 
        uint32_t *regsbak;
@@ -164,27 +164,25 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
 }
 
 static inline void vop_mask_write(struct vop *vop, uint32_t offset,
-                                 uint32_t mask, uint32_t v)
+                                 uint32_t mask, uint32_t shift, uint32_t v,
+                                 bool write_mask, bool relaxed)
 {
-       if (mask) {
-               uint32_t cached_val = vop->regsbak[offset >> 2];
-
-               cached_val = (cached_val & ~mask) | v;
-               writel(cached_val, vop->regs + offset);
-               vop->regsbak[offset >> 2] = cached_val;
-       }
-}
+       if (!mask)
+               return;
 
-static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
-                                         uint32_t mask, uint32_t v)
-{
-       if (mask) {
+       if (write_mask) {
+               v = ((v << shift) & 0xffff) | (mask << (shift + 16));
+       } else {
                uint32_t cached_val = vop->regsbak[offset >> 2];
 
-               cached_val = (cached_val & ~mask) | v;
-               writel_relaxed(cached_val, vop->regs + offset);
-               vop->regsbak[offset >> 2] = cached_val;
+               v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
+               vop->regsbak[offset >> 2] = v;
        }
+
+       if (relaxed)
+               writel_relaxed(v, vop->regs + offset);
+       else
+               writel(v, vop->regs + offset);
 }
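
The write_mask branch deserves a worked example: the value goes into the low 16 bits and the write-enable mask into the high 16 bits, so the hardware updates only the selected bits and no cached read-modify-write through regsbak is needed. A standalone illustration of the encoding, using the rk3399 cfg_done register that is declared further down as VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0):

#include <stdint.h>
#include <stdio.h>

/* Same encoding as the write_mask branch of vop_mask_write() above. */
static uint32_t encode_write_mask(uint32_t mask, uint32_t shift, uint32_t v)
{
        return ((v << shift) & 0xffff) | (mask << (shift + 16));
}

int main(void)
{
        /* rk3399 cfg_done: mask 0x1, shift 0, value 1 -> 0x00010001 */
        printf("0x%08x\n", encode_write_mask(0x1, 0, 1));
        return 0;
}
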
 
 static inline uint32_t vop_get_intr_type(struct vop *vop,
@@ -240,7 +238,7 @@ static enum vop_data_format vop_convert_format(uint32_t format)
        case DRM_FORMAT_NV24:
                return VOP_FMT_YUV444SP;
        default:
-               DRM_ERROR("unsupport format[%08x]\n", format);
+               DRM_ERROR("unsupported format[%08x]\n", format);
                return -EINVAL;
        }
 }
@@ -317,7 +315,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
        int vskiplines = 0;
 
        if (dst_w > 3840) {
-               DRM_ERROR("Maximum destination width (3840) exceeded\n");
+               DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
                return;
        }
 
@@ -355,11 +353,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
        VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
        if (lb_mode == LB_RGB_3840X2) {
                if (yrgb_ver_scl_mode != SCALE_NONE) {
-                       DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+                       DRM_DEV_ERROR(vop->dev, "yrgb ver scale not allowed\n");
                        return;
                }
                if (cbcr_ver_scl_mode != SCALE_NONE) {
-                       DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+                       DRM_DEV_ERROR(vop->dev, "cbcr ver scale not allowed\n");
                        return;
                }
                vsu_mode = SCALE_UP_BIL;
@@ -411,6 +409,7 @@ static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
 
        spin_lock_irqsave(&vop->irq_lock, flags);
 
+       VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
        VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
 
        spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -430,7 +429,73 @@ static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
        spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
-static void vop_enable(struct drm_crtc *crtc)
+/*
+ * (1) Each frame starts at the start of the Vsync pulse, which is signaled by
+ *     the "FRAME_SYNC" interrupt.
+ * (2) The active data region of each frame ends at dsp_vact_end.
+ * (3) We should program this same number (dsp_vact_end) into dsp_line_frag_num
+ *     to get the "LINE_FLAG" interrupt at the end of the active on-screen data.
+ *
+ * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
+ * Interrupts
+ * LINE_FLAG -------------------------------+
+ * FRAME_SYNC ----+                         |
+ *                |                         |
+ *                v                         v
+ *                | Vsync | Vbp |  Vactive  | Vfp |
+ *                        ^     ^           ^     ^
+ *                        |     |           |     |
+ *                        |     |           |     |
+ * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
+ * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
+ * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
+ * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
+ */
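
As a worked example, assume the standard CEA 1080p60 timings (vdisplay 1080, vsync_start 1084, vtotal 1125): vact_st = vtotal - vsync_start = 41 and dsp_vact_end = vact_st + vdisplay = 1121, so programming line_flag_num = 1121 raises LINE_FLAG exactly as the last active line finishes scanning out.
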
+static bool vop_line_flag_irq_is_enabled(struct vop *vop)
+{
+       uint32_t line_flag_irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vop->irq_lock, flags);
+
+       line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
+
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+       return !!line_flag_irq;
+}
+
+static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
+{
+       unsigned long flags;
+
+       if (WARN_ON(!vop->is_enabled))
+               return;
+
+       spin_lock_irqsave(&vop->irq_lock, flags);
+
+       VOP_CTRL_SET(vop, line_flag_num[0], line_num);
+       VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
+       VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
+
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_line_flag_irq_disable(struct vop *vop)
+{
+       unsigned long flags;
+
+       if (WARN_ON(!vop->is_enabled))
+               return;
+
+       spin_lock_irqsave(&vop->irq_lock, flags);
+
+       VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
+
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
        int ret;
@@ -438,26 +503,20 @@ static void vop_enable(struct drm_crtc *crtc)
        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
                dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
-               return;
+               goto err_put_pm_runtime;
        }
 
        ret = clk_enable(vop->hclk);
-       if (ret < 0) {
-               dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
-               return;
-       }
+       if (WARN_ON(ret < 0))
+               goto err_put_pm_runtime;
 
        ret = clk_enable(vop->dclk);
-       if (ret < 0) {
-               dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+       if (WARN_ON(ret < 0))
                goto err_disable_hclk;
-       }
 
        ret = clk_enable(vop->aclk);
-       if (ret < 0) {
-               dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+       if (WARN_ON(ret < 0))
                goto err_disable_dclk;
-       }
 
        /*
         * Slave iommu shares power, irq and clock with vop.  It was associated
@@ -487,7 +546,7 @@ static void vop_enable(struct drm_crtc *crtc)
 
        drm_crtc_vblank_on(crtc);
 
-       return;
+       return 0;
 
 err_disable_aclk:
        clk_disable(vop->aclk);
@@ -495,6 +554,9 @@ err_disable_dclk:
        clk_disable(vop->dclk);
 err_disable_hclk:
        clk_disable(vop->hclk);
+err_put_pm_runtime:
+       pm_runtime_put_sync(vop->dev);
+       return ret;
 }
 
 static void vop_crtc_disable(struct drm_crtc *crtc)
@@ -504,6 +566,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
 
        WARN_ON(vop->event);
 
+       rockchip_drm_psr_deactivate(&vop->crtc);
+
        /*
         * We need to make sure that all windows are disabled before we
         * disable that crtc. Otherwise we might try to scan from a destroyed
@@ -568,22 +632,6 @@ static void vop_plane_destroy(struct drm_plane *plane)
        drm_plane_cleanup(plane);
 }
 
-static int vop_plane_prepare_fb(struct drm_plane *plane,
-                               const struct drm_plane_state *new_state)
-{
-       if (plane->state->fb)
-               drm_framebuffer_reference(plane->state->fb);
-
-       return 0;
-}
-
-static void vop_plane_cleanup_fb(struct drm_plane *plane,
-                                const struct drm_plane_state *old_state)
-{
-       if (old_state->fb)
-               drm_framebuffer_unreference(old_state->fb);
-}
-
 static int vop_plane_atomic_check(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
@@ -591,12 +639,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
        struct drm_crtc_state *crtc_state;
        struct drm_framebuffer *fb = state->fb;
        struct vop_win *vop_win = to_vop_win(plane);
-       struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
        const struct vop_win_data *win = vop_win->data;
-       bool visible;
        int ret;
-       struct drm_rect *dest = &vop_plane_state->dest;
-       struct drm_rect *src = &vop_plane_state->src;
        struct drm_rect clip;
        int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
                                        DRM_PLANE_HELPER_NO_SCALING;
@@ -604,62 +648,43 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
                                        DRM_PLANE_HELPER_NO_SCALING;
 
        if (!crtc || !fb)
-               goto out_disable;
+               return 0;
 
        crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
        if (WARN_ON(!crtc_state))
                return -EINVAL;
 
-       src->x1 = state->src_x;
-       src->y1 = state->src_y;
-       src->x2 = state->src_x + state->src_w;
-       src->y2 = state->src_y + state->src_h;
-       dest->x1 = state->crtc_x;
-       dest->y1 = state->crtc_y;
-       dest->x2 = state->crtc_x + state->crtc_w;
-       dest->y2 = state->crtc_y + state->crtc_h;
-
        clip.x1 = 0;
        clip.y1 = 0;
        clip.x2 = crtc_state->adjusted_mode.hdisplay;
        clip.y2 = crtc_state->adjusted_mode.vdisplay;
 
-       ret = drm_plane_helper_check_update(plane, crtc, state->fb,
-                                           src, dest, &clip,
-                                           state->rotation,
-                                           min_scale,
-                                           max_scale,
-                                           true, true, &visible);
+       ret = drm_plane_helper_check_state(state, &clip,
+                                          min_scale, max_scale,
+                                          true, true);
        if (ret)
                return ret;
 
-       if (!visible)
-               goto out_disable;
+       if (!state->visible)
+               return 0;
 
-       vop_plane_state->format = vop_convert_format(fb->pixel_format);
-       if (vop_plane_state->format < 0)
-               return vop_plane_state->format;
+       ret = vop_convert_format(fb->pixel_format);
+       if (ret < 0)
+               return ret;
 
        /*
         * Src.x1 can be odd after clipping, but the start point of a yuv
         * plane needs to be aligned to 2 pixels.
         */
-       if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
+       if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
                return -EINVAL;
 
-       vop_plane_state->enable = true;
-
-       return 0;
-
-out_disable:
-       vop_plane_state->enable = false;
        return 0;
 }
 
 static void vop_plane_atomic_disable(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
-       struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
        struct vop_win *vop_win = to_vop_win(plane);
        const struct vop_win_data *win = vop_win->data;
        struct vop *vop = to_vop(old_state->crtc);
@@ -667,18 +692,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
        if (!old_state->crtc)
                return;
 
-       spin_lock_irq(&plane->dev->event_lock);
-       vop_win->enable = false;
-       vop_win->yrgb_mst = 0;
-       spin_unlock_irq(&plane->dev->event_lock);
-
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, enable, 0);
 
        spin_unlock(&vop->reg_lock);
-
-       vop_plane_state->enable = false;
 }
 
 static void vop_plane_atomic_update(struct drm_plane *plane,
@@ -687,21 +705,21 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        struct drm_plane_state *state = plane->state;
        struct drm_crtc *crtc = state->crtc;
        struct vop_win *vop_win = to_vop_win(plane);
-       struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
        const struct vop_win_data *win = vop_win->data;
        struct vop *vop = to_vop(state->crtc);
        struct drm_framebuffer *fb = state->fb;
        unsigned int actual_w, actual_h;
        unsigned int dsp_stx, dsp_sty;
        uint32_t act_info, dsp_info, dsp_st;
-       struct drm_rect *src = &vop_plane_state->src;
-       struct drm_rect *dest = &vop_plane_state->dest;
+       struct drm_rect *src = &state->src;
+       struct drm_rect *dest = &state->dst;
        struct drm_gem_object *obj, *uv_obj;
        struct rockchip_gem_object *rk_obj, *rk_uv_obj;
        unsigned long offset;
        dma_addr_t dma_addr;
        uint32_t val;
        bool rb_swap;
+       int format;
 
        /*
         * can't update plane when vop is disabled.
@@ -712,7 +730,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        if (WARN_ON(!vop->is_enabled))
                return;
 
-       if (!vop_plane_state->enable) {
+       if (!state->visible) {
                vop_plane_atomic_disable(plane, old_state);
                return;
        }
@@ -733,18 +751,15 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 
        offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
        offset += (src->y1 >> 16) * fb->pitches[0];
-       vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+       dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
 
-       spin_lock_irq(&plane->dev->event_lock);
-       vop_win->enable = true;
-       vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
-       spin_unlock_irq(&plane->dev->event_lock);
+       format = vop_convert_format(fb->pixel_format);
 
        spin_lock(&vop->reg_lock);
 
-       VOP_WIN_SET(vop, win, format, vop_plane_state->format);
+       VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
-       VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
+       VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
        if (is_yuv_support(fb->pixel_format)) {
                int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
                int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
@@ -791,68 +806,18 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
-       .prepare_fb = vop_plane_prepare_fb,
-       .cleanup_fb = vop_plane_cleanup_fb,
        .atomic_check = vop_plane_atomic_check,
        .atomic_update = vop_plane_atomic_update,
        .atomic_disable = vop_plane_atomic_disable,
 };
 
-static void vop_atomic_plane_reset(struct drm_plane *plane)
-{
-       struct vop_plane_state *vop_plane_state =
-                                       to_vop_plane_state(plane->state);
-
-       if (plane->state && plane->state->fb)
-               drm_framebuffer_unreference(plane->state->fb);
-
-       kfree(vop_plane_state);
-       vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
-       if (!vop_plane_state)
-               return;
-
-       plane->state = &vop_plane_state->base;
-       plane->state->plane = plane;
-}
-
-static struct drm_plane_state *
-vop_atomic_plane_duplicate_state(struct drm_plane *plane)
-{
-       struct vop_plane_state *old_vop_plane_state;
-       struct vop_plane_state *vop_plane_state;
-
-       if (WARN_ON(!plane->state))
-               return NULL;
-
-       old_vop_plane_state = to_vop_plane_state(plane->state);
-       vop_plane_state = kmemdup(old_vop_plane_state,
-                                 sizeof(*vop_plane_state), GFP_KERNEL);
-       if (!vop_plane_state)
-               return NULL;
-
-       __drm_atomic_helper_plane_duplicate_state(plane,
-                                                 &vop_plane_state->base);
-
-       return &vop_plane_state->base;
-}
-
-static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
-                                          struct drm_plane_state *state)
-{
-       struct vop_plane_state *vop_state = to_vop_plane_state(state);
-
-       __drm_atomic_helper_plane_destroy_state(state);
-
-       kfree(vop_state);
-}
-
 static const struct drm_plane_funcs vop_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy = vop_plane_destroy,
-       .reset = vop_atomic_plane_reset,
-       .atomic_duplicate_state = vop_atomic_plane_duplicate_state,
-       .atomic_destroy_state = vop_atomic_plane_destroy_state,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -865,6 +830,7 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
 
        spin_lock_irqsave(&vop->irq_lock, flags);
 
+       VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
        VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
 
        spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -887,18 +853,9 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
        spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
-static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
-{
-       struct vop *vop = to_vop(crtc);
-
-       reinit_completion(&vop->wait_update_complete);
-       WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
-}
-
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
        .enable_vblank = vop_crtc_enable_vblank,
        .disable_vblank = vop_crtc_disable_vblank,
-       .wait_for_update = vop_crtc_wait_for_update,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -928,11 +885,17 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
        u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
        u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
        u16 vact_end = vact_st + vdisplay;
-       uint32_t val;
+       uint32_t pin_pol, val;
+       int ret;
 
        WARN_ON(vop->event);
 
-       vop_enable(crtc);
+       ret = vop_enable(crtc);
+       if (ret) {
+               DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
+               return;
+       }
+
        /*
         * If dclk rate is zero, mean that scanout is stop,
         * we don't need wait any more.
@@ -969,25 +932,31 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
                vop_dsp_hold_valid_irq_disable(vop);
        }
 
-       val = 0x8;
-       val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
-       val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
-       VOP_CTRL_SET(vop, pin_pol, val);
+       pin_pol = 0x8;
+       pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+       pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+       VOP_CTRL_SET(vop, pin_pol, pin_pol);
+
        switch (s->output_type) {
        case DRM_MODE_CONNECTOR_LVDS:
                VOP_CTRL_SET(vop, rgb_en, 1);
+               VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
                break;
        case DRM_MODE_CONNECTOR_eDP:
+               VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, edp_en, 1);
                break;
        case DRM_MODE_CONNECTOR_HDMIA:
+               VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, hdmi_en, 1);
                break;
        case DRM_MODE_CONNECTOR_DSI:
+               VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, mipi_en, 1);
                break;
        default:
-               DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+               DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
+                             s->output_type);
        }
        VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
@@ -1006,12 +975,44 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
        clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
 
        VOP_CTRL_SET(vop, standby, 0);
+
+       rockchip_drm_psr_activate(&vop->crtc);
+}
+
+static bool vop_fs_irq_is_pending(struct vop *vop)
+{
+       return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
+}
+
+static void vop_wait_for_irq_handler(struct vop *vop)
+{
+       bool pending;
+       int ret;
+
+       /*
+        * Spin until frame start interrupt status bit goes low, which means
+        * that interrupt handler was invoked and cleared it. The timeout of
+        * 10 msecs is really too long, but it is just a safety measure if
+        * something goes really wrong. The wait will only happen in the very
+        * unlikely case of a vblank happening exactly at the same time and
+        * shouldn't exceed microseconds range.
+        */
+       ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
+                                       !pending, 0, 10 * 1000);
+       if (ret)
+               DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");
+
+       synchronize_irq(vop->irq);
 }
 
 static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
 {
+       struct drm_atomic_state *old_state = old_crtc_state->state;
+       struct drm_plane_state *old_plane_state;
        struct vop *vop = to_vop(crtc);
+       struct drm_plane *plane;
+       int i;
 
        if (WARN_ON(!vop->is_enabled))
                return;
@@ -1021,12 +1022,13 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
        vop_cfg_done(vop);
 
        spin_unlock(&vop->reg_lock);
-}
 
-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
-                                 struct drm_crtc_state *old_crtc_state)
-{
-       struct vop *vop = to_vop(crtc);
+       /*
+        * There is a (rather unlikely) possibility that a vblank interrupt
+        * fired before we set the cfg_done bit. To avoid spuriously
+        * signalling flip completion we need to wait for it to finish.
+        */
+       vop_wait_for_irq_handler(vop);
 
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
@@ -1037,6 +1039,25 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
+
+       for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+               if (!old_plane_state->fb)
+                       continue;
+
+               if (old_plane_state->fb == plane->state->fb)
+                       continue;
+
+               drm_framebuffer_reference(old_plane_state->fb);
+               drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
+               set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+       }
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_crtc_state)
+{
+       rockchip_drm_psr_flush(crtc);
 }
 
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1093,16 +1114,13 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
        .atomic_destroy_state = vop_crtc_destroy_state,
 };
 
-static bool vop_win_pending_is_complete(struct vop_win *vop_win)
+static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
 {
-       dma_addr_t yrgb_mst;
-
-       if (!vop_win->enable)
-               return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
-
-       yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+       struct vop *vop = container_of(work, struct vop, fb_unref_work);
+       struct drm_framebuffer *fb = val;
 
-       return yrgb_mst == vop_win->yrgb_mst;
+       drm_crtc_vblank_put(&vop->crtc);
+       drm_framebuffer_unreference(fb);
 }
 
 static void vop_handle_vblank(struct vop *vop)
@@ -1110,25 +1128,17 @@ static void vop_handle_vblank(struct vop *vop)
        struct drm_device *drm = vop->drm_dev;
        struct drm_crtc *crtc = &vop->crtc;
        unsigned long flags;
-       int i;
-
-       for (i = 0; i < vop->data->win_size; i++) {
-               if (!vop_win_pending_is_complete(&vop->win[i]))
-                       return;
-       }
 
        spin_lock_irqsave(&drm->event_lock, flags);
        if (vop->event) {
-
                drm_crtc_send_vblank_event(crtc, vop->event);
                drm_crtc_vblank_put(crtc);
                vop->event = NULL;
-
        }
        spin_unlock_irqrestore(&drm->event_lock, flags);
 
-       if (!completion_done(&vop->wait_update_complete))
-               complete(&vop->wait_update_complete);
+       if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
+               drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
 }
 
 static irqreturn_t vop_isr(int irq, void *data)
@@ -1162,6 +1172,12 @@ static irqreturn_t vop_isr(int irq, void *data)
                ret = IRQ_HANDLED;
        }
 
+       if (active_irqs & LINE_FLAG_INTR) {
+               complete(&vop->line_flag_completion);
+               active_irqs &= ~LINE_FLAG_INTR;
+               ret = IRQ_HANDLED;
+       }
+
        if (active_irqs & FS_INTR) {
                drm_crtc_handle_vblank(crtc);
                vop_handle_vblank(vop);
@@ -1171,7 +1187,8 @@ static irqreturn_t vop_isr(int irq, void *data)
 
        /* Unhandled irqs are spurious. */
        if (active_irqs)
-               DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+               DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+                             active_irqs);
 
        return ret;
 }
@@ -1206,7 +1223,8 @@ static int vop_create_crtc(struct vop *vop)
                                               win_data->phy->nformats,
                                               win_data->type, NULL);
                if (ret) {
-                       DRM_ERROR("failed to initialize plane\n");
+                       DRM_DEV_ERROR(vop->dev, "failed to init plane: %d\n",
+                                     ret);
                        goto err_cleanup_planes;
                }
 
@@ -1244,7 +1262,8 @@ static int vop_create_crtc(struct vop *vop)
                                               win_data->phy->nformats,
                                               win_data->type, NULL);
                if (ret) {
-                       DRM_ERROR("failed to initialize overlay plane\n");
+                       DRM_DEV_ERROR(vop->dev, "failed to init overlay: %d\n",
+                                     ret);
                        goto err_cleanup_crtc;
                }
                drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
@@ -1252,14 +1271,17 @@ static int vop_create_crtc(struct vop *vop)
 
        port = of_get_child_by_name(dev->of_node, "port");
        if (!port) {
-               DRM_ERROR("no port node found in %s\n",
-                         dev->of_node->full_name);
+               DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
+                             dev->of_node->full_name);
                ret = -ENOENT;
                goto err_cleanup_crtc;
        }
 
+       drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
+                          vop_fb_unref_worker);
+
        init_completion(&vop->dsp_hold_completion);
-       init_completion(&vop->wait_update_complete);
+       init_completion(&vop->line_flag_completion);
        crtc->port = port;
        rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
 
@@ -1300,6 +1322,7 @@ static void vop_destroy_crtc(struct vop *vop)
         * references the CRTC.
         */
        drm_crtc_cleanup(crtc);
+       drm_flip_work_cleanup(&vop->fb_unref_work);
 }
 
 static int vop_initial(struct vop *vop)
@@ -1416,6 +1439,49 @@ static void vop_win_init(struct vop *vop)
        }
 }
 
+/**
+ * rockchip_drm_wait_line_flag - wait for the given line flag event
+ * @crtc: CRTC to enable the line flag on
+ * @line_num: line number of interest
+ * @mstimeout: timeout in milliseconds
+ *
+ * The caller blocks here until the line flag interrupt for the requested
+ * line has fired, or until the timeout expires.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+                               unsigned int mstimeout)
+{
+       struct vop *vop = to_vop(crtc);
+       unsigned long jiffies_left;
+
+       if (!crtc || !vop->is_enabled)
+               return -ENODEV;
+
+       if (line_num > crtc->mode.vtotal || !mstimeout)
+               return -EINVAL;
+
+       if (vop_line_flag_irq_is_enabled(vop))
+               return -EBUSY;
+
+       reinit_completion(&vop->line_flag_completion);
+       vop_line_flag_irq_enable(vop, line_num);
+
+       jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
+                                                  msecs_to_jiffies(mstimeout));
+       vop_line_flag_irq_disable(vop);
+
+       if (jiffies_left == 0) {
+               dev_err(vop->dev, "Timeout waiting for IRQ\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
+
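
A hypothetical caller sketch (illustration only, not part of this patch): a PSR implementation could use this helper to synchronize a link state change with scanout:

/*
 * Block until scanout passes a chosen line before switching an eDP link
 * into PSR. The line number used here (the count of visible lines) is an
 * arbitrary example; any value up to vtotal is accepted.
 */
static int my_edp_wait_for_line(struct drm_crtc *crtc)
{
        return rockchip_drm_wait_line_flag(crtc, crtc->mode.vdisplay, 100);
}
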
 static int vop_bind(struct device *dev, struct device *master, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1481,10 +1547,15 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
 
        ret = vop_create_crtc(vop);
        if (ret)
-               return ret;
+               goto err_enable_irq;
 
        pm_runtime_enable(&pdev->dev);
+
        return 0;
+
+err_enable_irq:
+       enable_irq(vop->irq); /* To balance out the disable_irq above */
+       return ret;
 }
 
 static void vop_unbind(struct device *dev, struct device *master, void *data)
index 071ff0be7a958d6cf9a16e65004737913fa87646..1dbc52615257cf500e87b2b64839b9389b3febfa 100644 (file)
@@ -33,6 +33,7 @@ struct vop_reg {
        uint32_t offset;
        uint32_t shift;
        uint32_t mask;
+       bool write_mask;
 };
 
 struct vop_ctrl {
@@ -48,6 +49,10 @@ struct vop_ctrl {
        struct vop_reg dither_down;
        struct vop_reg dither_up;
        struct vop_reg pin_pol;
+       struct vop_reg rgb_pin_pol;
+       struct vop_reg hdmi_pin_pol;
+       struct vop_reg edp_pin_pol;
+       struct vop_reg mipi_pin_pol;
 
        struct vop_reg htotal_pw;
        struct vop_reg hact_st_end;
@@ -56,6 +61,8 @@ struct vop_ctrl {
        struct vop_reg hpost_st_end;
        struct vop_reg vpost_st_end;
 
+       struct vop_reg line_flag_num[2];
+
        struct vop_reg cfg_done;
 };
 
index 919992cdc97e32f190b285f874c8c3944dd93ea3..35c51f3402f2973f0ea1305cd27073db950cc356 100644 (file)
 #define VOP_REG(off, _mask, s) \
                {.offset = off, \
                 .mask = _mask, \
-                .shift = s,}
+                .shift = s, \
+                .write_mask = false,}
+
+#define VOP_REG_MASK(off, _mask, s) \
+               {.offset = off, \
+                .mask = _mask, \
+                .shift = s, \
+                .write_mask = true,}
 
 static const uint32_t formats_win_full[] = {
        DRM_FORMAT_XRGB8888,
@@ -50,6 +57,89 @@ static const uint32_t formats_win_lite[] = {
        DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs rk3036_win_scl = {
+       .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+       .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+       .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+       .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3036_win0_data = {
+       .scl = &rk3036_win_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
+       .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
+       .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
+       .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+       .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3036_win1_data = {
+       .data_formats = formats_win_lite,
+       .nformats = ARRAY_SIZE(formats_win_lite),
+       .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
+       .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
+       .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
+       .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3036_vop_win_data[] = {
+       { .base = 0x00, .phy = &rk3036_win0_data,
+         .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x00, .phy = &rk3036_win1_data,
+         .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3036_vop_intrs[] = {
+       DSP_HOLD_VALID_INTR,
+       FS_INTR,
+       LINE_FLAG_INTR,
+       BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3036_intr = {
+       .intrs = rk3036_vop_intrs,
+       .nintrs = ARRAY_SIZE(rk3036_vop_intrs),
+       .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
+       .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
+       .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_ctrl rk3036_ctrl_data = {
+       .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
+       .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
+       .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+       .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+       .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
+       .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+       .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
+       .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
+       .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
+       {RK3036_DSP_CTRL1, 0x00000000},
+};
+
+static const struct vop_data rk3036_vop = {
+       .init_table = rk3036_vop_init_reg_table,
+       .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
+       .ctrl = &rk3036_ctrl_data,
+       .intr = &rk3036_intr,
+       .win = rk3036_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3036_vop_win_data),
+};
+
 static const struct vop_scl_extension rk3288_win_full_scl_ext = {
        .cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
        .cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -133,6 +223,7 @@ static const struct vop_ctrl rk3288_ctrl_data = {
        .vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
        .hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
        .vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+       .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
        .cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
 };
 
@@ -190,93 +281,104 @@ static const struct vop_data rk3288_vop = {
        .win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_scl_regs rk3036_win_scl = {
-       .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
-       .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
-       .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
-       .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
-};
-
-static const struct vop_win_phy rk3036_win0_data = {
-       .scl = &rk3036_win_scl,
-       .data_formats = formats_win_full,
-       .nformats = ARRAY_SIZE(formats_win_full),
-       .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
-       .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
-       .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
-       .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
-       .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
-       .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
-       .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
-       .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
-       .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
-       .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+static const struct vop_ctrl rk3399_ctrl_data = {
+       .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
+       .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+       .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+       .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+       .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+       .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+       .dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1),
+       .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+       .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+       .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+       .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+       .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
+       .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
+       .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
+       .htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+       .hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0),
+       .vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+       .vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0),
+       .hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+       .vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+       .line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0),
+       .line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16),
+       .cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0),
 };
 
-static const struct vop_win_phy rk3036_win1_data = {
-       .data_formats = formats_win_lite,
-       .nformats = ARRAY_SIZE(formats_win_lite),
-       .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
-       .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
-       .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
-       .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
-       .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
-       .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
-       .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
-       .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
-};
-
-static const struct vop_win_data rk3036_vop_win_data[] = {
-       { .base = 0x00, .phy = &rk3036_win0_data,
-         .type = DRM_PLANE_TYPE_PRIMARY },
-       { .base = 0x00, .phy = &rk3036_win1_data,
-         .type = DRM_PLANE_TYPE_CURSOR },
-};
-
-static const int rk3036_vop_intrs[] = {
-       DSP_HOLD_VALID_INTR,
+static const int rk3399_vop_intrs[] = {
        FS_INTR,
+       0, 0,
        LINE_FLAG_INTR,
+       0,
        BUS_ERROR_INTR,
+       0, 0, 0, 0, 0, 0, 0,
+       DSP_HOLD_VALID_INTR,
 };
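
The zero entries pad the table so that the array index lines up with the hardware interrupt bit; a standalone sketch of the resulting lookup, assuming that indexing convention:

/* Map a hardware interrupt bit back to its interrupt type (0 = unused). */
static int intr_bit_to_type(const int *intrs, int nintrs, int bit)
{
        return (bit >= 0 && bit < nintrs) ? intrs[bit] : 0;
}
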
 
-static const struct vop_intr rk3036_intr = {
-       .intrs = rk3036_vop_intrs,
-       .nintrs = ARRAY_SIZE(rk3036_vop_intrs),
-       .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
-       .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
-       .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+static const struct vop_intr rk3399_vop_intr = {
+       .intrs = rk3399_vop_intrs,
+       .nintrs = ARRAY_SIZE(rk3399_vop_intrs),
+       .status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0),
+       .enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0),
+       .clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0),
 };
 
-static const struct vop_ctrl rk3036_ctrl_data = {
-       .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
-       .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
-       .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
-       .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
-       .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
-       .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
-       .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
-       .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+static const struct vop_reg_data rk3399_init_reg_table[] = {
+       {RK3399_SYS_CTRL, 0x2000f800},
+       {RK3399_DSP_CTRL0, 0x00000000},
+       {RK3399_WIN0_CTRL0, 0x00000080},
+       {RK3399_WIN1_CTRL0, 0x00000080},
+       /* TODO: Win2/3 support the multiple-area function, but we haven't
+        * found a suitable way to use it yet, so let's just use them like
+        * the other windows, with only area 0 enabled.
+        */
+       {RK3399_WIN2_CTRL0, 0x00000010},
+       {RK3399_WIN3_CTRL0, 0x00000010},
 };
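
An init table like this is replayed once to put the block into a known state before the first mode set. A minimal sketch of the replay loop, with made-up storage standing in for the ioremapped registers:

#include <stddef.h>
#include <stdint.h>

struct vop_reg_data {
	uint32_t offset;	/* register offset in bytes */
	uint32_t value;		/* default to program */
};

static uint32_t mmio[0x1000 / 4];	/* stand-in for the MMIO window */

/* Write every {offset, value} pair once, e.g. from probe/bind. */
static void vop_apply_init_table(const struct vop_reg_data *tbl, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		mmio[tbl[i].offset / 4] = tbl[i].value;
}

int main(void)
{
	static const struct vop_reg_data tbl[] = {
		{ 0x0008, 0x2000f800 },	/* cf. RK3399_SYS_CTRL above */
		{ 0x0030, 0x00000080 },	/* cf. RK3399_WIN0_CTRL0 above */
	};

	vop_apply_init_table(tbl, sizeof(tbl) / sizeof(tbl[0]));
	return 0;
}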
 
-static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
-       {RK3036_DSP_CTRL1, 0x00000000},
+static const struct vop_data rk3399_vop_big = {
+       .init_table = rk3399_init_reg_table,
+       .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .intr = &rk3399_vop_intr,
+       .ctrl = &rk3399_ctrl_data,
+       /*
+        * The rk3399 VOP big window register layout is the same as rk3288's.
+        */
+       .win = rk3288_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_data rk3036_vop = {
-       .init_table = rk3036_vop_init_reg_table,
-       .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
-       .ctrl = &rk3036_ctrl_data,
-       .intr = &rk3036_intr,
-       .win = rk3036_vop_win_data,
-       .win_size = ARRAY_SIZE(rk3036_vop_win_data),
+static const struct vop_win_data rk3399_vop_lit_win_data[] = {
+       { .base = 0x00, .phy = &rk3288_win01_data,
+         .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x00, .phy = &rk3288_win23_data,
+         .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data rk3399_vop_lit = {
+       .init_table = rk3399_init_reg_table,
+       .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .intr = &rk3399_vop_intr,
+       .ctrl = &rk3399_ctrl_data,
+       /*
+        * The rk3399 VOP lit window register layout is the same as rk3288's,
+        * minus the win1 and win3 windows.
+        */
+       .win = rk3399_vop_lit_win_data,
+       .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
 };
 
 static const struct of_device_id vop_driver_dt_match[] = {
-       { .compatible = "rockchip,rk3288-vop",
-         .data = &rk3288_vop },
        { .compatible = "rockchip,rk3036-vop",
          .data = &rk3036_vop },
+       { .compatible = "rockchip,rk3288-vop",
+         .data = &rk3288_vop },
+       { .compatible = "rockchip,rk3399-vop-big",
+         .data = &rk3399_vop_big },
+       { .compatible = "rockchip,rk3399-vop-lit",
+         .data = &rk3399_vop_lit },
        {},
 };
 MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
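
At probe time the matched compatible string selects the per-SoC vop_data through the table's .data pointer. A sketch of the usual lookup via of_match_node(); the surrounding function is illustrative, not this driver's exact probe code:

/* Sketch only: recover the vop_data chosen by the DT compatible. */
static const struct vop_data *vop_get_match_data(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(vop_driver_dt_match, pdev->dev.of_node);
	if (!match)
		return NULL;

	return match->data;	/* e.g. &rk3399_vop_big or &rk3399_vop_lit */
}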
@@ -305,7 +407,6 @@ static struct platform_driver vop_platform_driver = {
        .remove = vop_remove,
        .driver = {
                .name = "rockchip-vop",
-               .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(vop_driver_dt_match),
        },
 };
index d4b46cba2f26d93b942522f99c07fefcab1cc7ba..cd197260ece5defab17c0448d6cee18001bc14b3 100644 (file)
 #define RK3036_HWC_LUT_ADDR            0x800
 /* rk3036 register definition end */
 
+/* rk3399 register definition */
+#define RK3399_REG_CFG_DONE            0x00000
+#define RK3399_VERSION_INFO            0x00004
+#define RK3399_SYS_CTRL                        0x00008
+#define RK3399_SYS_CTRL1               0x0000c
+#define RK3399_DSP_CTRL0               0x00010
+#define RK3399_DSP_CTRL1               0x00014
+#define RK3399_DSP_BG                  0x00018
+#define RK3399_MCU_CTRL                        0x0001c
+#define RK3399_WB_CTRL0                        0x00020
+#define RK3399_WB_CTRL1                        0x00024
+#define RK3399_WB_YRGB_MST             0x00028
+#define RK3399_WB_CBR_MST              0x0002c
+#define RK3399_WIN0_CTRL0              0x00030
+#define RK3399_WIN0_CTRL1              0x00034
+#define RK3399_WIN0_COLOR_KEY          0x00038
+#define RK3399_WIN0_VIR                        0x0003c
+#define RK3399_WIN0_YRGB_MST           0x00040
+#define RK3399_WIN0_CBR_MST            0x00044
+#define RK3399_WIN0_ACT_INFO           0x00048
+#define RK3399_WIN0_DSP_INFO           0x0004c
+#define RK3399_WIN0_DSP_ST             0x00050
+#define RK3399_WIN0_SCL_FACTOR_YRGB    0x00054
+#define RK3399_WIN0_SCL_FACTOR_CBR     0x00058
+#define RK3399_WIN0_SCL_OFFSET         0x0005c
+#define RK3399_WIN0_SRC_ALPHA_CTRL     0x00060
+#define RK3399_WIN0_DST_ALPHA_CTRL     0x00064
+#define RK3399_WIN0_FADING_CTRL                0x00068
+#define RK3399_WIN0_CTRL2              0x0006c
+#define RK3399_WIN1_CTRL0              0x00070
+#define RK3399_WIN1_CTRL1              0x00074
+#define RK3399_WIN1_COLOR_KEY          0x00078
+#define RK3399_WIN1_VIR                        0x0007c
+#define RK3399_WIN1_YRGB_MST           0x00080
+#define RK3399_WIN1_CBR_MST            0x00084
+#define RK3399_WIN1_ACT_INFO           0x00088
+#define RK3399_WIN1_DSP_INFO           0x0008c
+#define RK3399_WIN1_DSP_ST             0x00090
+#define RK3399_WIN1_SCL_FACTOR_YRGB    0x00094
+#define RK3399_WIN1_SCL_FACTOR_CBR     0x00098
+#define RK3399_WIN1_SCL_OFFSET         0x0009c
+#define RK3399_WIN1_SRC_ALPHA_CTRL     0x000a0
+#define RK3399_WIN1_DST_ALPHA_CTRL     0x000a4
+#define RK3399_WIN1_FADING_CTRL                0x000a8
+#define RK3399_WIN1_CTRL2              0x000ac
+#define RK3399_WIN2_CTRL0              0x000b0
+#define RK3399_WIN2_CTRL1              0x000b4
+#define RK3399_WIN2_VIR0_1             0x000b8
+#define RK3399_WIN2_VIR2_3             0x000bc
+#define RK3399_WIN2_MST0               0x000c0
+#define RK3399_WIN2_DSP_INFO0          0x000c4
+#define RK3399_WIN2_DSP_ST0            0x000c8
+#define RK3399_WIN2_COLOR_KEY          0x000cc
+#define RK3399_WIN2_MST1               0x000d0
+#define RK3399_WIN2_DSP_INFO1          0x000d4
+#define RK3399_WIN2_DSP_ST1            0x000d8
+#define RK3399_WIN2_SRC_ALPHA_CTRL     0x000dc
+#define RK3399_WIN2_MST2               0x000e0
+#define RK3399_WIN2_DSP_INFO2          0x000e4
+#define RK3399_WIN2_DSP_ST2            0x000e8
+#define RK3399_WIN2_DST_ALPHA_CTRL     0x000ec
+#define RK3399_WIN2_MST3               0x000f0
+#define RK3399_WIN2_DSP_INFO3          0x000f4
+#define RK3399_WIN2_DSP_ST3            0x000f8
+#define RK3399_WIN2_FADING_CTRL                0x000fc
+#define RK3399_WIN3_CTRL0              0x00100
+#define RK3399_WIN3_CTRL1              0x00104
+#define RK3399_WIN3_VIR0_1             0x00108
+#define RK3399_WIN3_VIR2_3             0x0010c
+#define RK3399_WIN3_MST0               0x00110
+#define RK3399_WIN3_DSP_INFO0          0x00114
+#define RK3399_WIN3_DSP_ST0            0x00118
+#define RK3399_WIN3_COLOR_KEY          0x0011c
+#define RK3399_WIN3_MST1               0x00120
+#define RK3399_WIN3_DSP_INFO1          0x00124
+#define RK3399_WIN3_DSP_ST1            0x00128
+#define RK3399_WIN3_SRC_ALPHA_CTRL     0x0012c
+#define RK3399_WIN3_MST2               0x00130
+#define RK3399_WIN3_DSP_INFO2          0x00134
+#define RK3399_WIN3_DSP_ST2            0x00138
+#define RK3399_WIN3_DST_ALPHA_CTRL     0x0013c
+#define RK3399_WIN3_MST3               0x00140
+#define RK3399_WIN3_DSP_INFO3          0x00144
+#define RK3399_WIN3_DSP_ST3            0x00148
+#define RK3399_WIN3_FADING_CTRL                0x0014c
+#define RK3399_HWC_CTRL0               0x00150
+#define RK3399_HWC_CTRL1               0x00154
+#define RK3399_HWC_MST                 0x00158
+#define RK3399_HWC_DSP_ST              0x0015c
+#define RK3399_HWC_SRC_ALPHA_CTRL      0x00160
+#define RK3399_HWC_DST_ALPHA_CTRL      0x00164
+#define RK3399_HWC_FADING_CTRL         0x00168
+#define RK3399_HWC_RESERVED1           0x0016c
+#define RK3399_POST_DSP_HACT_INFO      0x00170
+#define RK3399_POST_DSP_VACT_INFO      0x00174
+#define RK3399_POST_SCL_FACTOR_YRGB    0x00178
+#define RK3399_POST_RESERVED           0x0017c
+#define RK3399_POST_SCL_CTRL           0x00180
+#define RK3399_POST_DSP_VACT_INFO_F1   0x00184
+#define RK3399_DSP_HTOTAL_HS_END       0x00188
+#define RK3399_DSP_HACT_ST_END         0x0018c
+#define RK3399_DSP_VTOTAL_VS_END       0x00190
+#define RK3399_DSP_VACT_ST_END         0x00194
+#define RK3399_DSP_VS_ST_END_F1                0x00198
+#define RK3399_DSP_VACT_ST_END_F1      0x0019c
+#define RK3399_PWM_CTRL                        0x001a0
+#define RK3399_PWM_PERIOD_HPR          0x001a4
+#define RK3399_PWM_DUTY_LPR            0x001a8
+#define RK3399_PWM_CNT                 0x001ac
+#define RK3399_BCSH_COLOR_BAR          0x001b0
+#define RK3399_BCSH_BCS                        0x001b4
+#define RK3399_BCSH_H                  0x001b8
+#define RK3399_BCSH_CTRL               0x001bc
+#define RK3399_CABC_CTRL0              0x001c0
+#define RK3399_CABC_CTRL1              0x001c4
+#define RK3399_CABC_CTRL2              0x001c8
+#define RK3399_CABC_CTRL3              0x001cc
+#define RK3399_CABC_GAUSS_LINE0_0      0x001d0
+#define RK3399_CABC_GAUSS_LINE0_1      0x001d4
+#define RK3399_CABC_GAUSS_LINE1_0      0x001d8
+#define RK3399_CABC_GAUSS_LINE1_1      0x001dc
+#define RK3399_CABC_GAUSS_LINE2_0      0x001e0
+#define RK3399_CABC_GAUSS_LINE2_1      0x001e4
+#define RK3399_FRC_LOWER01_0           0x001e8
+#define RK3399_FRC_LOWER01_1           0x001ec
+#define RK3399_FRC_LOWER10_0           0x001f0
+#define RK3399_FRC_LOWER10_1           0x001f4
+#define RK3399_FRC_LOWER11_0           0x001f8
+#define RK3399_FRC_LOWER11_1           0x001fc
+#define RK3399_AFBCD0_CTRL             0x00200
+#define RK3399_AFBCD0_HDR_PTR          0x00204
+#define RK3399_AFBCD0_PIC_SIZE         0x00208
+#define RK3399_AFBCD0_STATUS           0x0020c
+#define RK3399_AFBCD1_CTRL             0x00220
+#define RK3399_AFBCD1_HDR_PTR          0x00224
+#define RK3399_AFBCD1_PIC_SIZE         0x00228
+#define RK3399_AFBCD1_STATUS           0x0022c
+#define RK3399_AFBCD2_CTRL             0x00240
+#define RK3399_AFBCD2_HDR_PTR          0x00244
+#define RK3399_AFBCD2_PIC_SIZE         0x00248
+#define RK3399_AFBCD2_STATUS           0x0024c
+#define RK3399_AFBCD3_CTRL             0x00260
+#define RK3399_AFBCD3_HDR_PTR          0x00264
+#define RK3399_AFBCD3_PIC_SIZE         0x00268
+#define RK3399_AFBCD3_STATUS           0x0026c
+#define RK3399_INTR_EN0                        0x00280
+#define RK3399_INTR_CLEAR0             0x00284
+#define RK3399_INTR_STATUS0            0x00288
+#define RK3399_INTR_RAW_STATUS0                0x0028c
+#define RK3399_INTR_EN1                        0x00290
+#define RK3399_INTR_CLEAR1             0x00294
+#define RK3399_INTR_STATUS1            0x00298
+#define RK3399_INTR_RAW_STATUS1                0x0029c
+#define RK3399_LINE_FLAG               0x002a0
+#define RK3399_VOP_STATUS              0x002a4
+#define RK3399_BLANKING_VALUE          0x002a8
+#define RK3399_MCU_BYPASS_PORT         0x002ac
+#define RK3399_WIN0_DSP_BG             0x002b0
+#define RK3399_WIN1_DSP_BG             0x002b4
+#define RK3399_WIN2_DSP_BG             0x002b8
+#define RK3399_WIN3_DSP_BG             0x002bc
+#define RK3399_YUV2YUV_WIN             0x002c0
+#define RK3399_YUV2YUV_POST            0x002c4
+#define RK3399_AUTO_GATING_EN          0x002cc
+#define RK3399_WIN0_CSC_COE            0x003a0
+#define RK3399_WIN1_CSC_COE            0x003c0
+#define RK3399_WIN2_CSC_COE            0x003e0
+#define RK3399_WIN3_CSC_COE            0x00400
+#define RK3399_HWC_CSC_COE             0x00420
+#define RK3399_BCSH_R2Y_CSC_COE                0x00440
+#define RK3399_BCSH_Y2R_CSC_COE                0x00460
+#define RK3399_POST_YUV2YUV_Y2R_COE    0x00480
+#define RK3399_POST_YUV2YUV_3X3_COE    0x004a0
+#define RK3399_POST_YUV2YUV_R2Y_COE    0x004c0
+#define RK3399_WIN0_YUV2YUV_Y2R                0x004e0
+#define RK3399_WIN0_YUV2YUV_3X3                0x00500
+#define RK3399_WIN0_YUV2YUV_R2Y                0x00520
+#define RK3399_WIN1_YUV2YUV_Y2R                0x00540
+#define RK3399_WIN1_YUV2YUV_3X3                0x00560
+#define RK3399_WIN1_YUV2YUV_R2Y                0x00580
+#define RK3399_WIN2_YUV2YUV_Y2R                0x005a0
+#define RK3399_WIN2_YUV2YUV_3X3                0x005c0
+#define RK3399_WIN2_YUV2YUV_R2Y                0x005e0
+#define RK3399_WIN3_YUV2YUV_Y2R                0x00600
+#define RK3399_WIN3_YUV2YUV_3X3                0x00620
+#define RK3399_WIN3_YUV2YUV_R2Y                0x00640
+#define RK3399_WIN2_LUT_ADDR           0x01000
+#define RK3399_WIN3_LUT_ADDR           0x01400
+#define RK3399_HWC_LUT_ADDR            0x01800
+#define RK3399_CABC_GAMMA_LUT_ADDR     0x01c00
+#define RK3399_GAMMA_LUT_ADDR          0x02000
+/* rk3399 register definition end */
+
 #endif /* _ROCKCHIP_VOP_REG_H */
index 21aed1febeb4d12f6fd4d48ff1d257497dc04bc5..3b807135a5cd8621fe1d40879749bec94dd3d3e2 100644 (file)
@@ -50,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+           DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA | DRIVER_LEGACY,
        .dev_priv_size = sizeof(drm_savage_buf_priv_t),
        .load = savage_driver_load,
        .firstopen = savage_driver_firstopen,
index c01ad0aeaa5806dd034910eff873d48a149fdbc5..3dc0d8ff95ec6d1e047d1e27d0587cd5d1cc7199 100644 (file)
@@ -1001,15 +1001,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
                cmdbuf->cmd_addr = kcmd_addr;
        }
        if (cmdbuf->vb_size) {
-               kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
-               if (kvb_addr == NULL) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-
-               if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
-                                      cmdbuf->vb_size)) {
-                       ret = -EFAULT;
+               kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
+               if (IS_ERR(kvb_addr)) {
+                       ret = PTR_ERR(kvb_addr);
                        goto done;
                }
                cmdbuf->vb_addr = kvb_addr;
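
The hunk above replaces an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which performs both steps and reports failure as an ERR_PTR, collapsing the separate -ENOMEM and -EFAULT paths into one. The generic shape of the pattern, with illustrative names:

/* Sketch: allocate a kernel copy of a user buffer in one call.  The
 * caller owns *out and must kfree() it when done. */
static int copy_user_buffer(const void __user *uptr, size_t size, void **out)
{
	void *kbuf = memdup_user(uptr, size);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	*out = kbuf;
	return 0;
}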
index 79bce76cb8f71cc49ca99933cc9dc767d4689151..ae9839886c4d09ed78e706e4d1b7d843034bf434 100644 (file)
@@ -102,7 +102,7 @@ static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_USE_AGP,
+       .driver_features = DRIVER_USE_AGP | DRIVER_LEGACY,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
        .open = sis_driver_open,
index 494ab257f77c3917ebdeb8cd9aeaa940aac4876b..acd72865feacc0ef75779328b52629b02a32a29c 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_STI
-       tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
-       depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+       tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
+       depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
        select RESET_CONTROLLER
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
@@ -9,4 +9,4 @@ config DRM_STI
        select FW_LOADER
        select SND_SOC_HDMI_CODEC if SND_SOC
        help
-         Choose this option to enable DRM on STM stiH41x chipset
+         Choose this option to enable DRM on STM stiH4xx chipset
index b8057620b3b35702f23476584c15efc0d2a7763e..d20f7c0b4eac62488674b46ffa8cebf80fdc7f62 100644 (file)
@@ -9,7 +9,6 @@ sti-drm-y := \
        sti_crtc.o \
        sti_plane.o \
        sti_hdmi.o \
-       sti_hdmi_tx3g0c55phy.o \
        sti_hdmi_tx3g4c28phy.o \
        sti_dvo.o \
        sti_awg_utils.o \
index 134201ecc6fd98b65f286c0dd6f1135e04aaabc6..f62041fe8412ac97fd7b16f629546863a593785e 100644 (file)
@@ -25,7 +25,7 @@
 /*
  * stiH407 compositor properties
  */
-struct sti_compositor_data stih407_compositor_data = {
+static const struct sti_compositor_data stih407_compositor_data = {
        .nb_subdev = 8,
        .subdev_desc = {
                        {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
@@ -39,38 +39,18 @@ struct sti_compositor_data stih407_compositor_data = {
        },
 };
 
-/*
- * stiH416 compositor properties
- * Note:
- * on stih416 MIXER_AUX has a different base address from MIXER_MAIN
- * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does
- * not fit for stiH416 if we want to enable the MIXER_AUX.
- */
-struct sti_compositor_data stih416_compositor_data = {
-       .nb_subdev = 3,
-       .subdev_desc = {
-                       {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
-                       {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
-                       {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
-       },
-};
-
-int sti_compositor_debufs_init(struct sti_compositor *compo,
-                              struct drm_minor *minor)
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+                               struct drm_minor *minor)
 {
-       int ret = 0, i;
+       unsigned int i;
 
-       for (i = 0; compo->vid[i]; i++) {
-               ret = vid_debugfs_init(compo->vid[i], minor);
-               if (ret)
-                       return ret;
-       }
+       for (i = 0; i < STI_MAX_VID; i++)
+               if (compo->vid[i])
+                       vid_debugfs_init(compo->vid[i], minor);
 
-       for (i = 0; compo->mixer[i]; i++) {
-               ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
-               if (ret)
-                       return ret;
-       }
+       for (i = 0; i < STI_MAX_MIXER; i++)
+               if (compo->mixer[i])
+                       sti_mixer_debugfs_init(compo->mixer[i], minor);
 
        return 0;
 }
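
The loops being removed used the array contents as their terminator (for (i = 0; compo->vid[i]; i++)), which walks past the end of a fixed-size array once every slot is populated; the rewrite bounds the loop and tests each slot instead. The safe shape, standalone with an illustrative size:

#define NSLOTS 4

/* Bounded iteration over a sparse fixed-size array: cannot run past
 * slots[NSLOTS - 1] even when every entry is non-NULL. */
static void visit_all(void *slots[NSLOTS], void (*fn)(void *))
{
	unsigned int i;

	for (i = 0; i < NSLOTS; i++)
		if (slots[i])
			fn(slots[i]);
}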
@@ -183,9 +163,6 @@ static const struct component_ops sti_compositor_ops = {
 
 static const struct of_device_id compositor_of_match[] = {
        {
-               .compatible = "st,stih416-compositor",
-               .data = &stih416_compositor_data,
-       }, {
                .compatible = "st,stih407-compositor",
                .data = &stih407_compositor_data,
        }, {
@@ -201,6 +178,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
        struct device_node *vtg_np;
        struct sti_compositor *compo;
        struct resource *res;
+       unsigned int i;
 
        compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
        if (!compo) {
@@ -208,7 +186,8 @@ static int sti_compositor_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        compo->dev = dev;
-       compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
+       for (i = 0; i < STI_MAX_MIXER; i++)
+               compo->vtg_vblank_nb[i].notifier_call = sti_crtc_vblank_cb;
 
        /* populate data structure depending on compatibility */
        BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -266,12 +245,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
 
        vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
        if (vtg_np)
-               compo->vtg_main = of_vtg_find(vtg_np);
+               compo->vtg[STI_MIXER_MAIN] = of_vtg_find(vtg_np);
        of_node_put(vtg_np);
 
        vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
        if (vtg_np)
-               compo->vtg_aux = of_vtg_find(vtg_np);
+               compo->vtg[STI_MIXER_AUX] = of_vtg_find(vtg_np);
        of_node_put(vtg_np);
 
        platform_set_drvdata(pdev, compo);
index 24444ef42a98258910fcc72254bb5aae46671dce..2952a2d25a52b6c6fe3efdfc3d4036f66454540d 100644 (file)
@@ -60,9 +60,8 @@ struct sti_compositor_data {
  * @rst_aux: reset control of the aux path
  * @mixer: array of mixers
  * @vid: array of vids
- * @vtg_main: vtg for main data path
- * @vtg_aux: vtg for auxillary data path
- * @vtg_vblank_nb: callback for VTG VSYNC notification
+ * @vtg: array of vtgs
+ * @vtg_vblank_nb: array of callbacks for VTG VSYNC notification
  */
 struct sti_compositor {
        struct device *dev;
@@ -76,12 +75,11 @@ struct sti_compositor {
        struct reset_control *rst_aux;
        struct sti_mixer *mixer[STI_MAX_MIXER];
        struct sti_vid *vid[STI_MAX_VID];
-       struct sti_vtg *vtg_main;
-       struct sti_vtg *vtg_aux;
-       struct notifier_block vtg_vblank_nb;
+       struct sti_vtg *vtg[STI_MAX_MIXER];
+       struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
 };
 
-int sti_compositor_debufs_init(struct sti_compositor *compo,
-                              struct drm_minor *minor);
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+                               struct drm_minor *minor);
 
 #endif
index c7d734dc3cf405a1799accc0216efd54e1dac3d9..e992bed98dcba8c7ee441f56709ac5c791fa8fb9 100644 (file)
@@ -86,8 +86,7 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
                goto pix_error;
        }
 
-       sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
-                       compo->vtg_main : compo->vtg_aux, &crtc->mode);
+       sti_vtg_set_config(compo->vtg[mixer->id], &crtc->mode);
 
        if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
                DRM_ERROR("Can't set active video area\n");
@@ -166,6 +165,10 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 
                switch (plane->status) {
                case STI_PLANE_UPDATED:
+                       /* ignore updates meant for another CRTC */
+                       if (p->state->crtc != crtc)
+                               continue;
+
                        /* update planes tag as updated */
                        DRM_DEBUG_DRIVER("update plane %s\n",
                                         sti_plane_to_str(plane));
@@ -244,8 +247,7 @@ static int sti_crtc_set_property(struct drm_crtc *crtc,
 int sti_crtc_vblank_cb(struct notifier_block *nb,
                       unsigned long event, void *data)
 {
-       struct sti_compositor *compo =
-               container_of(nb, struct sti_compositor, vtg_vblank_nb);
+       struct sti_compositor *compo;
        struct drm_crtc *crtc = data;
        struct sti_mixer *mixer;
        unsigned long flags;
@@ -254,6 +256,7 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
 
        priv = crtc->dev->dev_private;
        pipe = drm_crtc_index(crtc);
+       compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
        mixer = compo->mixer[pipe];
 
        if ((event != VTG_TOP_FIELD_EVENT) &&
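
The interesting detail here is container_of() applied to an array member: each &compo->vtg_vblank_nb[pipe] is registered per pipe, so the callback first derives pipe from the CRTC and only then recovers the enclosing sti_compositor. The driver relies on offsetof() accepting a runtime array index, a GCC extension; a standalone model with a constant index:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };

struct compositor {
	int id;
	struct notifier_block vblank_nb[2];	/* one notifier per pipe */
};

int main(void)
{
	struct compositor c = { .id = 42 };
	/* what the notifier core hands back to the callback: */
	struct notifier_block *nb = &c.vblank_nb[1];

	/* the member designator must name the element nb points into */
	struct compositor *back =
		container_of(nb, struct compositor, vblank_nb[1]);

	printf("id = %d\n", back->id);	/* prints 42 */
	return 0;
}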
@@ -295,14 +298,13 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct sti_private *dev_priv = dev->dev_private;
        struct sti_compositor *compo = dev_priv->compo;
-       struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+       struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
        struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+       struct sti_vtg *vtg = compo->vtg[pipe];
 
        DRM_DEBUG_DRIVER("\n");
 
-       if (sti_vtg_register_client(pipe == STI_MIXER_MAIN ?
-                       compo->vtg_main : compo->vtg_aux,
-                       vtg_vblank_nb, crtc)) {
+       if (sti_vtg_register_client(vtg, vtg_vblank_nb, crtc)) {
                DRM_ERROR("Cannot register VTG notifier\n");
                return -EINVAL;
        }
@@ -314,13 +316,13 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
 {
        struct sti_private *priv = drm_dev->dev_private;
        struct sti_compositor *compo = priv->compo;
-       struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+       struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
        struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+       struct sti_vtg *vtg = compo->vtg[pipe];
 
        DRM_DEBUG_DRIVER("\n");
 
-       if (sti_vtg_unregister_client(pipe == STI_MIXER_MAIN ?
-                       compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+       if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
                DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
        /* free the resources of the pending requests */
@@ -336,7 +338,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
        struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
 
        if (drm_crtc_index(crtc) == 0)
-               return sti_compositor_debufs_init(compo, crtc->dev->primary);
+               return sti_compositor_debugfs_init(compo, crtc->dev->primary);
 
        return 0;
 }
index 3b53f7f2e3fc4b377a6ac118d868557acaf77c4b..cca75bddb9ad1aa6d77cc317bfb3d56f36c79c6b 100644 (file)
@@ -309,15 +309,15 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
 {
        struct sti_plane *plane = to_sti_plane(drm_plane);
 
-       if (!drm_plane->crtc) {
+       if (!oldstate->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
                                 drm_plane->base.id);
                return;
        }
 
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id,
-                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+                        oldstate->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
 
        plane->status = STI_PLANE_DISABLING;
@@ -345,7 +345,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
        return cursor_debugfs_init(cursor, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = sti_cursor_destroy,
index 96bd3d08b2d424d5dd2ec24d543bfe343c97c2ad..2784919a73664c6287e7d5812839347d572011ca 100644 (file)
@@ -140,7 +140,7 @@ err:
        return ret;
 }
 
-void sti_drm_dbg_cleanup(struct drm_minor *minor)
+static void sti_drm_dbg_cleanup(struct drm_minor *minor)
 {
        drm_debugfs_remove_files(sti_drm_dbg_list,
                                 ARRAY_SIZE(sti_drm_dbg_list), minor);
@@ -178,7 +178,7 @@ static void sti_atomic_complete(struct sti_private *private,
         */
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, false);
+       drm_atomic_helper_commit_planes(drm, state, 0);
        drm_atomic_helper_commit_modeset_enables(drm, state);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -282,7 +282,7 @@ static const struct file_operations sti_driver_fops = {
 };
 
 static struct drm_driver sti_driver = {
-       .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+       .driver_features = DRIVER_MODESET |
            DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -365,8 +365,8 @@ static int sti_bind(struct device *dev)
        int ret;
 
        ddev = drm_dev_alloc(&sti_driver, dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        ddev->platformdev = to_platform_device(dev);
 
index 00881eb4536e85f3cc196c0389f34cfff98716a6..e8c1ed08a9f7eea0726347082a8f35c9e21c762e 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_panel.h>
 
 #include "sti_awg_utils.h"
+#include "sti_drv.h"
 #include "sti_mixer.h"
 
 /* DVO registers */
@@ -106,7 +107,7 @@ struct sti_dvo_connector {
        container_of(x, struct sti_dvo_connector, drm_connector)
 
 #define BLANKING_LEVEL 16
-int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
+static int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
 {
        struct drm_display_mode *mode = &dvo->mode;
        struct dvo_config *config = dvo->config;
index b8d942ca45e80be509230a88d243c6cf36572d61..81df3097b54561ea37cf23a77e39ce5f95108c0c 100644 (file)
@@ -460,6 +460,7 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
                clk_disable_unprepare(gdp->clk_pix);
 
        gdp->plane.status = STI_PLANE_DISABLED;
+       gdp->vtg = NULL;
 }
 
 /**
@@ -473,8 +474,8 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
  * RETURNS:
  * 0 on success.
  */
-int sti_gdp_field_cb(struct notifier_block *nb,
-               unsigned long event, void *data)
+static int sti_gdp_field_cb(struct notifier_block *nb,
+                           unsigned long event, void *data)
 {
        struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
 
@@ -611,7 +612,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
        struct drm_crtc *crtc = state->crtc;
        struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
        struct drm_framebuffer *fb =  state->fb;
-       bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
        struct drm_crtc_state *crtc_state;
        struct sti_mixer *mixer;
        struct drm_display_mode *mode;
@@ -628,8 +628,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
        mode = &crtc_state->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
-       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
        /* src_x are in 16.16 format */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
@@ -648,10 +648,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
                return -EINVAL;
        }
 
-       if (first_prepare) {
+       if (!gdp->vtg) {
                /* Register gdp callback */
-               gdp->vtg = mixer->id == STI_MIXER_MAIN ?
-                                       compo->vtg_main : compo->vtg_aux;
+               gdp->vtg = compo->vtg[mixer->id];
                if (sti_vtg_register_client(gdp->vtg,
                                            &gdp->vtg_field_nb, crtc)) {
                        DRM_ERROR("Cannot register VTG notifier\n");
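
Taken together with the gdp->vtg = NULL added to sti_gdp_disable() above, the vtg pointer itself now serves as the "callback registered" flag, replacing the indirect plane->status test. The shape of the pattern, with illustrative names; the helper is assumed, and clearing the pointer on failure is a defensive touch of this sketch, not something the driver does:

#include <errno.h>

struct vtg;				/* opaque here */
int register_client(struct vtg *vtg);	/* assumed helper, sketch only */

struct gdp_like {
	struct vtg *vtg;	/* NULL until the VTG callback is registered */
};

/* check path: register exactly once per enable cycle */
static int gdp_check(struct gdp_like *gdp, struct vtg *vtg_for_mixer)
{
	if (!gdp->vtg) {
		gdp->vtg = vtg_for_mixer;
		if (register_client(gdp->vtg)) {
			gdp->vtg = NULL;
			return -EINVAL;
		}
	}
	return 0;
}

/* disable path: clearing the pointer re-arms registration */
static void gdp_disable(struct gdp_like *gdp)
{
	gdp->vtg = NULL;
}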
@@ -719,7 +718,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
        u32 dma_updated_top;
        u32 dma_updated_btm;
        int format;
-       unsigned int depth, bpp;
+       unsigned int bpp;
        u32 ydo, xdo, yds, xds;
 
        if (!crtc || !fb)
@@ -728,8 +727,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
        mode = &crtc->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
-       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
        /* src_x are in 16.16 format */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
@@ -758,9 +757,9 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
                         (unsigned long)cma_obj->paddr);
 
        /* pixel memory location */
-       drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+       bpp = drm_format_plane_cpp(fb->pixel_format, 0);
        top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
-       top_field->gam_gdp_pml += src_x * (bpp >> 3);
+       top_field->gam_gdp_pml += src_x * bpp;
        top_field->gam_gdp_pml += src_y * fb->pitches[0];
 
        /* output parameters (clamped / cropped) */
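
drm_format_plane_cpp() returns bytes per pixel for one plane directly, so the src_x term no longer needs the bits-to-bytes shift that drm_fb_get_bpp_depth() forced. The address arithmetic, worked standalone for XRGB8888 (cpp = 4; all values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Byte address of source pixel (src_x, src_y) in a linear framebuffer:
 * base + plane_offset + src_x * cpp + src_y * pitch */
int main(void)
{
	uint32_t paddr = 0x10000000, offset = 0;
	uint32_t pitch = 1920 * 4, cpp = 4;	/* XRGB8888, 1920 wide */
	uint32_t src_x = 16, src_y = 2;

	uint32_t pml = paddr + offset + src_x * cpp + src_y * pitch;

	printf("gam_gdp_pml = 0x%08x\n", pml);	/* 0x10003c40 */
	return 0;
}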
@@ -810,7 +809,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
        if (!curr_list) {
                /* First update or invalid node should directly write in the
                 * hw register */
-               DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+               DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
                                 sti_plane_to_str(plane));
 
                writel(gdp->is_curr_top ?
@@ -846,15 +845,15 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
 {
        struct sti_plane *plane = to_sti_plane(drm_plane);
 
-       if (!drm_plane->crtc) {
+       if (!oldstate->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
                                 drm_plane->base.id);
                return;
        }
 
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id,
-                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+                        oldstate->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
 
        plane->status = STI_PLANE_DISABLING;
@@ -882,7 +881,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
        return gdp_debugfs_init(gdp, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = sti_gdp_destroy,
index 8505569f75deec1489fec0f3a74b7ccf8f3067b5..e7c243f70870292dbafdf3bb2f1919d0953b4efd 100644 (file)
 #define SCALE_CTRL_CR_DFLT              0x00DB0249
 
 /* Video DACs control */
-#define VIDEO_DACS_CONTROL_MASK         0x0FFF
-#define VIDEO_DACS_CONTROL_SYSCFG2535   0x085C /* for stih416 */
-#define DAC_CFG_HD_OFF_SHIFT            5
-#define DAC_CFG_HD_OFF_MASK             (0x7 << DAC_CFG_HD_OFF_SHIFT)
-#define VIDEO_DACS_CONTROL_SYSCFG5072   0x0120 /* for stih407 */
 #define DAC_CFG_HD_HZUVW_OFF_MASK       BIT(1)
 
-
 /* Upsampler values for the alternative 2X Filter */
 #define SAMPLER_COEF_NB                 8
 #define HDA_ANA_SRC_Y_CFG_ALT_2X        0x01130000
@@ -300,28 +294,14 @@ static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
  */
 static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
 {
-       u32 mask;
-
        if (hda->video_dacs_ctrl) {
                u32 val;
 
-               switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
-               case VIDEO_DACS_CONTROL_SYSCFG2535:
-                       mask = DAC_CFG_HD_OFF_MASK;
-                       break;
-               case VIDEO_DACS_CONTROL_SYSCFG5072:
-                       mask = DAC_CFG_HD_HZUVW_OFF_MASK;
-                       break;
-               default:
-                       DRM_INFO("Video DACS control register not supported!");
-                       return;
-               }
-
                val = readl(hda->video_dacs_ctrl);
                if (enable)
-                       val &= ~mask;
+                       val &= ~DAC_CFG_HD_HZUVW_OFF_MASK;
                else
-                       val |= mask;
+                       val |= DAC_CFG_HD_HZUVW_OFF_MASK;
 
                writel(val, hda->video_dacs_ctrl);
        }
@@ -352,24 +332,11 @@ static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
 static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
 {
        u32 val = readl(reg);
-       u32 mask;
-
-       switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
-       case VIDEO_DACS_CONTROL_SYSCFG2535:
-               mask = DAC_CFG_HD_OFF_MASK;
-               break;
-       case VIDEO_DACS_CONTROL_SYSCFG5072:
-               mask = DAC_CFG_HD_HZUVW_OFF_MASK;
-               break;
-       default:
-               DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!");
-               return;
-       }
 
        seq_puts(s, "\n");
        seq_printf(s, "\n  %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
        seq_puts(s, "\tHD DACs ");
-       seq_puts(s, val & mask ? "disabled" : "enabled");
+       seq_puts(s, val & DAC_CFG_HD_HZUVW_OFF_MASK ? "disabled" : "enabled");
 }
 
 static int hda_dbg_show(struct seq_file *s, void *data)
index fedc17f98d9b029c835a7791e023e127c30e72a7..376b0763c874aaebb88d4bc90927470d20809ed9 100644 (file)
@@ -22,7 +22,6 @@
 
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
-#include "sti_hdmi_tx3g0c55phy.h"
 #include "sti_vtg.h"
 
 #define HDMI_CFG                        0x0000
@@ -203,7 +202,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 
        /* Audio FIFO underrun IRQ */
        if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
-               DRM_INFO("Warning: audio FIFO underrun occurs!");
+               DRM_INFO("Warning: audio FIFO underrun occurs!\n");
 
        return IRQ_HANDLED;
 }
@@ -569,7 +568,7 @@ static void hdmi_swreset(struct sti_hdmi *hdmi)
 
        /* Wait reset completed */
        wait_event_interruptible_timeout(hdmi->wait_event,
-                                        hdmi->event_received == true,
+                                        hdmi->event_received,
                                         msecs_to_jiffies
                                         (HDMI_TIMEOUT_SWRESET));
 
@@ -1054,6 +1053,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = sti_hdmi_connector_detect,
        .destroy = drm_connector_cleanup,
@@ -1181,7 +1181,7 @@ static void hdmi_audio_shutdown(struct device *dev, void *data)
                    HDMI_AUD_CFG_ONE_BIT_INVALID;
        hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
 
-       hdmi->audio.enabled = 0;
+       hdmi->audio.enabled = false;
        hdmi_audio_infoframe_config(hdmi);
 }
 
@@ -1213,7 +1213,7 @@ static int hdmi_audio_hw_params(struct device *dev,
                return -EINVAL;
        }
 
-       audio.enabled = 1;
+       audio.enabled = true;
 
        ret = hdmi_audio_configure(hdmi, &audio);
        if (ret < 0)
@@ -1265,7 +1265,7 @@ static int sti_hdmi_register_audio_driver(struct device *dev,
 
        DRM_DEBUG_DRIVER("\n");
 
-       hdmi->audio.enabled = 0;
+       hdmi->audio.enabled = false;
 
        hdmi->audio_pdev = platform_device_register_data(
                dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
@@ -1373,9 +1373,6 @@ static const struct component_ops sti_hdmi_ops = {
 
 static const struct of_device_id hdmi_of_match[] = {
        {
-               .compatible = "st,stih416-hdmi",
-               .data = &tx3g0c55phy_ops,
-       }, {
                .compatible = "st,stih407-hdmi",
                .data = &tx3g4c28phy_ops,
        }, {
@@ -1422,22 +1419,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                goto release_adapter;
        }
 
-       if (of_device_is_compatible(np, "st,stih416-hdmi")) {
-               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                  "syscfg");
-               if (!res) {
-                       DRM_ERROR("Invalid syscfg resource\n");
-                       ret = -ENOMEM;
-                       goto release_adapter;
-               }
-               hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
-                                                   resource_size(res));
-               if (!hdmi->syscfg) {
-                       ret = -ENOMEM;
-                       goto release_adapter;
-               }
-       }
-
        hdmi->phy_ops = (struct hdmi_phy_ops *)
                of_match_node(hdmi_of_match, np)->data;
 
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
deleted file mode 100644 (file)
index 49ae8e4..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include "sti_hdmi_tx3g0c55phy.h"
-
-#define HDMI_SRZ_PLL_CFG                0x0504
-#define HDMI_SRZ_TAP_1                  0x0508
-#define HDMI_SRZ_TAP_2                  0x050C
-#define HDMI_SRZ_TAP_3                  0x0510
-#define HDMI_SRZ_CTRL                   0x0514
-
-#define HDMI_SRZ_PLL_CFG_POWER_DOWN     BIT(0)
-#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT     1
-#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ    0
-#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ    1
-#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ   2
-#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ   3
-#define HDMI_SRZ_PLL_CFG_VCOR_MASK      3
-#define HDMI_SRZ_PLL_CFG_VCOR(x)        (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
-#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT     8
-#define HDMI_SRZ_PLL_CFG_NDIV_MASK      (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
-#define HDMI_SRZ_PLL_CFG_MODE_SHIFT     16
-#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ  0x1
-#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ  0x4
-#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ    0x5
-#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
-#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ  0x7
-#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ    0x8
-#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ  0x9
-#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
-#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ    0xB
-#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ  0xC
-#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ   0xD
-#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
-#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ   0xF
-#define HDMI_SRZ_PLL_CFG_MODE_MASK      0xF
-#define HDMI_SRZ_PLL_CFG_MODE(x)        (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
-
-#define HDMI_SRZ_CTRL_POWER_DOWN        (1 << 0)
-#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN  (1 << 1)
-
-/* sysconf registers */
-#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858        /* SYSTEM_CONFIG2534 */
-#define HDMI_REJECTION_PLL_STATUS        0x0948        /* SYSTEM_CONFIG2594 */
-
-#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
-#define REJECTION_PLL_HDMI_ENABLE_MASK  (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
-#define REJECTION_PLL_HDMI_PDIV_SHIFT   24
-#define REJECTION_PLL_HDMI_PDIV_MASK    (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
-#define REJECTION_PLL_HDMI_NDIV_SHIFT   16
-#define REJECTION_PLL_HDMI_NDIV_MASK    (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
-#define REJECTION_PLL_HDMI_MDIV_SHIFT   8
-#define REJECTION_PLL_HDMI_MDIV_MASK    (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
-
-#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
-
-#define HDMI_TIMEOUT_PLL_LOCK  50   /*milliseconds */
-
-/**
- * pll mode structure
- *
- * A pointer to an array of these structures is passed to a TMDS (HDMI) output
- * via the control interface to provide board and SoC specific
- * configurations of the HDMI PHY. Each entry in the array specifies a hardware
- * specific configuration for a given TMDS clock frequency range. The array
- * should be terminated with an entry that has all fields set to zero.
- *
- * @min: Lower bound of TMDS clock frequency this entry applies to
- * @max: Upper bound of TMDS clock frequency this entry applies to
- * @mode: SoC specific register configuration
- */
-struct pllmode {
-       u32 min;
-       u32 max;
-       u32 mode;
-};
-
-#define NB_PLL_MODE 7
-static struct pllmode pllmodes[NB_PLL_MODE] = {
-       {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
-       {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
-       {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
-       {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
-       {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
-       {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
-       {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
-};
-
-#define NB_HDMI_PHY_CONFIG 5
-static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
-       {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
-       {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
-       {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
-       {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
-       {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
-};
-
-#define PLL_CHANGE_DELAY       1 /* ms */
-
-/**
- * Disable the pll rejection
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if the pll has been disabled
- */
-static bool disable_pll_rejection(struct sti_hdmi *hdmi)
-{
-       u32 val;
-
-       DRM_DEBUG_DRIVER("\n");
-
-       val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-       val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
-       writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-       msleep(PLL_CHANGE_DELAY);
-       val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
-       return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Enable the old BCH/rejection PLL is now reused to provide the CLKPXPLL
- * clock input to the new PHY PLL that generates the serializer clock
- * (TMDS*10) and the TMDS clock which is now fed back into the HDMI
- * formatter instead of the TMDS clock line from ClockGenB.
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if pll has been correctly set
- */
-static bool enable_pll_rejection(struct sti_hdmi *hdmi)
-{
-       unsigned int inputclock;
-       u32 mdiv, ndiv, pdiv, val;
-
-       DRM_DEBUG_DRIVER("\n");
-
-       if (!disable_pll_rejection(hdmi))
-               return false;
-
-       inputclock = hdmi->mode.clock * 1000;
-
-       DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
-
-
-       /* Power up the HDMI rejection PLL
-        * Note: On this SoC (stiH416) we are forced to have the input clock
-        * be equal to the HDMI pixel clock.
-        *
-        * The values here have been suggested by validation however they are
-        * still provisional and subject to change.
-        *
-        * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
-        */
-       if (inputclock < 50000000) {
-               /*
-                * For slower clocks we need to multiply more to keep the
-                * internal VCO frequency within the physical specification
-                * of the PLL.
-                */
-               pdiv = 4;
-               ndiv = 240;
-               mdiv = 30;
-       } else {
-               pdiv = 2;
-               ndiv = 60;
-               mdiv = 30;
-       }
-
-       val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-       val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
-               REJECTION_PLL_HDMI_NDIV_MASK |
-               REJECTION_PLL_HDMI_MDIV_MASK |
-               REJECTION_PLL_HDMI_ENABLE_MASK);
-
-       val |=  (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
-               (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
-               (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
-               (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
-
-       writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-       msleep(PLL_CHANGE_DELAY);
-       val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
-       return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Start hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * Return false if an error occur
- */
-static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
-{
-       u32 ckpxpll = hdmi->mode.clock * 1000;
-       u32 val, tmdsck, freqvco, pllctrl = 0;
-       unsigned int i;
-
-       if (!enable_pll_rejection(hdmi))
-               return false;
-
-       DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
-
-       /* Assuming no pixel repetition and 24bits color */
-       tmdsck = ckpxpll;
-       pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
-
-       /*
-        * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
-        * a clock frequency supported by one of the specific PLL modes then we
-        * will end up using the generic mode (0) which only supports a 10x
-        * multiplier, hence only 24bit color.
-        */
-       for (i = 0; i < NB_PLL_MODE; i++) {
-               if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
-                       pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
-       }
-
-       freqvco = tmdsck * 10;
-       if (freqvco <= 425000000UL)
-               pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
-       else if (freqvco <= 850000000UL)
-               pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
-       else if (freqvco <= 1700000000UL)
-               pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
-       else if (freqvco <= 2970000000UL)
-               pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
-       else {
-               DRM_ERROR("PHY serializer clock out of range\n");
-               goto err;
-       }
-
-       /*
-        * Configure and power up the PHY PLL
-        */
-       hdmi->event_received = false;
-       DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
-       hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
-
-       /* wait PLL interrupt */
-       wait_event_interruptible_timeout(hdmi->wait_event,
-                                        hdmi->event_received == true,
-                                        msecs_to_jiffies
-                                        (HDMI_TIMEOUT_PLL_LOCK));
-
-       if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
-               DRM_ERROR("hdmi phy pll not locked\n");
-               goto err;
-       }
-
-       DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
-
-       /*
-        * To configure the source termination and pre-emphasis appropriately
-        * for different high speed TMDS clock frequencies a phy configuration
-        * table must be provided, tailored to the SoC and board combination.
-        */
-       for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
-               if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
-                   (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
-                       val = hdmiphy_config[i].config[0];
-                       hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
-                       val = hdmiphy_config[i].config[1];
-                       hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
-                       val = hdmiphy_config[i].config[2];
-                       hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
-                       val = hdmiphy_config[i].config[3];
-                       val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
-                       val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
-                       hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
-
-                       DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
-                                        hdmiphy_config[i].config[0],
-                                        hdmiphy_config[i].config[1],
-                                        hdmiphy_config[i].config[2],
-                                        hdmiphy_config[i].config[3]);
-                       return true;
-               }
-       }
-
-       /*
-        * Default, power up the serializer with no pre-emphasis or source
-        * termination.
-        */
-       hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
-       hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
-       hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
-       hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
-
-       return true;
-
-err:
-       disable_pll_rejection(hdmi);
-
-       return false;
-}
-
-/**
- * Stop hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- */
-static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
-{
-       DRM_DEBUG_DRIVER("\n");
-
-       hdmi->event_received = false;
-
-       hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
-       hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
-
-       /* wait PLL interrupt */
-       wait_event_interruptible_timeout(hdmi->wait_event,
-                                        hdmi->event_received == true,
-                                        msecs_to_jiffies
-                                        (HDMI_TIMEOUT_PLL_LOCK));
-
-       if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
-               DRM_ERROR("hdmi phy pll not well disabled\n");
-
-       disable_pll_rejection(hdmi);
-}
-
-struct hdmi_phy_ops tx3g0c55phy_ops = {
-       .start = sti_hdmi_tx3g0c55phy_start,
-       .stop = sti_hdmi_tx3g0c55phy_stop,
-};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
deleted file mode 100644 (file)
index 068237b..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HDMI_TX3G0C55PHY_H_
-#define _STI_HDMI_TX3G0C55PHY_H_
-
-#include "sti_hdmi.h"
-
-extern struct hdmi_phy_ops tx3g0c55phy_ops;
-
-#endif
index b5ee783e3e7c9127652a1dffdcd5de5792dab97d..f88130f2eb487e561ebc468258e8b524224b3667 100644 (file)
@@ -17,6 +17,7 @@
 #include "sti_hqvdp_lut.h"
 #include "sti_plane.h"
 #include "sti_vtg.h"
+#include "sti_drv.h"
 
 /* Firmware name */
 #define HQVDP_FMW_NAME          "hqvdp-stih407.bin"
@@ -770,6 +771,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
                DRM_ERROR("XP70 could not revert to idle\n");
 
        hqvdp->plane.status = STI_PLANE_DISABLED;
+       hqvdp->xp70_initialized = false;
 }
 
 /**
@@ -783,7 +785,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
  * RETURNS:
  * 0 on success.
  */
-int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
 {
        struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
        int btm_cmd_offset, top_cmd_offest;
@@ -1012,7 +1014,6 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
        struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
        struct drm_crtc *crtc = state->crtc;
        struct drm_framebuffer *fb = state->fb;
-       bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
        struct drm_crtc_state *crtc_state;
        struct drm_display_mode *mode;
        int dst_x, dst_y, dst_w, dst_h;
@@ -1026,8 +1027,8 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
        mode = &crtc_state->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
-       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
        /* src_x are in 16.16 format */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
@@ -1063,7 +1064,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
                return -EINVAL;
        }
 
-       if (first_prepare) {
+       if (!hqvdp->xp70_initialized) {
                /* Start HQVDP XP70 coprocessor */
                sti_hqvdp_start_xp70(hqvdp);
 
@@ -1115,8 +1116,8 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
        mode = &crtc->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
-       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
        /* src_x are in 16.16 format */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
@@ -1214,15 +1215,15 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
 {
        struct sti_plane *plane = to_sti_plane(drm_plane);
 
-       if (!drm_plane->crtc) {
+       if (!oldstate->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
                                 drm_plane->base.id);
                return;
        }
 
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id,
-                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+                        oldstate->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
 
        plane->status = STI_PLANE_DISABLING;
@@ -1250,7 +1251,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
        return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = sti_hqvdp_destroy,
@@ -1289,7 +1290,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
        return &hqvdp->plane.drm_plane;
 }
 
-int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
 {
        struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
        struct drm_device *drm_dev = data;
index 7d9aea805eab0f80203f8094ad16979ae410712f..4ddc58f7fe2e98bfb8e1f69123263ae22e5358c1 100644 (file)
@@ -16,12 +16,6 @@ static unsigned int bkg_color = 0x000000;
 MODULE_PARM_DESC(bkgcolor, "Value of the background color 0xRRGGBB");
 module_param_named(bkgcolor, bkg_color, int, 0644);
 
-/* Identity: G=Y , B=Cb , R=Cr */
-static const u32 mixerColorSpaceMatIdentity[] = {
-       0x10000000, 0x00000000, 0x10000000, 0x00001000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000
-};
-
 /* regs offset */
 #define GAM_MIXER_CTL      0x00
 #define GAM_MIXER_BKC      0x04
@@ -358,22 +352,12 @@ int sti_mixer_set_plane_status(struct sti_mixer *mixer,
        return 0;
 }
 
-void sti_mixer_set_matrix(struct sti_mixer *mixer)
-{
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
-               sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
-                                   mixerColorSpaceMatIdentity[i]);
-}
-
 struct sti_mixer *sti_mixer_create(struct device *dev,
                                   struct drm_device *drm_dev,
                                   int id,
                                   void __iomem *baseaddr)
 {
        struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
-       struct device_node *np = dev->of_node;
 
        dev_dbg(dev, "%s\n", __func__);
        if (!mixer) {
@@ -384,9 +368,6 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
        mixer->dev = dev;
        mixer->id = id;
 
-       if (of_device_is_compatible(np, "st,stih416-compositor"))
-               sti_mixer_set_matrix(mixer);
-
        DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
                         sti_mixer_to_str(mixer), mixer->regs);
 
index e25995b35715f3ddf4c0ca32e42ec97cdf4d2d82..ad46d3558d9161b20d4cde8bce57d85e4f01d30d 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "sti_crtc.h"
+#include "sti_drv.h"
 #include "sti_vtg.h"
 
 /* glue registers */
@@ -209,13 +210,11 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
  * @tvout: tvout structure
  * @reg: register to set
  * @main_path: main or auxiliary path
- * @sel_input_logic_inverted: need to invert the logic
  * @sel_input: selected_input (main/aux + conv)
  */
 static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
                                    int reg,
                                    bool main_path,
-                                   bool sel_input_logic_inverted,
                                    enum sti_tvout_video_out_type video_out)
 {
        u32 sel_input;
@@ -236,8 +235,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
        }
 
        /* on the stih407 chip, the sel_input bypass mode logic is inverted */
-       if (sel_input_logic_inverted)
-               sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+       sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
 
        val &= ~TVO_VIP_SEL_INPUT_MASK;
        val |= sel_input;
@@ -295,8 +293,6 @@ static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
  */
 static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
 {
-       struct device_node *node = tvout->dev->of_node;
-       bool sel_input_logic_inverted = false;
        u32 tvo_in_vid_format;
        int val, tmp;
 
@@ -334,16 +330,11 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
        /* Set round mode (rounded to 8-bit per component) */
        tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
 
-       if (of_device_is_compatible(node, "st,stih407-tvout")) {
-               /* Set input video format */
-               tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
-                                        TVO_IN_FMT_SIGNED);
-               sel_input_logic_inverted = true;
-       }
+       /* Set input video format */
+       tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
        /* Input selection */
        tvout_vip_set_sel_input(tvout, TVO_VIP_DVO, main_path,
-                               sel_input_logic_inverted,
                                STI_TVOUT_VIDEO_OUT_RGB);
 }
 
@@ -356,8 +347,6 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
  */
 static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
 {
-       struct device_node *node = tvout->dev->of_node;
-       bool sel_input_logic_inverted = false;
        u32 tvo_in_vid_format;
 
        dev_dbg(tvout->dev, "%s\n", __func__);
@@ -390,16 +379,12 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
        /* set round mode (rounded to 8-bit per component) */
        tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
 
-       if (of_device_is_compatible(node, "st,stih407-tvout")) {
-               /* set input video format */
-               tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
-                                       TVO_IN_FMT_SIGNED);
-               sel_input_logic_inverted = true;
-       }
+       /* set input video format */
+       tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
        /* input selection */
        tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
-                       sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+                               STI_TVOUT_VIDEO_OUT_RGB);
 }
 
 /**
@@ -411,8 +396,6 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
  */
 static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
 {
-       struct device_node *node = tvout->dev->of_node;
-       bool sel_input_logic_inverted = false;
        u32 tvo_in_vid_format;
        int val;
 
@@ -448,16 +431,11 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
        /* set round mode (rounded to 10-bit per component) */
        tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
 
-       if (of_device_is_compatible(node, "st,stih407-tvout")) {
-               /* set input video format */
-               tvout_vip_set_in_vid_fmt(tvout,
-                       tvo_in_vid_format, TVO_IN_FMT_SIGNED);
-               sel_input_logic_inverted = true;
-       }
+       /* Set input video format */
+       tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
        /* Input selection */
        tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
-                               sel_input_logic_inverted,
                                STI_TVOUT_VIDEO_OUT_YUV);
 
        /* power up HD DAC */
@@ -905,7 +883,6 @@ static int sti_tvout_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tvout_of_match[] = {
-       { .compatible = "st,stih416-tvout", },
        { .compatible = "st,stih407-tvout", },
        { /* end node */ }
 };
index 47634a0251fca699916479b4a67ea7fa5f612f86..2ad59892b57e254fffa4467aebe8039d8260181f 100644 (file)
@@ -142,8 +142,8 @@ void sti_vid_commit(struct sti_vid *vid,
        struct drm_display_mode *mode = &crtc->mode;
        int dst_x = state->crtc_x;
        int dst_y = state->crtc_y;
-       int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       int dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+       int dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
        int src_h = state->src_h >> 16;
        u32 val, ydo, xdo, yds, xds;
 
index b1eb0d77630dfce30ee13373371edfbb32c10dad..cf7fe8a1db42e1b21b59ba9db7df431302d03259 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <drm/drmP.h>
 
+#include "sti_drv.h"
+
 /* registers offset */
 #define VTAC_CONFIG                     0x00
 #define VTAC_RX_FIFO_CONFIG             0x04
index 0bdc385eec178663b823df0b83c5ed2a320965e8..a8882bdd0f8badf46d0921710dc359cee2f88cde 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <drm/drmP.h>
 
+#include "sti_drv.h"
 #include "sti_vtg.h"
 
 #define VTG_MODE_MASTER         0
@@ -72,7 +73,7 @@
 #define AWG_DELAY_ED        (-8)
 #define AWG_DELAY_SD        (-7)
 
-LIST_HEAD(vtg_lookup);
+static LIST_HEAD(vtg_lookup);
 
 /*
  * STI VTG register offset structure
index 58cd551498272fb8ccf7b832612aa2c7c106cdf0..d625a82a6e5fb8e570764e26aa2e202f5f65bc0d 100644 (file)
@@ -9,5 +9,5 @@ sun4i-tcon-y += sun4i_dotclock.o
 
 obj-$(CONFIG_DRM_SUN4I)                += sun4i-drm.o sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_backend.o
-
+obj-$(CONFIG_DRM_SUN4I)                += sun6i_drc.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_tv.o
index 3ab560450a82e666795eddc285214391ee763e71..32c0584e3c3565bc31a77d8912d1c476dc905113 100644 (file)
@@ -83,8 +83,13 @@ void sun4i_backend_layer_enable(struct sun4i_backend *backend,
 }
 EXPORT_SYMBOL(sun4i_backend_layer_enable);
 
-static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
+static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
+                                            u32 format, u32 *mode)
 {
+       if ((plane->type == DRM_PLANE_TYPE_PRIMARY) &&
+           (format == DRM_FORMAT_ARGB8888))
+               format = DRM_FORMAT_XRGB8888;
+
        switch (format) {
        case DRM_FORMAT_ARGB8888:
                *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
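
The override above is deliberately limited to the primary plane: it sits at the
bottom of the stack with nothing underneath to blend against, so honouring
per-pixel alpha there would presumably have the backend blend the plane with an
undefined background. A sketch of the effect on one hypothetical pixel:

    /* 0x80ff0000 as ARGB8888: 50% alpha, pure red.                    */
    /* Programmed as XRGB8888 the alpha byte is ignored, so the        */
    /* bottom-most plane scans out opaque red instead of half-blending */
    /* with whatever the hardware considers to be behind it.           */
    u32 pixel = 0x80ff0000;
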
@@ -164,7 +169,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
        DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
                         interlaced ? "on" : "off");
 
-       ret = sun4i_backend_drm_format_to_layer(fb->pixel_format, &val);
+       ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
        if (ret) {
                DRM_DEBUG_DRIVER("Invalid format\n");
                return val;
@@ -217,6 +222,51 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
 }
 EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
 
+static int sun4i_backend_init_sat(struct device *dev)
+{
+       struct sun4i_backend *backend = dev_get_drvdata(dev);
+       int ret;
+
+       backend->sat_reset = devm_reset_control_get(dev, "sat");
+       if (IS_ERR(backend->sat_reset)) {
+               dev_err(dev, "Couldn't get the SAT reset line\n");
+               return PTR_ERR(backend->sat_reset);
+       }
+
+       ret = reset_control_deassert(backend->sat_reset);
+       if (ret) {
+               dev_err(dev, "Couldn't deassert the SAT reset line\n");
+               return ret;
+       }
+
+       backend->sat_clk = devm_clk_get(dev, "sat");
+       if (IS_ERR(backend->sat_clk)) {
+               dev_err(dev, "Couldn't get our SAT clock\n");
+               ret = PTR_ERR(backend->sat_clk);
+               goto err_assert_reset;
+       }
+
+       ret = clk_prepare_enable(backend->sat_clk);
+       if (ret) {
+               dev_err(dev, "Couldn't enable the SAT clock\n");
+               goto err_assert_reset;
+       }
+
+       return 0;
+
+err_assert_reset:
+       reset_control_assert(backend->sat_reset);
+       return ret;
+}
+
+static int sun4i_backend_free_sat(struct device *dev)
+{
+       struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(backend->sat_clk);
+       reset_control_assert(backend->sat_reset);
+
+       return 0;
+}
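
Note the unwind discipline in the pair above: once the SAT reset line has been
deasserted, every later failure in the bring-up must re-assert it (hence the
goto err_assert_reset on the clock-enable failure), and the teardown runs the
same steps in reverse. Condensed:

    /* bring-up:  reset_control_deassert(sat_reset), then clk_prepare_enable(sat_clk)  */
    /* teardown:  clk_disable_unprepare(sat_clk), then reset_control_assert(sat_reset) */
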
+
 static struct regmap_config sun4i_backend_regmap_config = {
        .reg_bits       = 32,
        .val_bits       = 32,
@@ -243,10 +293,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(regs)) {
-               dev_err(dev, "Couldn't map the backend registers\n");
+       if (IS_ERR(regs))
                return PTR_ERR(regs);
-       }
 
        backend->regs = devm_regmap_init_mmio(dev, regs,
                                              &sun4i_backend_regmap_config);
@@ -291,6 +339,15 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
        }
        clk_prepare_enable(backend->ram_clk);
 
+       if (of_device_is_compatible(dev->of_node,
+                                   "allwinner,sun8i-a33-display-backend")) {
+               ret = sun4i_backend_init_sat(dev);
+               if (ret) {
+                       dev_err(dev, "Couldn't init SAT resources\n");
+                       goto err_disable_ram_clk;
+               }
+       }
+
        /* Reset the registers */
        for (i = 0x800; i < 0x1000; i += 4)
                regmap_write(backend->regs, i, 0);
@@ -306,6 +363,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 
        return 0;
 
+err_disable_ram_clk:
+       clk_disable_unprepare(backend->ram_clk);
 err_disable_mod_clk:
        clk_disable_unprepare(backend->mod_clk);
 err_disable_bus_clk:
@@ -320,6 +379,10 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
 {
        struct sun4i_backend *backend = dev_get_drvdata(dev);
 
+       if (of_device_is_compatible(dev->of_node,
+                                   "allwinner,sun8i-a33-display-backend"))
+               sun4i_backend_free_sat(dev);
+
        clk_disable_unprepare(backend->ram_clk);
        clk_disable_unprepare(backend->mod_clk);
        clk_disable_unprepare(backend->bus_clk);
@@ -345,6 +408,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
 
 static const struct of_device_id sun4i_backend_of_table[] = {
        { .compatible = "allwinner,sun5i-a13-display-backend" },
+       { .compatible = "allwinner,sun8i-a33-display-backend" },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
index 7070bb3434e5de73dca8e4d8f805670895e49cdc..83e63cc702b412e2c0035d4dcd9b7ba27c9395ad 100644 (file)
@@ -52,8 +52,8 @@
 #define SUN4I_BACKEND_LAYFB_L32ADD_REG(l)      (0x850 + (0x4 * (l)))
 
 #define SUN4I_BACKEND_LAYFB_H4ADD_REG          0x860
-#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l)               GENMASK(3 + ((l) * 8), 0)
-#define SUN4I_BACKEND_LAYFB_H4ADD(l, val)                      ((val) << ((l) * 8))
+#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l)               GENMASK(3 + ((l) * 8), (l) * 8)
+#define SUN4I_BACKEND_LAYFB_H4ADD(l, val)              ((val) << ((l) * 8))
 
 #define SUN4I_BACKEND_REGBUFFCTL_REG           0x870
 #define SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS          BIT(1)
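
The mask change above is a real bitfield fix, not churn: with a fixed lower
bound of 0, the mask for any layer above 0 also covered every lower layer's
field, so updating layer 1's high-address nibble would clear layer 0's. A
worked example for layer 1, using the usual GENMASK(high, low) semantics:

    /* l = 1: the 4-bit field lives at bits 11..8 of the register */
    GENMASK(3 + 1 * 8, 0)       /* old: 0x00000fff, spills into layer 0 */
    GENMASK(3 + 1 * 8, 1 * 8)   /* new: 0x00000f00, layer 1 only        */
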
@@ -146,6 +146,9 @@ struct sun4i_backend {
        struct clk              *bus_clk;
        struct clk              *mod_clk;
        struct clk              *ram_clk;
+
+       struct clk              *sat_clk;
+       struct reset_control    *sat_reset;
 };
 
 void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
index 5b3463197c488ae3eac98500eeda121443406304..d401156490f36c890f49f23d1eab1b3f7691d108 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/regmap.h>
 
 #include "sun4i_tcon.h"
+#include "sun4i_dotclock.h"
 
 struct sun4i_dclk {
        struct clk_hw   hw;
@@ -61,7 +62,7 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
        regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
 
        val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
-       val &= SUN4I_TCON0_DCLK_DIV_WIDTH;
+       val &= (1 << SUN4I_TCON0_DCLK_DIV_WIDTH) - 1;
 
        if (!val)
                val = 1;
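
The old statement masked the divider with the field width itself instead of a
mask derived from it. Assuming SUN4I_TCON0_DCLK_DIV_WIDTH is 7 (the round_rate
loop below tops out at a divider of 127, which fits that reading):

    val &= 7;            /* old: keeps only bits 2..0                */
    val &= (1 << 7) - 1; /* new: 0x7f, keeps the full 7-bit divider  */

so a programmed divider of 12 (0b1100) would previously have read back as 4.
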
@@ -76,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
        u8 best_div = 1;
        int i;
 
-       for (i = 6; i < 127; i++) {
+       for (i = 6; i <= 127; i++) {
                unsigned long ideal = rate * i;
                unsigned long rounded;
 
@@ -89,7 +90,8 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
                        goto out;
                }
 
-               if ((rounded < ideal) && (rounded > best_parent)) {
+               if (abs(rate - rounded / i) <
+                   abs(rate - best_parent / best_div)) {
                        best_parent = rounded;
                        best_div = i;
                }
index 7092daaf6c432b8fa8d2bb93bda63785cf307f01..0da9862ad8ed928e23a6a1e089551967ad5273e8 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
@@ -109,7 +110,7 @@ static void sun4i_remove_framebuffers(void)
        ap->ranges[0].base = 0;
        ap->ranges[0].size = ~0;
 
-       remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
        kfree(ap);
 }
 
@@ -120,8 +121,8 @@ static int sun4i_drv_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&sun4i_drv_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv) {
@@ -199,13 +200,14 @@ static const struct component_master_ops sun4i_drv_master_ops = {
 
 static bool sun4i_drv_node_is_frontend(struct device_node *node)
 {
-       return of_device_is_compatible(node,
-                                      "allwinner,sun5i-a13-display-frontend");
+       return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+               of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
 }
 
 static bool sun4i_drv_node_is_tcon(struct device_node *node)
 {
-       return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+       return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+               of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
 }
 
 static int compare_of(struct device *dev, void *data)
@@ -257,8 +259,8 @@ static int sun4i_drv_add_endpoints(struct device *dev,
                }
 
                /*
-                * If the node is our TCON, the first port is used for our
-                * panel, and will not be part of the
+                * If the node is our TCON, the first port is used for our
+                * panel or bridge, and will not be part of the
                 * component framework.
                 */
                if (sun4i_drv_node_is_tcon(node)) {
@@ -320,6 +322,7 @@ static int sun4i_drv_remove(struct platform_device *pdev)
 
 static const struct of_device_id sun4i_drv_of_table[] = {
        { .compatible = "allwinner,sun5i-a13-display-engine" },
+       { .compatible = "allwinner,sun8i-a33-display-engine" },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
index 70688febd7ac8b08ee064ffcaabef506e63515eb..8b6ce619ad814497100a2e9c1ccb6098a707e881 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 
 #include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
 
 static void sun4i_de_output_poll_changed(struct drm_device *drm)
 {
index 068ab806309b85faef7ca3288ad324d9d417e720..f0035bf5efea68fe89f30e500f85e5c582974099 100644 (file)
 #include "sun4i_drv.h"
 #include "sun4i_layer.h"
 
-#define SUN4I_NUM_LAYERS       2
+struct sun4i_plane_desc {
+       enum drm_plane_type     type;
+       u8                      pipe;
+       const uint32_t          *formats;
+       uint32_t                nformats;
+};
 
 static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
                                            struct drm_plane_state *state)
@@ -65,14 +70,35 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
 };
 
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_backend_layer_formats_primary[] = {
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_RGB888,
        DRM_FORMAT_XRGB8888,
+};
+
+static const uint32_t sun4i_backend_layer_formats_overlay[] = {
+       DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGB888,
+       DRM_FORMAT_XRGB8888,
+};
+
+static const struct sun4i_plane_desc sun4i_backend_planes[] = {
+       {
+               .type = DRM_PLANE_TYPE_PRIMARY,
+               .pipe = 0,
+               .formats = sun4i_backend_layer_formats_primary,
+               .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_primary),
+       },
+       {
+               .type = DRM_PLANE_TYPE_OVERLAY,
+               .pipe = 1,
+               .formats = sun4i_backend_layer_formats_overlay,
+               .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_overlay),
+       },
 };
 
 static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
-                                               enum drm_plane_type type)
+                                               const struct sun4i_plane_desc *plane)
 {
        struct sun4i_drv *drv = drm->dev_private;
        struct sun4i_layer *layer;
@@ -84,10 +110,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
 
        ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
                                       &sun4i_backend_layer_funcs,
-                                      sun4i_backend_layer_formats,
-                                      ARRAY_SIZE(sun4i_backend_layer_formats),
-                                      type,
-                                      NULL);
+                                      plane->formats, plane->nformats,
+                                      plane->type, NULL);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialize layer\n");
                return ERR_PTR(ret);
@@ -97,7 +121,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
                             &sun4i_backend_layer_helper_funcs);
        layer->drv = drv;
 
-       if (type == DRM_PLANE_TYPE_PRIMARY)
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                drv->primary = &layer->plane;
 
        return layer;
@@ -109,8 +133,8 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
        struct sun4i_layer **layers;
        int i;
 
-       layers = devm_kcalloc(drm->dev, SUN4I_NUM_LAYERS, sizeof(**layers),
-                             GFP_KERNEL);
+       layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes),
+                             sizeof(**layers), GFP_KERNEL);
        if (!layers)
                return ERR_PTR(-ENOMEM);
 
@@ -135,13 +159,11 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
         * SoCs that support it, sprites could fill the need for more
         * layers.
         */
-       for (i = 0; i < SUN4I_NUM_LAYERS; i++) {
-               enum drm_plane_type type = (i == 0)
-                                        ? DRM_PLANE_TYPE_PRIMARY
-                                        : DRM_PLANE_TYPE_OVERLAY;
+       for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
+               const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
                struct sun4i_layer *layer = layers[i];
 
-               layer = sun4i_layer_init_one(drm, type);
+               layer = sun4i_layer_init_one(drm, plane);
                if (IS_ERR(layer)) {
                        dev_err(drm->dev, "Couldn't initialize %s plane\n",
                                i ? "overlay" : "primary");
@@ -149,10 +171,10 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
                };
 
                DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
-                                i ? "overlay" : "primary", i);
+                                i ? "overlay" : "primary", plane->pipe);
                regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
                                   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
-                                  SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(i));
+                                  SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
 
                layer->id = i;
        };
index f5bbac6efb4c8c01e5f7cd709de037f168a92eb6..c3ff10f559cc4811755f764d6ebbfd999a8ee7e9 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "sun4i_drv.h"
 #include "sun4i_tcon.h"
+#include "sun4i_rgb.h"
 
 struct sun4i_rgb {
        struct drm_connector    connector;
@@ -151,7 +152,14 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-       drm_panel_enable(tcon->panel);
+       if (!IS_ERR(tcon->panel)) {
+               drm_panel_prepare(tcon->panel);
+               drm_panel_enable(tcon->panel);
+       }
+
+       /* encoder->bridge can be NULL; drm_bridge_enable checks for it */
+       drm_bridge_enable(encoder->bridge);
+
        sun4i_tcon_channel_enable(tcon, 0);
 }
 
@@ -164,7 +172,14 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
        DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
        sun4i_tcon_channel_disable(tcon, 0);
-       drm_panel_disable(tcon->panel);
+
+       /* encoder->bridge can be NULL; drm_bridge_disable checks for it */
+       drm_bridge_disable(encoder->bridge);
+
+       if (!IS_ERR(tcon->panel)) {
+               drm_panel_disable(tcon->panel);
+               drm_panel_unprepare(tcon->panel);
+       }
 }
 
 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
@@ -203,17 +218,22 @@ int sun4i_rgb_init(struct drm_device *drm)
 {
        struct sun4i_drv *drv = drm->dev_private;
        struct sun4i_tcon *tcon = drv->tcon;
+       struct drm_encoder *encoder;
        struct sun4i_rgb *rgb;
        int ret;
 
-       /* If we don't have a panel, there's no point in going on */
-       if (IS_ERR(tcon->panel))
-               return -ENODEV;
-
        rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
        if (!rgb)
                return -ENOMEM;
        rgb->drv = drv;
+       encoder = &rgb->encoder;
+
+       tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
+       encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
+       if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) {
+               dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
+               return 0;
+       }
 
        drm_encoder_helper_add(&rgb->encoder,
                               &sun4i_rgb_enc_helper_funcs);
@@ -230,19 +250,38 @@ int sun4i_rgb_init(struct drm_device *drm)
        /* The RGB encoder can only work with the TCON channel 0 */
        rgb->encoder.possible_crtcs = BIT(0);
 
-       drm_connector_helper_add(&rgb->connector,
-                                &sun4i_rgb_con_helper_funcs);
-       ret = drm_connector_init(drm, &rgb->connector,
-                                &sun4i_rgb_con_funcs,
-                                DRM_MODE_CONNECTOR_Unknown);
-       if (ret) {
-               dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
-               goto err_cleanup_connector;
+       if (!IS_ERR(tcon->panel)) {
+               drm_connector_helper_add(&rgb->connector,
+                                        &sun4i_rgb_con_helper_funcs);
+               ret = drm_connector_init(drm, &rgb->connector,
+                                        &sun4i_rgb_con_funcs,
+                                        DRM_MODE_CONNECTOR_Unknown);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+                       goto err_cleanup_connector;
+               }
+
+               drm_mode_connector_attach_encoder(&rgb->connector,
+                                                 &rgb->encoder);
+
+               ret = drm_panel_attach(tcon->panel, &rgb->connector);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't attach our panel\n");
+                       goto err_cleanup_connector;
+               }
        }
 
-       drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+       if (!IS_ERR(encoder->bridge)) {
+               encoder->bridge->encoder = &rgb->encoder;
 
-       drm_panel_attach(tcon->panel, &rgb->connector);
+               ret = drm_bridge_attach(drm, encoder->bridge);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't attach our bridge\n");
+                       goto err_cleanup_connector;
+               }
+       } else {
+               encoder->bridge = NULL;
+       }
 
        return 0;
 
index 652385f09735c3f4bb11b713809cc6b67034d459..cadacb517f958f3f910a0a594e1da1a89f4a9b5c 100644 (file)
@@ -59,11 +59,13 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
                regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
                                   SUN4I_TCON0_CTL_TCON_ENABLE, 0);
                clk_disable_unprepare(tcon->dclk);
-       } else if (channel == 1) {
-               regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-                                  SUN4I_TCON1_CTL_TCON_ENABLE, 0);
-               clk_disable_unprepare(tcon->sclk1);
+               return;
        }
+
+       WARN_ON(!tcon->has_channel_1);
+       regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+                          SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+       clk_disable_unprepare(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_disable);
 
@@ -75,12 +77,14 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
                                   SUN4I_TCON0_CTL_TCON_ENABLE,
                                   SUN4I_TCON0_CTL_TCON_ENABLE);
                clk_prepare_enable(tcon->dclk);
-       } else if (channel == 1) {
-               regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-                                  SUN4I_TCON1_CTL_TCON_ENABLE,
-                                  SUN4I_TCON1_CTL_TCON_ENABLE);
-               clk_prepare_enable(tcon->sclk1);
+               return;
        }
+
+       WARN_ON(!tcon->has_channel_1);
+       regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+                          SUN4I_TCON1_CTL_TCON_ENABLE,
+                          SUN4I_TCON1_CTL_TCON_ENABLE);
+       clk_prepare_enable(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_enable);
 
@@ -198,6 +202,8 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
        u8 clk_delay;
        u32 val;
 
+       WARN_ON(!tcon->has_channel_1);
+
        /* Adjust clock delay */
        clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
        regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
@@ -321,10 +327,12 @@ static int sun4i_tcon_init_clocks(struct device *dev,
                return PTR_ERR(tcon->sclk0);
        }
 
-       tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
-       if (IS_ERR(tcon->sclk1)) {
-               dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
-               return PTR_ERR(tcon->sclk1);
+       if (tcon->has_channel_1) {
+               tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+               if (IS_ERR(tcon->sclk1)) {
+                       dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+                       return PTR_ERR(tcon->sclk1);
+               }
        }
 
        return sun4i_dclk_create(dev, tcon);
@@ -374,10 +382,8 @@ static int sun4i_tcon_init_regmap(struct device *dev,
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(regs)) {
-               dev_err(dev, "Couldn't map the TCON registers\n");
+       if (IS_ERR(regs))
                return PTR_ERR(regs);
-       }
 
        tcon->regs = devm_regmap_init_mmio(dev, regs,
                                           &sun4i_tcon_regmap_config);
@@ -398,7 +404,7 @@ static int sun4i_tcon_init_regmap(struct device *dev,
        return 0;
 }
 
-static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 {
        struct device_node *port, *remote, *child;
        struct device_node *end_node = NULL;
@@ -432,6 +438,40 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
        return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
+{
+       struct device_node *port, *remote, *child;
+       struct device_node *end_node = NULL;
+
+       /* Inputs are listed first, then outputs */
+       port = of_graph_get_port_by_id(node, 1);
+
+       /*
+        * Our first output is the RGB interface where the panel or
+        * bridge will be connected.
+        */
+       for_each_child_of_node(port, child) {
+               u32 reg;
+
+               of_property_read_u32(child, "reg", &reg);
+               if (reg == 0)
+                       end_node = child;
+       }
+
+       if (!end_node) {
+               DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       remote = of_graph_get_remote_port_parent(end_node);
+       if (!remote) {
+               DRM_DEBUG_DRIVER("Unable to parse remote node\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
+}
+
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
                           void *data)
 {
@@ -446,9 +486,15 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
        dev_set_drvdata(dev, tcon);
        drv->tcon = tcon;
        tcon->drm = drm;
+       tcon->dev = dev;
 
-       if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+       if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
                tcon->has_mux = true;
+               tcon->has_channel_1 = true;
+       } else {
+               tcon->has_mux = false;
+               tcon->has_channel_1 = false;
+       }
 
        tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
        if (IS_ERR(tcon->lcd_rst)) {
@@ -484,12 +530,6 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                goto err_free_clocks;
        }
 
-       tcon->panel = sun4i_tcon_find_panel(dev->of_node);
-       if (IS_ERR(tcon->panel)) {
-               dev_info(dev, "No panel found... RGB output disabled\n");
-               return 0;
-       }
-
        ret = sun4i_rgb_init(drm);
        if (ret < 0)
                goto err_free_clocks;
@@ -519,19 +559,22 @@ static struct component_ops sun4i_tcon_ops = {
 static int sun4i_tcon_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
+       struct drm_bridge *bridge;
        struct drm_panel *panel;
 
        /*
-        * The panel is not ready.
+        * Neither the bridge nor the panel is ready.
         * Defer the probe.
         */
        panel = sun4i_tcon_find_panel(node);
+       bridge = sun4i_tcon_find_bridge(node);
 
        /*
         * If we don't have a panel or bridge endpoint, just go on
         */
-       if (PTR_ERR(panel) == -EPROBE_DEFER) {
-               DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+       if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
+           (PTR_ERR(bridge) == -EPROBE_DEFER)) {
+               DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
                return -EPROBE_DEFER;
        }
 
@@ -547,6 +590,7 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
 
 static const struct of_device_id sun4i_tcon_of_table[] = {
        { .compatible = "allwinner,sun5i-a13-tcon" },
+       { .compatible = "allwinner,sun8i-a33-tcon" },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
index 0e0b11db401bda735a482a240971855f25fa0cac..12bd48925f4d9970fc6b3881d73b0d0a0e6b1ca8 100644 (file)
 #define SUN4I_TCON_MAX_CHANNELS                2
 
 struct sun4i_tcon {
+       struct device                   *dev;
        struct drm_device               *drm;
        struct regmap                   *regs;
 
@@ -163,8 +164,13 @@ struct sun4i_tcon {
        bool                            has_mux;
 
        struct drm_panel                *panel;
+
+       bool                            has_channel_1;
 };
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
+
 /* Global Control */
 void sun4i_tcon_disable(struct sun4i_tcon *tcon);
 void sun4i_tcon_enable(struct sun4i_tcon *tcon);
index b84147896294a62df47c8ffb37c2c64fd92e7867..1dd3d9eabf2e5098c617d72001bdcc3a82737629 100644 (file)
@@ -161,10 +161,10 @@ struct tv_mode {
        bool            dac3_en;
        bool            dac_bit25_en;
 
-       struct color_gains              *color_gains;
-       struct burst_levels             *burst_levels;
-       struct video_levels             *video_levels;
-       struct resync_parameters        *resync_params;
+       const struct color_gains        *color_gains;
+       const struct burst_levels       *burst_levels;
+       const struct video_levels       *video_levels;
+       const struct resync_parameters  *resync_params;
 };
 
 struct sun4i_tv {
@@ -178,39 +178,39 @@ struct sun4i_tv {
        struct sun4i_drv        *drv;
 };
 
-struct video_levels ntsc_video_levels = {
+static const struct video_levels ntsc_video_levels = {
        .black = 282,   .blank = 240,
 };
 
-struct video_levels pal_video_levels = {
+static const struct video_levels pal_video_levels = {
        .black = 252,   .blank = 252,
 };
 
-struct burst_levels ntsc_burst_levels = {
+static const struct burst_levels ntsc_burst_levels = {
        .cb = 79,       .cr = 0,
 };
 
-struct burst_levels pal_burst_levels = {
+static const struct burst_levels pal_burst_levels = {
        .cb = 40,       .cr = 40,
 };
 
-struct color_gains ntsc_color_gains = {
+static const struct color_gains ntsc_color_gains = {
        .cb = 160,      .cr = 160,
 };
 
-struct color_gains pal_color_gains = {
+static const struct color_gains pal_color_gains = {
        .cb = 224,      .cr = 224,
 };
 
-struct resync_parameters ntsc_resync_parameters = {
+static const struct resync_parameters ntsc_resync_parameters = {
        .field = false, .line = 14,     .pixel = 12,
 };
 
-struct resync_parameters pal_resync_parameters = {
+static const struct resync_parameters pal_resync_parameters = {
        .field = true,  .line = 13,     .pixel = 12,
 };
 
-struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC",
                .mode           = SUN4I_TVE_CFG0_RES_480i,
@@ -289,13 +289,13 @@ drm_connector_to_sun4i_tv(struct drm_connector *connector)
  * So far, it doesn't seem to be preserved when the mode is passed
  * to mode_set for some reason.
  */
-static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
+static const struct tv_mode *sun4i_tv_find_tv_by_mode(const struct drm_display_mode *mode)
 {
        int i;
 
        /* First try to identify the mode by name */
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-               struct tv_mode *tv_mode = &tv_modes[i];
+               const struct tv_mode *tv_mode = &tv_modes[i];
 
                DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
                                 mode->name, tv_mode->name);
@@ -306,7 +306,7 @@ static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
 
        /* Then by number of lines */
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-               struct tv_mode *tv_mode = &tv_modes[i];
+               const struct tv_mode *tv_mode = &tv_modes[i];
 
                DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
                                 mode->name, tv_mode->name,
@@ -319,7 +319,7 @@ static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
        return NULL;
 }
 
-static void sun4i_tv_mode_to_drm_mode(struct tv_mode *tv_mode,
+static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
                                      struct drm_display_mode *mode)
 {
        DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
@@ -386,7 +386,7 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
        struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
        struct sun4i_drv *drv = tv->drv;
        struct sun4i_tcon *tcon = drv->tcon;
-       struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+       const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
 
        sun4i_tcon1_mode_set(tcon, mode);
 
@@ -507,8 +507,14 @@ static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-               struct drm_display_mode *mode = drm_mode_create(connector->dev);
-               struct tv_mode *tv_mode = &tv_modes[i];
+               struct drm_display_mode *mode;
+               const struct tv_mode *tv_mode = &tv_modes[i];
+
+               mode = drm_mode_create(connector->dev);
+               if (!mode) {
+                       DRM_ERROR("Failed to create a new display mode\n");
+                       return 0;
+               }
 
                strcpy(mode->name, tv_mode->name);
 
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
new file mode 100644 (file)
index 0000000..bf6d846
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+struct sun6i_drc {
+       struct clk              *bus_clk;
+       struct clk              *mod_clk;
+       struct reset_control    *reset;
+};
+
+static int sun6i_drc_bind(struct device *dev, struct device *master,
+                        void *data)
+{
+       struct sun6i_drc *drc;
+       int ret;
+
+       drc = devm_kzalloc(dev, sizeof(*drc), GFP_KERNEL);
+       if (!drc)
+               return -ENOMEM;
+       dev_set_drvdata(dev, drc);
+
+       drc->reset = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(drc->reset)) {
+               dev_err(dev, "Couldn't get our reset line\n");
+               return PTR_ERR(drc->reset);
+       }
+
+       ret = reset_control_deassert(drc->reset);
+       if (ret) {
+               dev_err(dev, "Couldn't deassert our reset line\n");
+               return ret;
+       }
+
+       drc->bus_clk = devm_clk_get(dev, "ahb");
+       if (IS_ERR(drc->bus_clk)) {
+               dev_err(dev, "Couldn't get our bus clock\n");
+               ret = PTR_ERR(drc->bus_clk);
+               goto err_assert_reset;
+       }
+       clk_prepare_enable(drc->bus_clk);
+
+       drc->mod_clk = devm_clk_get(dev, "mod");
+       if (IS_ERR(drc->mod_clk)) {
+               dev_err(dev, "Couldn't get our mod clock\n");
+               ret = PTR_ERR(drc->mod_clk);
+               goto err_disable_bus_clk;
+       }
+       clk_prepare_enable(drc->mod_clk);
+
+       return 0;
+
+err_disable_bus_clk:
+       clk_disable_unprepare(drc->bus_clk);
+err_assert_reset:
+       reset_control_assert(drc->reset);
+       return ret;
+}
+
+static void sun6i_drc_unbind(struct device *dev, struct device *master,
+                           void *data)
+{
+       struct sun6i_drc *drc = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(drc->mod_clk);
+       clk_disable_unprepare(drc->bus_clk);
+       reset_control_assert(drc->reset);
+}
+
+static struct component_ops sun6i_drc_ops = {
+       .bind   = sun6i_drc_bind,
+       .unbind = sun6i_drc_unbind,
+};
+
+static int sun6i_drc_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &sun6i_drc_ops);
+}
+
+static int sun6i_drc_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &sun6i_drc_ops);
+
+       return 0;
+}
+
+static const struct of_device_id sun6i_drc_of_table[] = {
+       { .compatible = "allwinner,sun8i-a33-drc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
+
+static struct platform_driver sun6i_drc_platform_driver = {
+       .probe          = sun6i_drc_probe,
+       .remove         = sun6i_drc_remove,
+       .driver         = {
+               .name           = "sun6i-drc",
+               .of_match_table = sun6i_drc_of_table,
+       },
+};
+module_platform_driver(sun6i_drc_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 Dynamic Range Control (DRC) Driver");
+MODULE_LICENSE("GPL");
index fab5ebcb0fef09dbaafd4eade233151350d26278..f418892b0c7127681a31acee118dbd2cad3405a2 100644 (file)
@@ -56,6 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
 };
 
 static struct drm_driver driver = {
+       .driver_features = DRIVER_LEGACY,
        .set_busid = drm_pci_set_busid,
        .fops = &tdfx_driver_fops,
        .name = DRIVER_NAME,
index 981d24ae8328ca8aac999721a74651debfc7f1da..4010d69cbd08404714fe92df3a61ee1a2f3d3bfb 100644 (file)
@@ -480,17 +480,6 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
        .atomic_destroy_state = tegra_plane_atomic_destroy_state,
 };
 
-static int tegra_plane_prepare_fb(struct drm_plane *plane,
-                                 const struct drm_plane_state *new_state)
-{
-       return 0;
-}
-
-static void tegra_plane_cleanup_fb(struct drm_plane *plane,
-                                  const struct drm_plane_state *old_fb)
-{
-}
-
 static int tegra_plane_state_add(struct tegra_plane *plane,
                                 struct drm_plane_state *state)
 {
@@ -631,8 +620,6 @@ static void tegra_plane_atomic_disable(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
-       .prepare_fb = tegra_plane_prepare_fb,
-       .cleanup_fb = tegra_plane_cleanup_fb,
        .atomic_check = tegra_plane_atomic_check,
        .atomic_update = tegra_plane_atomic_update,
        .atomic_disable = tegra_plane_atomic_disable,
@@ -803,8 +790,6 @@ static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
 };
 
 static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
-       .prepare_fb = tegra_plane_prepare_fb,
-       .cleanup_fb = tegra_plane_cleanup_fb,
        .atomic_check = tegra_cursor_atomic_check,
        .atomic_update = tegra_cursor_atomic_update,
        .atomic_disable = tegra_cursor_atomic_disable,
@@ -873,8 +858,6 @@ static const uint32_t tegra_overlay_plane_formats[] = {
 };
 
 static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
-       .prepare_fb = tegra_plane_prepare_fb,
-       .cleanup_fb = tegra_plane_cleanup_fb,
        .atomic_check = tegra_plane_atomic_check,
        .atomic_update = tegra_plane_atomic_update,
        .atomic_disable = tegra_plane_atomic_disable,
index 755264d9db229be535a05654250cd69e6876a722..8ab47b502d8382f9740e6795e56bf0afd545b632 100644 (file)
@@ -57,7 +57,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
        drm_atomic_helper_commit_modeset_enables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, true);
+       drm_atomic_helper_commit_planes(drm, state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
 
@@ -982,8 +983,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
        int err;
 
        drm = drm_dev_alloc(driver, &dev->dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        dev_set_drvdata(&dev->dev, drm);
 
index deeca4869d94159b7eb932cecfdc47f9a258e769..6f675175a9e54aefc4d9222175b877ec9b3f1ab9 100644 (file)
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
                                         tilcdc_slave_compat.dtb.o
 
 tilcdc-y := \
+       tilcdc_plane.o \
        tilcdc_crtc.o \
        tilcdc_tfp410.o \
        tilcdc_panel.o \
index 107c8bd04f6dee0d81f63c0369b39264043e646b..52ebe8fc17844eab580c9b308fe23a897f442ec1 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "drm_flip_work.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/workqueue.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
 struct tilcdc_crtc {
        struct drm_crtc base;
 
+       struct drm_plane primary;
        const struct tilcdc_panel_info *info;
        struct drm_pending_vblank_event *event;
-       int dpms;
+       bool enabled;
        wait_queue_head_t frame_done_wq;
        bool frame_done;
        spinlock_t irq_lock;
 
+       unsigned int lcd_fck_rate;
+
        ktime_t last_vblank;
 
        struct drm_framebuffer *curr_fb;
@@ -67,6 +74,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
        struct drm_gem_cma_object *gem;
        unsigned int depth, bpp;
        dma_addr_t start, end;
+       u64 dma_base_and_ceiling;
 
        drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
        gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -77,8 +85,13 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
 
        end = start + (crtc->mode.vdisplay * fb->pitches[0]);
 
-       tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
-       tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+       /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
+        * with a single instruction, if available. This should make it less
+        * likely that the LCDC fetches the DMA addresses in the middle of
+        * an update.
+        */
+       dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+       tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
 
        if (tilcdc_crtc->curr_fb)
                drm_flip_work_queue(&tilcdc_crtc->unref_work,
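
Two details are worth noting in the scanout hunk above. The base and ceiling
registers are adjacent, so packing them into one u64 lets tilcdc_write64()
update both with a single store where the bus supports it (the "if available"
wording suggests the helper falls back to two 32-bit writes otherwise). And
the ceiling word is now end - 1 rather than end, which reads as the register
taking the last valid byte address rather than one past the framebuffer:

    /* bits 63..32: ceiling (inclusive last address); bits 31..0: base */
    u64 dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
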
@@ -87,6 +100,43 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
        tilcdc_crtc->curr_fb = fb;
 }
 
+static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
+{
+       struct tilcdc_drm_private *priv = dev->dev_private;
+
+       tilcdc_clear_irqstatus(dev, 0xffffffff);
+
+       if (priv->rev == 1) {
+               tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+                       LCDC_V1_UNDERFLOW_INT_ENA);
+               tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+                       LCDC_V1_END_OF_FRAME_INT_ENA);
+       } else {
+               tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
+                       LCDC_V2_UNDERFLOW_INT_ENA |
+                       LCDC_V2_END_OF_FRAME0_INT_ENA |
+                       LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+       }
+}
+
+static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
+{
+       struct tilcdc_drm_private *priv = dev->dev_private;
+
+       /* disable irqs that we might have enabled: */
+       if (priv->rev == 1) {
+               tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+                       LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+               tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+                       LCDC_V1_END_OF_FRAME_INT_ENA);
+       } else {
+               tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+                       LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+                       LCDC_V2_END_OF_FRAME0_INT_ENA |
+                       LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+       }
+}
+
 static void reset(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -100,66 +150,112 @@ static void reset(struct drm_crtc *crtc)
        tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
 }
 
-static void start(struct drm_crtc *crtc)
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+       if (tilcdc_crtc->enabled)
+               return;
+
+       pm_runtime_get_sync(dev->dev);
 
        reset(crtc);
 
+       tilcdc_crtc_enable_irqs(dev);
+
        tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+       drm_crtc_vblank_on(crtc);
+
+       tilcdc_crtc->enabled = true;
 }
 
-static void stop(struct drm_crtc *crtc)
+void tilcdc_crtc_disable(struct drm_crtc *crtc)
 {
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
+       struct tilcdc_drm_private *priv = dev->dev_private;
+
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+       if (!tilcdc_crtc->enabled)
+               return;
 
+       tilcdc_crtc->frame_done = false;
        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+       /*
+        * If necessary, wait for the framedone irq; it will still arrive
+        * before things are put to sleep.
+        */
+       if (priv->rev == 2) {
+               int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+                                            tilcdc_crtc->frame_done,
+                                            msecs_to_jiffies(500));
+               if (ret == 0)
+                       dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+                               __func__);
+       }
+
+       drm_crtc_vblank_off(crtc);
+
+       tilcdc_crtc_disable_irqs(dev);
+
+       pm_runtime_put_sync(dev->dev);
+
+       if (tilcdc_crtc->next_fb) {
+               drm_flip_work_queue(&tilcdc_crtc->unref_work,
+                                   tilcdc_crtc->next_fb);
+               tilcdc_crtc->next_fb = NULL;
+       }
+
+       if (tilcdc_crtc->curr_fb) {
+               drm_flip_work_queue(&tilcdc_crtc->unref_work,
+                                   tilcdc_crtc->curr_fb);
+               tilcdc_crtc->curr_fb = NULL;
+       }
+
+       drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+       tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+       tilcdc_crtc->enabled = false;
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+       return crtc->state && crtc->state->enable && crtc->state->active;
 }
 
 static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
 {
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+       drm_modeset_lock_crtc(crtc, NULL);
+       tilcdc_crtc_disable(crtc);
+       drm_modeset_unlock_crtc(crtc);
 
-       tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       flush_workqueue(priv->wq);
 
        of_node_put(crtc->port);
        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
 }
 
-static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-       struct drm_device *dev = crtc->dev;
-       unsigned int depth, bpp;
-
-       drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
-
-       if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) {
-               dev_err(dev->dev,
-                       "Invalid pitch: fb and crtc widths must be the same");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
                struct drm_framebuffer *fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
+               struct drm_pending_vblank_event *event)
 {
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       int r;
        unsigned long flags;
-       s64 tdiff;
-       ktime_t next_vblank;
 
-       r = tilcdc_verify_fb(crtc, fb);
-       if (r)
-               return r;
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
        if (tilcdc_crtc->event) {
                dev_err(dev->dev, "already pending page flip!\n");
@@ -170,82 +266,31 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->primary->fb = fb;
 
-       pm_runtime_get_sync(dev->dev);
-
        spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
-       next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-               1000000 / crtc->hwmode.vrefresh);
+       if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+               ktime_t next_vblank;
+               s64 tdiff;
+
+               next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+                       1000000 / crtc->hwmode.vrefresh);
 
-       tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+               tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
 
-       if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+               if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+                       tilcdc_crtc->next_fb = fb;
+       }
+
+       if (tilcdc_crtc->next_fb != fb)
                set_scanout(crtc, fb);
-       else
-               tilcdc_crtc->next_fb = fb;
 
        tilcdc_crtc->event = event;
 
        spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
-       pm_runtime_put_sync(dev->dev);
-
        return 0;
 }
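
tilcdc_crtc_update_fb() programs the scanout register immediately only when
the next vblank is far enough away; otherwise the new framebuffer is parked
in next_fb and latched by the irq handler. A self-contained sketch of that
proximity test, with a hypothetical threshold standing in for
TILCDC_VBLANK_SAFETY_THRESHOLD_US:

	#include <linux/ktime.h>

	#define VBLANK_SAFETY_THRESHOLD_US 1000 /* hypothetical value */

	static bool can_update_scanout_now(ktime_t last_vblank, int vrefresh)
	{
		/* one frame period after the last observed vblank */
		ktime_t next_vblank = ktime_add_us(last_vblank,
						   1000000 / vrefresh);
		/* microseconds remaining until that vblank */
		s64 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		/* too close: let the irq handler latch the new fb instead */
		return tdiff >= VBLANK_SAFETY_THRESHOLD_US;
	}
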
 
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct tilcdc_drm_private *priv = dev->dev_private;
-
-       /* we really only care about on or off: */
-       if (mode != DRM_MODE_DPMS_ON)
-               mode = DRM_MODE_DPMS_OFF;
-
-       if (tilcdc_crtc->dpms == mode)
-               return;
-
-       tilcdc_crtc->dpms = mode;
-
-       if (mode == DRM_MODE_DPMS_ON) {
-               pm_runtime_get_sync(dev->dev);
-               start(crtc);
-       } else {
-               tilcdc_crtc->frame_done = false;
-               stop(crtc);
-
-               /*
-                * if necessary wait for framedone irq which will still come
-                * before putting things to sleep..
-                */
-               if (priv->rev == 2) {
-                       int ret = wait_event_timeout(
-                                       tilcdc_crtc->frame_done_wq,
-                                       tilcdc_crtc->frame_done,
-                                       msecs_to_jiffies(50));
-                       if (ret == 0)
-                               dev_err(dev->dev, "timeout waiting for framedone\n");
-               }
-
-               pm_runtime_put_sync(dev->dev);
-
-               if (tilcdc_crtc->next_fb) {
-                       drm_flip_work_queue(&tilcdc_crtc->unref_work,
-                                           tilcdc_crtc->next_fb);
-                       tilcdc_crtc->next_fb = NULL;
-               }
-
-               if (tilcdc_crtc->curr_fb) {
-                       drm_flip_work_queue(&tilcdc_crtc->unref_work,
-                                           tilcdc_crtc->curr_fb);
-                       tilcdc_crtc->curr_fb = NULL;
-               }
-
-               drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-       }
-}
-
 static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
@@ -275,41 +320,54 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
-static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
 {
-       tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+       struct drm_device *dev = crtc->dev;
+       struct tilcdc_drm_private *priv = dev->dev_private;
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       const unsigned clkdiv = 2; /* using a fixed divider of 2 */
+       int ret;
 
-static void tilcdc_crtc_commit(struct drm_crtc *crtc)
-{
-       tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       /* mode.clock is in KHz, set_rate wants parameter in Hz */
+       ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
+       if (ret < 0) {
+               dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+                       crtc->mode.clock);
+               return;
+       }
+
+       tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+
+       DBG("lcd_clk=%u, mode clock=%d, div=%u",
+           tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
+
+       /* Configure the LCD clock divisor. */
+       tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
+                    LCDC_RASTER_MODE);
+
+       if (priv->rev == 2)
+               tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+                               LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+                               LCDC_V2_CORE_CLK_EN);
 }
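
For reference, the rate computation above works out as follows for a
hypothetical 1280x720@60 mode (the numbers are illustrative, not taken from
this patch):

	/*
	 * mode->clock = 74250 (kHz); with the fixed divider of 2:
	 *
	 *   clk_set_rate(priv->clk, 74250 * 1000 * 2);   [148500000 Hz]
	 *
	 * LCDC_CLK_DIVISOR(2) then divides the 148.5 MHz functional
	 * clock back down to the 74.25 MHz pixel clock.
	 */
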
 
-static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode,
-               int x, int y,
-               struct drm_framebuffer *old_fb)
+static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        const struct tilcdc_panel_info *info = tilcdc_crtc->info;
        uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
-       int ret;
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct drm_framebuffer *fb = crtc->primary->state->fb;
 
-       ret = tilcdc_crtc_mode_valid(crtc, mode);
-       if (WARN_ON(ret))
-               return ret;
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
        if (WARN_ON(!info))
-               return -EINVAL;
-
-       ret = tilcdc_verify_fb(crtc, crtc->primary->fb);
-       if (ret)
-               return ret;
+               return;
 
-       pm_runtime_get_sync(dev->dev);
+       if (WARN_ON(!fb))
+               return;
 
        /* Configure the Burst Size and fifo threshold of DMA: */
        reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
@@ -330,7 +388,8 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
                break;
        default:
-               return -EINVAL;
+               dev_err(dev->dev, "invalid burst size\n");
+               return;
        }
        reg |= (info->fifo_th << 8);
        tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
@@ -344,9 +403,9 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        vsw = mode->vsync_end - mode->vsync_start;
 
        DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
-                       mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
+           mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
 
-       /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+       /* Set AC Bias Period and Number of Transitions per Interrupt: */
        reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
        reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
                LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
@@ -381,7 +440,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        /*
         * be sure to set Bit 10 for the V2 LCDC controller,
         * otherwise limited to 1024 pixels width, stopping
-        * 1920x1080 being suppoted.
+        * 1920x1080 being supported.
         */
        if (priv->rev == 2) {
                if ((mode->vdisplay - 1) & 0x400) {
@@ -396,14 +455,15 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        /* Configure display type: */
        reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
                ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
-                       LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+                 LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
+                 0x000ff000 /* Palette Loading Delay bits */);
        reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
        if (info->tft_alt_mode)
                reg |= LCDC_TFT_ALT_ENABLE;
        if (priv->rev == 2) {
                unsigned int depth, bpp;
 
-               drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp);
+               drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
                switch (bpp) {
                case 16:
                        break;
@@ -415,7 +475,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
                        break;
                default:
                        dev_err(dev->dev, "invalid pixel format\n");
-                       return -EINVAL;
+                       return;
                }
        }
        reg |= info->fdd < 12;
@@ -436,12 +496,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
 
-       /*
-        * use value from adjusted_mode here as this might have been
-        * changed as part of the fixup for slave encoders to solve the
-        * issue where tilcdc timings are not VESA compliant
-        */
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -456,51 +511,56 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        else
                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
 
-       drm_framebuffer_reference(crtc->primary->fb);
-
-       set_scanout(crtc, crtc->primary->fb);
+       drm_framebuffer_reference(fb);
 
-       tilcdc_crtc_update_clk(crtc);
+       set_scanout(crtc, fb);
 
-       pm_runtime_put_sync(dev->dev);
+       tilcdc_crtc_set_clk(crtc);
 
-       return 0;
+       crtc->hwmode = crtc->state->adjusted_mode;
 }
 
-static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-               struct drm_framebuffer *old_fb)
+static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *state)
 {
-       struct drm_device *dev = crtc->dev;
-       int r;
-
-       r = tilcdc_verify_fb(crtc, crtc->primary->fb);
-       if (r)
-               return r;
-
-       drm_framebuffer_reference(crtc->primary->fb);
+       struct drm_display_mode *mode = &state->mode;
+       int ret;
 
-       pm_runtime_get_sync(dev->dev);
+       /* If we are not active, we don't care */
+       if (!state->active)
+               return 0;
 
-       set_scanout(crtc, crtc->primary->fb);
+       if (state->state->planes[0].ptr != crtc->primary ||
+           state->state->planes[0].state == NULL ||
+           state->state->planes[0].state->crtc != crtc) {
+               dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
+               return -EINVAL;
+       }
 
-       pm_runtime_put_sync(dev->dev);
+       ret = tilcdc_crtc_mode_valid(crtc, mode);
+       if (ret) {
+               dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
 static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
-               .destroy        = tilcdc_crtc_destroy,
-               .set_config     = drm_crtc_helper_set_config,
-               .page_flip      = tilcdc_crtc_page_flip,
+       .destroy        = tilcdc_crtc_destroy,
+       .set_config     = drm_atomic_helper_set_config,
+       .page_flip      = drm_atomic_helper_page_flip,
+       .reset          = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
-               .dpms           = tilcdc_crtc_dpms,
                .mode_fixup     = tilcdc_crtc_mode_fixup,
-               .prepare        = tilcdc_crtc_prepare,
-               .commit         = tilcdc_crtc_commit,
-               .mode_set       = tilcdc_crtc_mode_set,
-               .mode_set_base  = tilcdc_crtc_mode_set_base,
+               .enable         = tilcdc_crtc_enable,
+               .disable        = tilcdc_crtc_disable,
+               .atomic_check   = tilcdc_crtc_atomic_check,
+               .mode_set_nofb  = tilcdc_crtc_mode_set_nofb,
 };
 
 int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -622,46 +682,23 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
 
 void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
 {
-       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
-       int dpms = tilcdc_crtc->dpms;
-       unsigned long lcd_clk;
-       const unsigned clkdiv = 2; /* using a fixed divider of 2 */
-       int ret;
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 
-       pm_runtime_get_sync(dev->dev);
+       drm_modeset_lock_crtc(crtc, NULL);
+       if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
+               if (tilcdc_crtc_is_on(crtc)) {
+                       pm_runtime_get_sync(dev->dev);
+                       tilcdc_crtc_disable(crtc);
 
-       if (dpms == DRM_MODE_DPMS_ON)
-               tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+                       tilcdc_crtc_set_clk(crtc);
 
-       /* mode.clock is in KHz, set_rate wants parameter in Hz */
-       ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
-       if (ret < 0) {
-               dev_err(dev->dev, "failed to set display clock rate to: %d\n",
-                               crtc->mode.clock);
-               goto out;
+                       tilcdc_crtc_enable(crtc);
+                       pm_runtime_put_sync(dev->dev);
+               }
        }
-
-       lcd_clk = clk_get_rate(priv->clk);
-
-       DBG("lcd_clk=%lu, mode clock=%d, div=%u",
-               lcd_clk, crtc->mode.clock, clkdiv);
-
-       /* Configure the LCD clock divisor. */
-       tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
-                       LCDC_RASTER_MODE);
-
-       if (priv->rev == 2)
-               tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
-                               LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
-                               LCDC_V2_CORE_CLK_EN);
-
-       if (dpms == DRM_MODE_DPMS_ON)
-               tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-
-out:
-       pm_runtime_put_sync(dev->dev);
+       drm_modeset_unlock_crtc(crtc);
 }
 
 #define SYNC_LOST_COUNT_LIMIT 50
@@ -718,30 +755,34 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
                        tilcdc_crtc->frame_intact = true;
        }
 
+       if (stat & LCDC_FIFO_UNDERFLOW)
+               dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
+                                   __func__, stat);
+
+       /* For revision 2 only */
        if (priv->rev == 2) {
                if (stat & LCDC_FRAME_DONE) {
                        tilcdc_crtc->frame_done = true;
                        wake_up(&tilcdc_crtc->frame_done_wq);
                }
-               tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
-       }
 
-       if (stat & LCDC_SYNC_LOST) {
-               dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
-                                   __func__, stat);
-               tilcdc_crtc->frame_intact = false;
-               if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) {
-                       dev_err(dev->dev,
-                               "%s(0x%08x): Sync lost flood detected, disabling the interrupt",
-                               __func__, stat);
-                       tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
-                                    LCDC_SYNC_LOST);
+               if (stat & LCDC_SYNC_LOST) {
+                       dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+                                           __func__, stat);
+                       tilcdc_crtc->frame_intact = false;
+                       if (tilcdc_crtc->sync_lost_count++ >
+                           SYNC_LOST_COUNT_LIMIT) {
+                               dev_err(dev->dev,
+                                       "%s(0x%08x): Sync lost flood detected, disabling the interrupt",
+                                       __func__, stat);
+                               tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+                                            LCDC_SYNC_LOST);
+                       }
                }
-       }
 
-       if (stat & LCDC_FIFO_UNDERFLOW)
-               dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
-                                   __func__, stat);
+               /*
+                * Indicate to LCDC that the interrupt service routine has
+                * completed, see 13.3.6.1.6 in AM335x TRM.
+                */
+               tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+       }
 
        return IRQ_HANDLED;
 }
@@ -761,7 +802,10 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
 
        crtc = &tilcdc_crtc->base;
 
-       tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+       ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
+       if (ret < 0)
+               goto fail;
+
        init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
 
        drm_flip_work_init(&tilcdc_crtc->unref_work,
@@ -769,7 +813,11 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
 
        spin_lock_init(&tilcdc_crtc->irq_lock);
 
-       ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+       ret = drm_crtc_init_with_planes(dev, crtc,
+                                       &tilcdc_crtc->primary,
+                                       NULL,
+                                       &tilcdc_crtc_funcs,
+                                       "tilcdc crtc");
        if (ret < 0)
                goto fail;
 
index d27809372d54491f19534ae43fc5745c74ae1131..a694977c32f4bfb88448aec5e604536db2f14a5d 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/component.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/suspend.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
 
 static LIST_HEAD(module_list);
 
+static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
+
+static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565,
+                                              DRM_FORMAT_BGR888,
+                                              DRM_FORMAT_XBGR8888 };
+
+static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565,
+                                             DRM_FORMAT_RGB888,
+                                             DRM_FORMAT_XRGB8888 };
+
+static const u32 tilcdc_legacy_formats[] = { DRM_FORMAT_RGB565,
+                                            DRM_FORMAT_RGB888,
+                                            DRM_FORMAT_XRGB8888 };
+
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
                const struct tilcdc_module_ops *funcs)
 {
@@ -59,9 +75,84 @@ static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
        drm_fbdev_cma_hotplug_event(priv->fbdev);
 }
 
+static int tilcdc_atomic_check(struct drm_device *dev,
+                              struct drm_atomic_state *state)
+{
+       int ret;
+
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_planes(dev, state);
+       if (ret)
+               return ret;
+
+       /*
+        * tilcdc ->atomic_check can update ->mode_changed if the pixel
+        * format changes, hence we check modeset changes again.
+        */
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
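
The second drm_atomic_helper_check_modeset() pass is needed because the
plane ->atomic_check (tilcdc_plane_atomic_check(), added later in this
patch) may set crtc_state->mode_changed when the pixel format changes. In
outline:

	/*
	 * drm_atomic_helper_check_modeset(dev, state);  first pass
	 * drm_atomic_helper_check_planes(dev, state);   may set mode_changed
	 * drm_atomic_helper_check_modeset(dev, state);  picks up the change
	 */
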
+static int tilcdc_commit(struct drm_device *dev,
+                 struct drm_atomic_state *state,
+                 bool async)
+{
+       int ret;
+
+       ret = drm_atomic_helper_prepare_planes(dev, state);
+       if (ret)
+               return ret;
+
+       drm_atomic_helper_swap_state(state, true);
+
+       /*
+        * Everything below can be run asynchronously without the need to grab
+        * any modeset locks at all under one condition: It must be guaranteed
+        * that the asynchronous work has either been cancelled (if the driver
+        * supports it, which at least requires that the framebuffers get
+        * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+        * before the new state gets committed on the software side with
+        * drm_atomic_helper_swap_state().
+        *
+        * This scheme allows new atomic state updates to be prepared and
+        * checked in parallel to the asynchronous completion of the previous
+        * update. Which is important since compositors need to figure out the
+        * composition of the next frame right after having submitted the
+        * current layout.
+        */
+
+       /* Keep HW on while we commit the state. */
+       pm_runtime_get_sync(dev->dev);
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state, 0);
+
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+
+       /* Now the HW should remain on if needed because the crtc is enabled */
+       pm_runtime_put_sync(dev->dev);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+
+       drm_atomic_state_free(state);
+
+       return 0;
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = tilcdc_fb_create,
        .output_poll_changed = tilcdc_fb_output_poll_changed,
+       .atomic_check = tilcdc_atomic_check,
+       .atomic_commit = tilcdc_commit,
 };
 
 static int modeset_init(struct drm_device *dev)
@@ -93,12 +184,9 @@ static int cpufreq_transition(struct notifier_block *nb,
 {
        struct tilcdc_drm_private *priv = container_of(nb,
                        struct tilcdc_drm_private, freq_transition);
-       if (val == CPUFREQ_POSTCHANGE) {
-               if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
-                       priv->lcd_fck_rate = clk_get_rate(priv->clk);
-                       tilcdc_crtc_update_clk(priv->crtc);
-               }
-       }
+
+       if (val == CPUFREQ_POSTCHANGE)
+               tilcdc_crtc_update_clk(priv->crtc);
 
        return 0;
 }
@@ -112,8 +200,6 @@ static int tilcdc_unload(struct drm_device *dev)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
 
-       tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
        tilcdc_remove_external_encoders(dev);
 
        drm_fbdev_cma_fini(priv->fbdev);
@@ -121,9 +207,7 @@ static int tilcdc_unload(struct drm_device *dev)
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);
 
-       pm_runtime_get_sync(dev->dev);
        drm_irq_uninstall(dev);
-       pm_runtime_put_sync(dev->dev);
 
 #ifdef CONFIG_CPU_FREQ
        cpufreq_unregister_notifier(&priv->freq_transition,
@@ -146,24 +230,17 @@ static int tilcdc_unload(struct drm_device *dev)
        return 0;
 }
 
-static size_t tilcdc_num_regs(void);
-
 static int tilcdc_load(struct drm_device *dev, unsigned long flags)
 {
        struct platform_device *pdev = dev->platformdev;
        struct device_node *node = pdev->dev.of_node;
        struct tilcdc_drm_private *priv;
-       struct tilcdc_module *mod;
        struct resource *res;
        u32 bpp = 0;
        int ret;
 
        priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
-       if (priv)
-               priv->saved_register =
-                       devm_kcalloc(dev->dev, tilcdc_num_regs(),
-                                    sizeof(*priv->saved_register), GFP_KERNEL);
-       if (!priv || !priv->saved_register) {
+       if (!priv) {
                dev_err(dev->dev, "failed to allocate private data\n");
                return -ENOMEM;
        }
@@ -201,7 +278,6 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        }
 
 #ifdef CONFIG_CPU_FREQ
-       priv->lcd_fck_rate = clk_get_rate(priv->clk);
        priv->freq_transition.notifier_call = cpufreq_transition;
        ret = cpufreq_register_notifier(&priv->freq_transition,
                        CPUFREQ_TRANSITION_NOTIFIER);
@@ -249,6 +325,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
 
        pm_runtime_put_sync(dev->dev);
 
+       if (priv->rev == 1) {
+               DBG("Revision 1 LCDC supports only RGB565 format");
+               priv->pixelformats = tilcdc_rev1_formats;
+               priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats);
+               bpp = 16;
+       } else {
+               const char *str = "\0";
+
+               of_property_read_string(node, "blue-and-red-wiring", &str);
+               if (0 == strcmp(str, "crossed")) {
+                       DBG("Configured for crossed blue and red wires");
+                       priv->pixelformats = tilcdc_crossed_formats;
+                       priv->num_pixelformats =
+                               ARRAY_SIZE(tilcdc_crossed_formats);
+                       bpp = 32; /* Choose bpp with RGB support for fbdev */
+               } else if (0 == strcmp(str, "straight")) {
+                       DBG("Configured for straight blue and red wires");
+                       priv->pixelformats = tilcdc_straight_formats;
+                       priv->num_pixelformats =
+                               ARRAY_SIZE(tilcdc_straight_formats);
+                       bpp = 16; /* Choose bpp with RGB support for fbdev */
+               } else {
+                       DBG("Blue and red wiring '%s' unknown, use legacy mode",
+                           str);
+                       priv->pixelformats = tilcdc_legacy_formats;
+                       priv->num_pixelformats =
+                               ARRAY_SIZE(tilcdc_legacy_formats);
+                       bpp = 16; /* This is just a guess */
+               }
+       }
+
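
The string tested here is the optional "blue-and-red-wiring" device-tree
property; "crossed" and "straight" select the matching format table and
anything else falls back to the legacy list. An illustrative device-tree
fragment (the node label is a placeholder, not taken from this patch):

	/*
	 *	&lcdc {
	 *		blue-and-red-wiring = "crossed";
	 *	};
	 */
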
        ret = modeset_init(dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize mode setting\n");
@@ -262,7 +369,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
                if (ret < 0)
                        goto fail_mode_config_cleanup;
 
-               ret = tilcdc_add_external_encoders(dev, &bpp);
+               ret = tilcdc_add_external_encoders(dev);
                if (ret < 0)
                        goto fail_component_cleanup;
        }
@@ -279,22 +386,14 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
                goto fail_external_cleanup;
        }
 
-       pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
-       pm_runtime_put_sync(dev->dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to install IRQ handler\n");
                goto fail_vblank_cleanup;
        }
 
-       list_for_each_entry(mod, &module_list, list) {
-               DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
-               bpp = mod->preferred_bpp;
-               if (bpp > 0)
-                       break;
-       }
+       drm_mode_config_reset(dev);
 
-       drm_helper_disable_unused_functions(dev);
        priv->fbdev = drm_fbdev_cma_init(dev, bpp,
                        dev->mode_config.num_crtc,
                        dev->mode_config.num_connector);
@@ -308,20 +407,18 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        return 0;
 
 fail_irq_uninstall:
-       pm_runtime_get_sync(dev->dev);
        drm_irq_uninstall(dev);
-       pm_runtime_put_sync(dev->dev);
 
 fail_vblank_cleanup:
        drm_vblank_cleanup(dev);
 
-fail_mode_config_cleanup:
-       drm_mode_config_cleanup(dev);
-
 fail_component_cleanup:
        if (priv->is_componentized)
                component_unbind_all(dev->dev, dev);
 
+fail_mode_config_cleanup:
+       drm_mode_config_cleanup(dev);
+
 fail_external_cleanup:
        tilcdc_remove_external_encoders(dev);
 
@@ -361,45 +458,6 @@ static irqreturn_t tilcdc_irq(int irq, void *arg)
        return tilcdc_crtc_irq(priv->crtc);
 }
 
-static void tilcdc_irq_preinstall(struct drm_device *dev)
-{
-       tilcdc_clear_irqstatus(dev, 0xffffffff);
-}
-
-static int tilcdc_irq_postinstall(struct drm_device *dev)
-{
-       struct tilcdc_drm_private *priv = dev->dev_private;
-
-       /* enable FIFO underflow irq: */
-       if (priv->rev == 1) {
-               tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
-       } else {
-               tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
-                          LCDC_V2_UNDERFLOW_INT_ENA |
-                          LCDC_V2_END_OF_FRAME0_INT_ENA |
-                          LCDC_FRAME_DONE | LCDC_SYNC_LOST);
-       }
-
-       return 0;
-}
-
-static void tilcdc_irq_uninstall(struct drm_device *dev)
-{
-       struct tilcdc_drm_private *priv = dev->dev_private;
-
-       /* disable irqs that we might have enabled: */
-       if (priv->rev == 1) {
-               tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
-                               LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
-               tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
-       } else {
-               tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
-                       LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
-                       LCDC_V2_END_OF_FRAME0_INT_ENA |
-                       LCDC_FRAME_DONE | LCDC_SYNC_LOST);
-       }
-}
-
 static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        return 0;
@@ -410,7 +468,7 @@ static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
        return;
 }
 
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+#if defined(CONFIG_DEBUG_FS)
 static const struct {
        const char *name;
        uint8_t  rev;
@@ -441,15 +499,6 @@ static const struct {
 #undef REG
 };
 
-static size_t tilcdc_num_regs(void)
-{
-       return ARRAY_SIZE(registers);
-}
-#else
-static size_t tilcdc_num_regs(void)
-{
-       return 0;
-}
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -537,14 +586,11 @@ static const struct file_operations fops = {
 
 static struct drm_driver tilcdc_driver = {
        .driver_features    = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
-                              DRIVER_PRIME),
+                              DRIVER_PRIME | DRIVER_ATOMIC),
        .load               = tilcdc_load,
        .unload             = tilcdc_unload,
        .lastclose          = tilcdc_lastclose,
        .irq_handler        = tilcdc_irq,
-       .irq_preinstall     = tilcdc_irq_preinstall,
-       .irq_postinstall    = tilcdc_irq_postinstall,
-       .irq_uninstall      = tilcdc_irq_uninstall,
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank      = tilcdc_enable_vblank,
        .disable_vblank     = tilcdc_disable_vblank,
@@ -584,28 +630,12 @@ static int tilcdc_pm_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct tilcdc_drm_private *priv = ddev->dev_private;
-       unsigned i, n = 0;
 
-       drm_kms_helper_poll_disable(ddev);
+       priv->saved_state = drm_atomic_helper_suspend(ddev);
 
        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);
 
-       if (pm_runtime_suspended(dev)) {
-               priv->ctx_valid = false;
-               return 0;
-       }
-
-       /* Disable the LCDC controller, to avoid locking up the PRCM */
-       tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
-       /* Save register state: */
-       for (i = 0; i < ARRAY_SIZE(registers); i++)
-               if (registers[i].save && (priv->rev >= registers[i].rev))
-                       priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
-
-       priv->ctx_valid = true;
-
        return 0;
 }
 
@@ -613,23 +643,15 @@ static int tilcdc_pm_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct tilcdc_drm_private *priv = ddev->dev_private;
-       unsigned i, n = 0;
+       int ret = 0;
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);
 
-       if (priv->ctx_valid == true) {
-               /* Restore register state: */
-               for (i = 0; i < ARRAY_SIZE(registers); i++)
-                       if (registers[i].save &&
-                           (priv->rev >= registers[i].rev))
-                               tilcdc_write(ddev, registers[i].reg,
-                                            priv->saved_register[n++]);
-       }
-
-       drm_kms_helper_poll_enable(ddev);
+       if (priv->saved_state)
+               ret = drm_atomic_helper_resume(ddev, priv->saved_state);
 
-       return 0;
+       return ret;
 }
 #endif
 
@@ -648,6 +670,12 @@ static int tilcdc_bind(struct device *dev)
 
 static void tilcdc_unbind(struct device *dev)
 {
+       struct drm_device *ddev = dev_get_drvdata(dev);
+
+       /* Check if a subcomponent has already triggered the unloading. */
+       if (!ddev->dev_private)
+               return;
+
        drm_put_dev(dev_get_drvdata(dev));
 }
 
@@ -680,17 +708,15 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
 
 static int tilcdc_pdev_remove(struct platform_device *pdev)
 {
-       struct drm_device *ddev = dev_get_drvdata(&pdev->dev);
-       struct tilcdc_drm_private *priv = ddev->dev_private;
-
-       /* Check if a subcomponent has already triggered the unloading. */
-       if (!priv)
-               return 0;
+       int ret;
 
-       if (priv->is_componentized)
-               component_master_del(&pdev->dev, &tilcdc_comp_ops);
-       else
+       ret = tilcdc_get_external_components(&pdev->dev, NULL);
+       if (ret < 0)
+               return ret;
+       else if (ret == 0)
                drm_put_dev(platform_get_drvdata(pdev));
+       else
+               component_master_del(&pdev->dev, &tilcdc_comp_ops);
 
        return 0;
 }
index c1de18bae41530df2eb1d8390b4876d60f29007a..9780c37ec4cd5f7f0b3a4781013ab7a6f46d8923 100644 (file)
@@ -65,13 +65,15 @@ struct tilcdc_drm_private {
         */
        uint32_t max_width;
 
-       /* register contents saved across suspend/resume: */
-       u32 *saved_register;
-       bool ctx_valid;
+       /* Supported pixel formats */
+       const uint32_t *pixelformats;
+       uint32_t num_pixelformats;
+
+       /* The context for the PM suspend/resume cycle is stored here */
+       struct drm_atomic_state *saved_state;
 
 #ifdef CONFIG_CPU_FREQ
        struct notifier_block freq_transition;
-       unsigned int lcd_fck_rate;
 #endif
 
        struct workqueue_struct *wq;
@@ -113,7 +115,6 @@ struct tilcdc_module {
        const char *name;
        struct list_head list;
        const struct tilcdc_module_ops *funcs;
-       unsigned int preferred_bpp;
 };
 
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
@@ -171,6 +172,11 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
                                        bool simulate_vesa_sync);
 int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
 int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
+void tilcdc_crtc_disable(struct drm_crtc *crtc);
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+               struct drm_framebuffer *fb,
+               struct drm_pending_vblank_event *event);
+
+int tilcdc_plane_init(struct drm_device *dev, struct drm_plane *plane);
 
 #endif /* __TILCDC_DRV_H__ */
index 03acb4f99982f6f219fabf701738fc916930440c..68e895021005cc62034110748f8f1bf028a2391d 100644 (file)
@@ -52,7 +52,7 @@ static int tilcdc_external_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
+static int tilcdc_add_external_encoder(struct drm_device *dev,
                                       struct drm_connector *connector)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
@@ -64,7 +64,6 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
        /* Only tda998x is supported at the moment. */
        tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
        tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
-       *bpp = panel_info_tda998x.bpp;
 
        connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
                                       GFP_KERNEL);
@@ -94,7 +93,7 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
        return 0;
 }
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
+int tilcdc_add_external_encoders(struct drm_device *dev)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct drm_connector *connector;
@@ -108,7 +107,7 @@ int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
                        if (connector == priv->connectors[i])
                                found = true;
                if (!found) {
-                       ret = tilcdc_add_external_encoder(dev, bpp, connector);
+                       ret = tilcdc_add_external_encoder(dev, connector);
                        if (ret)
                                return ret;
                }
@@ -138,14 +137,23 @@ static int dev_match_of(struct device *dev, void *data)
 int tilcdc_get_external_components(struct device *dev,
                                   struct component_match **match)
 {
+       struct device_node *node;
        struct device_node *ep = NULL;
        int count = 0;
 
-       while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
-               struct device_node *node;
+       /*
+        * Avoid an error print from of_graph_get_next_endpoint() if
+        * there are no ports present.
+        */
+       node = of_get_child_by_name(dev->of_node, "ports");
+       if (!node)
+               node = of_get_child_by_name(dev->of_node, "port");
+       if (!node)
+               return 0;
+       of_node_put(node);
 
+       while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
                node = of_graph_get_remote_port_parent(ep);
-               if (!node && !of_device_is_available(node)) {
+               if (!node || !of_device_is_available(node)) {
                        of_node_put(node);
                        continue;
                }
index 6aabe2788760f24296cf5d8f5fa2ae6bd9fe0a99..c700e0c1623e41e2e5d2dc07e5c924f323b10770 100644 (file)
@@ -18,7 +18,7 @@
 #ifndef __TILCDC_EXTERNAL_H__
 #define __TILCDC_EXTERNAL_H__
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp);
+int tilcdc_add_external_encoders(struct drm_device *dev);
 void tilcdc_remove_external_encoders(struct drm_device *dev);
 int tilcdc_get_external_components(struct device *dev,
                                   struct component_match **match);
index ff7774c17d7cd4ccfbf6af4a066ef1177d20f227..2134bb20fbe9d169c6f238ae57cc2685c6e17ea9 100644 (file)
 #include <video/display_timing.h>
 #include <video/of_display_timing.h>
 #include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
+#include "tilcdc_panel.h"
 
 struct panel_module {
        struct tilcdc_module base;
@@ -64,9 +66,7 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
 
 static void panel_encoder_prepare(struct drm_encoder *encoder)
 {
-       struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
        panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-       tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
 }
 
 static void panel_encoder_commit(struct drm_encoder *encoder)
@@ -196,9 +196,12 @@ static struct drm_encoder *panel_connector_best_encoder(
 
 static const struct drm_connector_funcs panel_connector_funcs = {
        .destroy            = panel_connector_destroy,
-       .dpms               = drm_helper_connector_dpms,
+       .dpms               = drm_atomic_helper_connector_dpms,
        .detect             = panel_connector_detect,
        .fill_modes         = drm_helper_probe_single_connector_modes,
+       .reset              = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
@@ -268,6 +271,9 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
        priv->encoders[priv->num_encoders++] = encoder;
        priv->connectors[priv->num_connectors++] = connector;
 
+       tilcdc_crtc_set_panel_info(priv->crtc,
+                                  to_panel_encoder(encoder)->mod->info);
+
        return 0;
 }
 
@@ -392,8 +398,6 @@ static int panel_probe(struct platform_device *pdev)
                goto fail_timings;
        }
 
-       mod->preferred_bpp = panel_mod->info->bpp;
-
        return 0;
 
 fail_timings:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
new file mode 100644 (file)
index 0000000..74c65fa
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <uapi/drm/drm_fourcc.h>
+
+#include "tilcdc_drv.h"
+
+static struct drm_plane_funcs tilcdc_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = drm_plane_cleanup,
+       .set_property   = drm_atomic_helper_plane_set_property,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int tilcdc_plane_atomic_check(struct drm_plane *plane,
+                                    struct drm_plane_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane_state *old_state = plane->state;
+       unsigned int depth, bpp;
+
+       if (!state->crtc)
+               return 0;
+
+       if (WARN_ON(!state->fb))
+               return -EINVAL;
+
+       if (state->crtc_x || state->crtc_y) {
+               dev_err(plane->dev->dev, "%s: crtc position must be zero.",
+                       __func__);
+               return -EINVAL;
+       }
+
+       crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+                                                       state->crtc);
+       /* we should have a crtc state if the plane is attached to a crtc */
+       if (WARN_ON(!crtc_state))
+               return 0;
+
+       if (crtc_state->mode.hdisplay != state->crtc_w ||
+           crtc_state->mode.vdisplay != state->crtc_h) {
+               dev_err(plane->dev->dev,
+                       "%s: Size must match mode (%dx%d == %dx%d)", __func__,
+                       crtc_state->mode.hdisplay, crtc_state->mode.vdisplay,
+                       state->crtc_w, state->crtc_h);
+               return -EINVAL;
+       }
+
+       drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
+       if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+               dev_err(plane->dev->dev,
+                       "Invalid pitch: fb and crtc widths must be the same");
+               return -EINVAL;
+       }
+
+       if (state->fb && old_state->fb &&
+           state->fb->pixel_format != old_state->fb->pixel_format) {
+               dev_dbg(plane->dev->dev,
+                       "%s(): pixel format change requires mode_change\n",
+                       __func__);
+               crtc_state->mode_changed = true;
+       }
+
+       return 0;
+}
+
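
The pitch check above is the atomic-helper replacement for the old
tilcdc_verify_fb(); it rejects any framebuffer with padding between lines.
With hypothetical numbers:

	/*
	 * An XRGB8888 fb (bpp = 32) scanned out on an 800x480 mode must
	 * satisfy
	 *
	 *   pitches[0] == 800 * 32 / 8 == 3200 bytes,
	 *
	 * because the LCDC DMA fetches lines back to back and cannot
	 * skip padding.
	 */
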
+static void tilcdc_plane_atomic_update(struct drm_plane *plane,
+                                      struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = plane->state;
+
+       if (!state->crtc)
+               return;
+
+       if (WARN_ON(!state->fb || !state->crtc->state))
+               return;
+
+       tilcdc_crtc_update_fb(state->crtc,
+                             state->fb,
+                             state->crtc->state->event);
+}
+
+static const struct drm_plane_helper_funcs plane_helper_funcs = {
+       .atomic_check = tilcdc_plane_atomic_check,
+       .atomic_update = tilcdc_plane_atomic_update,
+};
+
+int tilcdc_plane_init(struct drm_device *dev,
+                     struct drm_plane *plane)
+{
+       struct tilcdc_drm_private *priv = dev->dev_private;
+       int ret;
+
+       ret = drm_plane_init(dev, plane, 1,
+                            &tilcdc_plane_funcs,
+                            priv->pixelformats,
+                            priv->num_pixelformats,
+                            true);
+       if (ret) {
+               dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
+               return ret;
+       }
+
+       drm_plane_helper_add(plane, &plane_helper_funcs);
+
+       return 0;
+}
index 1bf5e2553acc41062ae2754789d30025d05f9764..f57c0d62c76a23896d172bd1bbc54a60e7871172 100644 (file)
@@ -119,6 +119,20 @@ static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
        iowrite32(data, priv->mmio + reg);
 }
 
+static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
+{
+       struct tilcdc_drm_private *priv = dev->dev_private;
+       volatile void __iomem *addr = priv->mmio + reg;
+
+#ifdef iowrite64
+       iowrite64(data, addr);
+#else
+       __iowmb();
+       /* This compiles to strd (=64-bit write) on ARM7 */
+       *(volatile u64 __force *)addr = __cpu_to_le64(data);
+#endif
+}
+
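
tilcdc_write64() exists so that two adjacent 32-bit registers can be
updated in a single bus access. A hedged usage sketch (the register layout
and the helper name are illustrative, not from this patch):

	/* On little-endian ARM the low word lands at 'reg' and the high
	 * word at 'reg + 4', so a DMA start/end pair updates atomically:
	 */
	static inline void example_set_dma_window(struct drm_device *dev,
						  u32 reg, u32 start, u32 end)
	{
		tilcdc_write64(dev, reg, (u64)end << 32 | start);
	}
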
 static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
index f9c79dabce20029401f7d25cc58cf1400f324da1..623a9140493c681bd9a9495d21395e41155844e9 100644 (file)
@@ -139,7 +139,7 @@ static void __init tilcdc_node_disable(struct device_node *node)
        of_update_property(node, prop);
 }
 
-struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
+static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
 {
        const int size = __dtb_tilcdc_slave_compat_end -
                __dtb_tilcdc_slave_compat_begin;
@@ -195,7 +195,7 @@ static const char * const tilcdc_slave_props[] __initconst = {
        NULL
 };
 
-void __init tilcdc_convert_slave_node(void)
+static void __init tilcdc_convert_slave_node(void)
 {
        struct device_node *slave = NULL, *lcdc = NULL;
        struct device_node *i2c = NULL, *fragment = NULL;
@@ -207,7 +207,7 @@ void __init tilcdc_convert_slave_node(void)
        int ret;
 
        if (kfree_table_init(&kft))
-               goto out;
+               return;
 
        lcdc = of_find_matching_node(NULL, tilcdc_of_match);
        slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
@@ -261,7 +261,7 @@ out:
        of_node_put(fragment);
 }
 
-int __init tilcdc_slave_compat_init(void)
+static int __init tilcdc_slave_compat_init(void)
 {
        tilcdc_convert_slave_node();
        return 0;
index 6b8c5b3bf588b01b56a8a398f0f61918507086c3..458043a539959ec08a867aaf98ea4274f78b6693 100644 (file)
 #include <linux/of_gpio.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/consumer.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
+#include "tilcdc_tfp410.h"
 
 struct tfp410_module {
        struct tilcdc_module base;
@@ -75,7 +77,6 @@ static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
 static void tfp410_encoder_prepare(struct drm_encoder *encoder)
 {
        tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-       tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
 }
 
 static void tfp410_encoder_commit(struct drm_encoder *encoder)
@@ -201,9 +202,12 @@ static struct drm_encoder *tfp410_connector_best_encoder(
 
 static const struct drm_connector_funcs tfp410_connector_funcs = {
        .destroy            = tfp410_connector_destroy,
-       .dpms               = drm_helper_connector_dpms,
+       .dpms               = drm_atomic_helper_connector_dpms,
        .detect             = tfp410_connector_detect,
        .fill_modes         = drm_helper_probe_single_connector_modes,
+       .reset              = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
@@ -276,6 +280,7 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev
        priv->encoders[priv->num_encoders++] = encoder;
        priv->connectors[priv->num_connectors++] = connector;
 
+       tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
        return 0;
 }
 
@@ -323,8 +328,6 @@ static int tfp410_probe(struct platform_device *pdev)
                goto fail;
        }
 
-       mod->preferred_bpp = dvi_info.bpp;
-
        i2c_node = of_find_node_by_phandle(i2c_phandle);
        if (!i2c_node) {
                dev_err(&pdev->dev, "could not get i2c bus node\n");
index 4054d804fe068f5821d49d675549d40a7ea23fb6..fc6217dfe4016ebe7ea270189b486cba4b7db76c 100644 (file)
@@ -57,14 +57,14 @@ static struct attribute ttm_bo_count = {
 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
                                          uint32_t *mem_type)
 {
-       int i;
+       int pos;
 
-       for (i = 0; i <= TTM_PL_PRIV5; i++)
-               if (place->flags & (1 << i)) {
-                       *mem_type = i;
-                       return 0;
-               }
-       return -EINVAL;
+       pos = ffs(place->flags & TTM_PL_MASK_MEM);
+       if (unlikely(!pos))
+               return -EINVAL;
+
+       *mem_type = pos - 1;
+       return 0;
 }
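
The rewritten ttm_mem_type_from_place() replaces the loop up to
TTM_PL_PRIV5 with a single ffs() scan. ffs() returns the 1-based index of
the least significant set bit, or 0 when no bit is set, so:

	/*
	 *   place->flags = TTM_PL_FLAG_VRAM  (1 << TTM_PL_VRAM, i.e. 1 << 2)
	 *   ffs(flags & TTM_PL_MASK_MEM) == 3  ->  *mem_type = 2
	 *
	 *   no memory-type bit set  ->  ffs(0) == 0  ->  -EINVAL
	 */
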
 
 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
@@ -354,13 +354,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, interruptible,
-                                        no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
index 2df602a35f9291ce178a6634ad9d0531ff331df4..bf6e21655c576aed2810193e576441e69af406b9 100644 (file)
@@ -45,14 +45,22 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict,
-                   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+                   bool interruptible, bool no_wait_gpu,
+                   struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
+               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               pr_err("Failed to expire sync object before unbinding TTM\n");
+                       return ret;
+               }
+
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
@@ -321,8 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
+                      bool interruptible, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
index a1803fbcc898a9f347b696ac454442592bc22004..29855be96be096bc7622e398e119e5376ce4bd8e 100644 (file)
@@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size)
        return 0;
 }
 EXPORT_SYMBOL(ttm_round_pot);
+
+uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
+{
+       return glob->zone_kernel->max_mem;
+}
+EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
index bef9f6feb635a45017835767545c3ec79b23b5e2..cec4b4baa17987fcdca8b911af67b4183adfbc7d 100644 (file)
@@ -858,7 +858,6 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
-               ttm_dma->cpu_address[index] = d_page->vaddr;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
@@ -989,7 +988,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
-               ttm_dma->cpu_address[i] = 0;
                ttm_dma->dma_address[i] = 0;
        }
 
index bc5aa573f466f12384b1746053db66436e133a95..aee3c00f836e723fed991843c22eea5ca0b974d0 100644 (file)
@@ -57,10 +57,8 @@ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
-                                         sizeof(*ttm->dma_address) +
-                                         sizeof(*ttm->cpu_address));
-       ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
-       ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+                                         sizeof(*ttm->dma_address));
+       ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
 #ifdef CONFIG_X86
@@ -244,7 +242,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
-       ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
index 4709b54c204c21f71c6efbd8ad3d1070f126e489..d2f57c52f7db2c74dcba7bcc35a6a3c622009cbe 100644 (file)
@@ -150,8 +150,5 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
        drm_connector_register(connector);
        drm_mode_connector_attach_encoder(connector, encoder);
 
-       drm_object_attach_property(&connector->base,
-                                     dev->mode_config.dirty_info_property,
-                                     1);
        return 0;
 }
index 17d34e0edbdd513e4c7860e3ddf77c595bd2fa41..cc45d98f9bb5fb749071b2f0bd0e15e16e6cf423 100644 (file)
@@ -16,6 +16,20 @@ static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
        return 0;
 }
 
+static int udl_usb_suspend(struct usb_interface *interface,
+                          pm_message_t message)
+{
+       return 0;
+}
+
+static int udl_usb_resume(struct usb_interface *interface)
+{
+       struct drm_device *dev = usb_get_intfdata(interface);
+
+       udl_modeset_restore(dev);
+       return 0;
+}
+
 static const struct vm_operations_struct udl_gem_vm_ops = {
        .fault = udl_gem_fault,
        .open = drm_gem_vm_open,
@@ -72,8 +86,8 @@ static int udl_usb_probe(struct usb_interface *interface,
        int r;
 
        dev = drm_dev_alloc(&driver, &interface->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        r = drm_dev_register(dev, (unsigned long)udev);
        if (r)
@@ -122,6 +136,8 @@ static struct usb_driver udl_driver = {
        .name = "udl",
        .probe = udl_usb_probe,
        .disconnect = udl_usb_disconnect,
+       .suspend = udl_usb_suspend,
+       .resume = udl_usb_resume,
        .id_table = id_table,
 };
 module_usb_driver(udl_driver);
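
The probe change above is part of a tree-wide drm_dev_alloc() conversion: the helper now returns an ERR_PTR-encoded error instead of NULL on failure, so the callers in this merge (udl, vc4, vgem, virtio) all switch to the same idiom, sketched here:

    struct drm_device *dev = drm_dev_alloc(&driver, parent);
    if (IS_ERR(dev))
            return PTR_ERR(dev);    /* commonly -ENOMEM */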
index 0b03d34ffdee8c4c569e46ad94a994f83d5c6418..f338a576efc8220ce1298e4849cb3a0e1a5bc837 100644 (file)
@@ -52,6 +52,7 @@ struct udl_device {
        struct device *dev;
        struct drm_device *ddev;
        struct usb_device *udev;
+       struct drm_crtc *crtc;
 
        int sku_pixel_limit;
 
@@ -87,6 +88,7 @@ struct udl_framebuffer {
 
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_restore(struct drm_device *dev);
 void udl_modeset_cleanup(struct drm_device *dev);
 int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
 
index d5df555aeba026612aba00a6f4738d33f71249b5..9688bfa92ccd33545982dbeedbdc8154c3ca38f1 100644 (file)
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
        ufbdev->fb_count++;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (fb_defio && (info->fbdefio == NULL)) {
                /* enable defio at last moment if not disabled by client */
 
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
                info->fbdefio = fbdefio;
                fb_deferred_io_init(info);
        }
+#endif
 
        pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
                  info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
 
        ufbdev->fb_count--;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
        if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
                fb_deferred_io_cleanup(info);
                kfree(info->fbdefio);
                info->fbdefio = NULL;
                info->fbops->fb_mmap = udl_fb_mmap;
        }
+#endif
 
        pr_warn("released /dev/fb%d user=%d count=%d\n",
                info->node, user, ufbdev->fb_count);
index 33dbfb2c47486ee3b7a88cc318d3e6230b4dc0f1..29f0207fa677064dc4b7bd93ea360fd5d3f414a4 100644 (file)
@@ -16,6 +16,8 @@
 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
 #define BULK_SIZE 512
 
+#define NR_USB_REQUEST_CHANNEL 0x12
+
 #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
 #define WRITES_IN_FLIGHT (4)
 #define MAX_VENDOR_DESCRIPTOR_SIZE 256
@@ -90,6 +92,26 @@ success:
        return true;
 }
 
+/*
+ * Need to ensure a channel is selected before submitting URBs
+ */
+static int udl_select_std_channel(struct udl_device *udl)
+{
+       int ret;
+       u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+                           0x1C, 0x88, 0x5E, 0x15,
+                           0x60, 0xFE, 0xC6, 0x97,
+                           0x16, 0x3D, 0x47, 0xF2};
+
+       ret = usb_control_msg(udl->udev,
+                             usb_sndctrlpipe(udl->udev, 0),
+                             NR_USB_REQUEST_CHANNEL,
+                             (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
+                             set_def_chn, sizeof(set_def_chn),
+                             USB_CTRL_SET_TIMEOUT);
+       return ret < 0 ? ret : 0;
+}
+
 static void udl_release_urb_work(struct work_struct *work)
 {
        struct urb_node *unode = container_of(work, struct urb_node,
@@ -301,6 +323,9 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
                goto err;
        }
 
+       if (udl_select_std_channel(udl))
+               DRM_ERROR("Selecting channel failed\n");
+
        if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
                DRM_ERROR("udl_alloc_urb_list failed\n");
                goto err;
index f92ea957967471f80f14a83da4c0d71ad3cd4aae..f2b2481cad525785868444002289803c42e1ab64 100644 (file)
@@ -309,6 +309,8 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
        char *wrptr;
        int color_depth = 0;
 
+       udl->crtc = crtc;
+
        buf = (char *)udl->mode_buf;
 
        /* for now we just clip 24 -> 16 - if we fix that fix this */
@@ -441,8 +443,6 @@ int udl_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = &udl_mode_funcs;
 
-       drm_mode_create_dirty_info_property(dev);
-
        udl_crtc_init(dev);
 
        encoder = udl_encoder_init(dev);
@@ -452,6 +452,18 @@ int udl_modeset_init(struct drm_device *dev)
        return 0;
 }
 
+void udl_modeset_restore(struct drm_device *dev)
+{
+       struct udl_device *udl = dev->dev_private;
+       struct udl_framebuffer *ufb;
+
+       if (!udl->crtc || !udl->crtc->primary->fb)
+               return;
+       udl_crtc_commit(udl->crtc);
+       ufb = to_udl_fb(udl->crtc->primary->fb);
+       udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+}
+
 void udl_modeset_cleanup(struct drm_device *dev)
 {
        drm_mode_config_cleanup(dev);
index 59adcf8532dd804348d31e4f73249e3561cb5353..3f6704cf6608d7be47637c6aa585de087b7f74ee 100644 (file)
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
        return &vc4->bo_cache.size_list[page_index];
 }
 
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
index 8fc2b731b59a613fddce5853981e1ab8fd982837..2682f07d8f1e6d3dc89ad0858f136c511040b517 100644 (file)
@@ -163,14 +163,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
        int vblank_lines;
        int ret = 0;
 
-       /*
-        * XXX Doesn't work well in interlaced mode yet, partially due
-        * to problems in vc4 kms or drm core interlaced mode handling,
-        * so disable for now in interlaced mode.
-        */
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               return ret;
-
        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
        /* Get optional system timestamp before query. */
@@ -191,10 +183,15 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 
        /* Vertical position of hvs composed scanline. */
        *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+       *hpos = 0;
 
-       /* No hpos info available. */
-       if (hpos)
-               *hpos = 0;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               *vpos /= 2;
+
+               /* Use hpos to correct for field offset in interlaced mode. */
+               if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2)
+                       *hpos += mode->crtc_htotal / 2;
+       }
 
        /* This is the offset we need for translating hvs -> pv scanout pos. */
        fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
@@ -217,8 +214,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                 * position of the PV.
                 */
                *vpos -= fifo_lines + 1;
-               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-                       *vpos /= 2;
 
                ret |= DRM_SCANOUTPOS_ACCURATE;
                return ret;
@@ -480,6 +475,9 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
        int ret;
        require_hvs_enabled(dev);
 
+       /* Disable vblank irq handling before crtc is disabled. */
+       drm_crtc_vblank_off(crtc);
+
        CRTC_WRITE(PV_V_CONTROL,
                   CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
        ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
@@ -530,6 +528,33 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
        /* Turn on the pixel valve, which will emit the vstart signal. */
        CRTC_WRITE(PV_V_CONTROL,
                   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+
+       /* Enable vblank irq handling after crtc is started. */
+       drm_crtc_vblank_on(crtc);
+}
+
+static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
+                               const struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       /* Do not allow doublescan modes from user space */
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+               DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
+                             crtc->base.id);
+               return false;
+       }
+
+       /*
+        * Interlaced video modes got CRTC_INTERLACE_HALVE_V applied when
+        * coming from user space. We don't want this, as it screws up
+        * vblank timestamping, so fix it up.
+        */
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       DRM_DEBUG_KMS("[CRTC:%d] adjusted_mode :\n", crtc->base.id);
+       drm_mode_debug_printmodeline(adjusted_mode);
+
+       return true;
 }
 
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -819,6 +844,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
        .mode_set_nofb = vc4_crtc_mode_set_nofb,
        .disable = vc4_crtc_disable,
        .enable = vc4_crtc_enable,
+       .mode_fixup = vc4_crtc_mode_fixup,
        .atomic_check = vc4_crtc_atomic_check,
        .atomic_flush = vc4_crtc_atomic_flush,
 };
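
A worked example of the interlaced scanout-position handling above, with mode values assumed for illustration (1080i, crtc_htotal = 2200): the HVS line counter is halved to a per-field line number, and odd fields report an extra half scanline through hpos, so vblank timestamps land between the two fields:

    int vpos = hvs_line / 2;                      /* e.g. line 540 -> 270 */
    int hpos = (frame_count & 1) ? 2200 / 2 : 0;  /* 1100 on odd fields */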
index 275fedbdbd9eae420939627e7fcdf9236cfce4fa..1e1f6b8184d058c6e55e092653798f4d75b55b7f 100644 (file)
@@ -340,9 +340,20 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
        }
 }
 
+static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return false;
+
+       return true;
+}
+
 static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
        .disable = vc4_dpi_encoder_disable,
        .enable = vc4_dpi_encoder_enable,
+       .mode_fixup = vc4_dpi_encoder_mode_fixup,
 };
 
 static const struct of_device_id vc4_dpi_dt_match[] = {
index 8b42d31a7f0e8b61ed776200841285d7d5687dbb..8703f56b794774ac4130a7b487472e293ebe7cdf 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include "drm_fb_cma_helper.h"
+#include <drm/drm_fb_helper.h>
 
 #include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
@@ -57,21 +58,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
        switch (args->param) {
        case DRM_VC4_PARAM_V3D_IDENT0:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT0);
                pm_runtime_put(&vc4->v3d->pdev->dev);
                break;
        case DRM_VC4_PARAM_V3D_IDENT1:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT1);
                pm_runtime_put(&vc4->v3d->pdev->dev);
                break;
        case DRM_VC4_PARAM_V3D_IDENT2:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT2);
                pm_runtime_put(&vc4->v3d->pdev->dev);
@@ -214,7 +215,7 @@ static void vc4_kick_out_firmware_fb(void)
        ap->ranges[0].base = 0;
        ap->ranges[0].size = ~0;
 
-       remove_conflicting_framebuffers(ap, "vc4drmfb", false);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false);
        kfree(ap);
 }
 
@@ -232,8 +233,8 @@ static int vc4_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm = drm_dev_alloc(&vc4_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
        platform_set_drvdata(pdev, drm);
        vc4->dev = drm;
        drm->dev_private = vc4;
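
The `ret < 0` checks above matter because pm_runtime_get_sync() returns 1, not 0, when the device was already resumed; the old `if (ret)` test turned that success into an ioctl failure. The corrected idiom, in brief:

    int ret = pm_runtime_get_sync(dev);  /* may return 1: already active */
    if (ret < 0)
            return ret;                  /* only negatives are errors */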
index 489e3de0c050df0eb3022afa421c579449c3ebdf..428e24919ef11919a027b67225f7145173430d98 100644 (file)
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
                                struct vc4_exec_info, head);
 }
 
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+       if (list_empty(&vc4->render_job_list))
+               return NULL;
+       return list_last_entry(&vc4->render_job_list,
+                              struct vc4_exec_info, head);
+}
+
 /**
  * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
  * setup parameters.
index 6155e8aca1c6490039f14cd8addcf467a7de8eb4..77daea6cb8668df85d70d4b760f4e5a5be41800a 100644 (file)
@@ -419,10 +419,6 @@ again:
 
        vc4_flush_caches(dev);
 
-       /* Disable the binner's pre-loaded overflow memory address */
-       V3D_WRITE(V3D_BPOA, 0);
-       V3D_WRITE(V3D_BPOS, 0);
-
        /* Either put the job in the binner if it uses the binner, or
         * immediately move it to the to-be-rendered queue.
         */
@@ -534,8 +530,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                return -EINVAL;
        }
 
-       exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
-                          GFP_KERNEL);
+       exec->bo = drm_calloc_large(exec->bo_count,
+                                   sizeof(struct drm_gem_cma_object *));
        if (!exec->bo) {
                DRM_ERROR("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
@@ -572,8 +568,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
        spin_unlock(&file_priv->table_lock);
 
 fail:
-       kfree(handles);
-       return 0;
+       drm_free_large(handles);
+       return ret;
 }
 
 static int
@@ -608,7 +604,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
         * read the contents back for validation, and I think the
         * bo->vaddr is uncached access.
         */
-       temp = kmalloc(temp_size, GFP_KERNEL);
+       temp = drm_malloc_ab(temp_size, 1);
        if (!temp) {
                DRM_ERROR("Failed to allocate storage for copying "
                          "in bin/render CLs.\n");
@@ -675,7 +671,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
        ret = vc4_validate_shader_recs(dev, exec);
 
 fail:
-       kfree(temp);
+       drm_free_large(temp);
        return ret;
 }
 
@@ -688,7 +684,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++)
                        drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-               kfree(exec->bo);
+               drm_free_large(exec->bo);
        }
 
        while (!list_empty(&exec->unref_list)) {
@@ -942,8 +938,8 @@ vc4_gem_destroy(struct drm_device *dev)
                vc4->overflow_mem = NULL;
        }
 
-       vc4_bo_cache_destroy(dev);
-
        if (vc4->hang_state)
                vc4_free_hang_state(dev, vc4->hang_state);
+
+       vc4_bo_cache_destroy(dev);
 }
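
Two distinct fixes land in vc4_cl_lookup_bos() above: the fail path used to `return 0` after freeing the handle table, letting callers proceed as if the lookup had succeeded, and the userspace-sized allocations move to the DRM large-alloc helpers, which may fall back to vmalloc and must therefore be paired with drm_free_large(), never kfree(). Sketch of the pairing rule:

    void **tbl = drm_calloc_large(count, sizeof(*tbl)); /* kmalloc or vmalloc */
    if (!tbl)
            return -ENOMEM;
    /* ... */
    drm_free_large(tbl);  /* picks kfree() or vfree() to match */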
index 4452f3631cacea37bbd5dc8a594367631e308adc..68ad10634b29ec7b716f70f0b5fd9f2046da1fe5 100644 (file)
@@ -208,10 +208,35 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
        return ret;
 }
 
+/*
+ * drm_helper_probe_single_connector_modes() applies drm_mode_set_crtcinfo to
+ * all modes with flag CRTC_INTERLACE_HALVE_V. We don't want this, as it
+ * screws up vblank timestamping for interlaced modes, so fix it up.
+ */
+static int vc4_hdmi_connector_probe_modes(struct drm_connector *connector,
+                                         uint32_t maxX, uint32_t maxY)
+{
+       struct drm_display_mode *mode;
+       int count;
+
+       count = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+       if (count == 0)
+               return 0;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed adapted modes :\n",
+                     connector->base.id, connector->name);
+       list_for_each_entry(mode, &connector->modes, head) {
+               drm_mode_set_crtcinfo(mode, 0);
+               drm_mode_debug_printmodeline(mode);
+       }
+
+       return count;
+}
+
 static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = vc4_hdmi_connector_detect,
-       .fill_modes = drm_helper_probe_single_connector_modes,
+       .fill_modes = vc4_hdmi_connector_probe_modes,
        .destroy = vc4_hdmi_connector_destroy,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -246,7 +271,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
        connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
                             DRM_CONNECTOR_POLL_DISCONNECT);
 
-       connector->interlace_allowed = 0;
+       connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
 
        drm_mode_connector_attach_encoder(connector, encoder);
index b0104a346a74eb44f0230d1a04968906661ae4fa..094bc6a475c1773923dfe8225cba9886fc9e5026 100644 (file)
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
 
                spin_lock_irqsave(&vc4->job_lock, irqflags);
                current_exec = vc4_first_bin_job(vc4);
+               if (!current_exec)
+                       current_exec = vc4_last_render_job(vc4);
                if (current_exec) {
-                       vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+                       vc4->overflow_mem->seqno = current_exec->seqno;
                        list_add_tail(&vc4->overflow_mem->unref_head,
                                      &current_exec->unref_list);
                        vc4->overflow_mem = NULL;
index 4ac894d993cd80472777b2a3bacc2fcf0dd52810..c1f65c6c8e601e9331768ca040a5609cad686b2e 100644 (file)
@@ -44,7 +44,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state, false);
+       drm_atomic_helper_commit_planes(dev, state, 0);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
index 29e4b400e25e34a63e4710e9edbba62541b9cdc5..881bf489478b01b34e9e4df6013fe608c42215ee 100644 (file)
@@ -735,8 +735,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
 }
 
 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
-       .prepare_fb = NULL,
-       .cleanup_fb = NULL,
        .atomic_check = vc4_plane_atomic_check,
        .atomic_update = vc4_plane_atomic_update,
 };
index 46527e989ce37839b0bd76d27dd0d5a4821bf58c..2543cf5b8b51869d51b72a5db5017dded38761be 100644 (file)
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
         * of uniforms on each side.  However, this scheme is easy to
         * validate so it's all we allow for now.
         */
-
-       if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+       switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+       case QPU_SIG_NONE:
+       case QPU_SIG_SCOREBOARD_UNLOCK:
+       case QPU_SIG_COLOR_LOAD:
+       case QPU_SIG_LOAD_TMU0:
+       case QPU_SIG_LOAD_TMU1:
+               break;
+       default:
                DRM_ERROR("uniforms address change must be "
                          "normal math\n");
                return false;
index c15bafb06665f0ec8c788cd1c21d7067dc5a8df3..f36c14729b5547b2a36f67b65255b73618063919 100644 (file)
@@ -334,8 +334,8 @@ static int __init vgem_init(void)
        int ret;
 
        vgem_device = drm_dev_alloc(&vgem_driver, NULL);
-       if (!vgem_device) {
-               ret = -ENOMEM;
+       if (IS_ERR(vgem_device)) {
+               ret = PTR_ERR(vgem_device);
                goto out;
        }
 
index ed8aa8ff861a14f1cc42ef636e505fc0ba816085..e5582bab7e3c926999088d848e5a37669cf1399b 100644 (file)
@@ -72,7 +72,7 @@ static const struct file_operations via_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
+           DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY |
            DRIVER_IRQ_SHARED,
        .load = via_driver_load,
        .unload = via_driver_unload,
index 4e192aa2d0216fe15d9f7746e3092886b55e031d..7cf3678623c3a1b0254b88b76d8f8ad95f7a08ce 100644 (file)
@@ -338,7 +338,8 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_modeset_enables(dev, state);
-       drm_atomic_helper_commit_planes(dev, state, true);
+       drm_atomic_helper_commit_planes(dev, state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_commit_hw_done(state);
 
index 7f0e93f87a554ee8e93a81ee2f4d80f9745ad3bc..26197dd95d5c95aa02e0af4ffd04b514cd4b6ab4 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/pci.h>
+#include <drm/drm_fb_helper.h>
 
 #include "virtgpu_drv.h"
 
@@ -42,7 +43,7 @@ static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
        primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
                & IORESOURCE_ROM_SHADOW;
 
-       remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
 
        kfree(ap);
 }
@@ -53,8 +54,8 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
        int ret;
 
        dev = drm_dev_alloc(driver, &vdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
        dev->virtdev = vdev;
        vdev->priv = dev;
 
index b18ef3111f0c40c054c33e66877927a6fbbd4d5b..06ad9238044e03b4d196e290e01bb62094868a72 100644 (file)
@@ -75,6 +75,7 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_fence_driver {
        atomic64_t       last_seq;
        uint64_t         sync_seq;
+       uint64_t         context;
        struct list_head fences;
        spinlock_t       lock;
 };
index cf4418709e7644a557c0392176308c4fdb1d8a24..f3f70fa8a4c7f5eefb6fba0a8798d282c860c60e 100644 (file)
@@ -89,7 +89,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
        (*fence)->drv = drv;
        (*fence)->seq = ++drv->sync_seq;
        fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
-                  0, (*fence)->seq);
+                  drv->context, (*fence)->seq);
        fence_get(&(*fence)->f);
        list_add_tail(&(*fence)->node, &drv->fences);
        spin_unlock_irqrestore(&drv->lock, irq_flags);
index c046903cb47ba535f10a5849adb86e5f06326c66..818478b4c4f0aec5baf5fe737e9dfd9a954afeff 100644 (file)
@@ -89,10 +89,16 @@ static void virtio_gpu_unref_list(struct list_head *head)
        }
 }
 
-static int virtio_gpu_execbuffer(struct drm_device *dev,
-                                struct drm_virtgpu_execbuffer *exbuf,
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *drm_file)
 {
+       struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
@@ -152,15 +158,10 @@ static int virtio_gpu_execbuffer(struct drm_device *dev,
        if (ret)
                goto out_free;
 
-       buf = kmalloc(exbuf->size, GFP_KERNEL);
-       if (!buf) {
-               ret = -ENOMEM;
-               goto out_unresv;
-       }
-       if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
-                          exbuf->size)) {
-               kfree(buf);
-               ret = -EFAULT;
+       buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
+                         exbuf->size);
+       if (IS_ERR(buf)) {
+               ret = PTR_ERR(buf);
                goto out_unresv;
        }
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
@@ -182,20 +183,6 @@ out_free:
        return ret;
 }
 
-/*
- * Usage of execbuffer:
- * Relocations need to take into account the full VIRTIO_GPUDrawable size.
- * However, the command as passed from user space must *not* contain the initial
- * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
- */
-static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
-                                      struct drm_file *file_priv)
-{
-       struct drm_virtgpu_execbuffer *execbuffer = data;
-       return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
-}
-
-
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
 {
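
The memdup_user() conversion above folds the kmalloc()/copy_from_user() pair into one call that returns an ERR_PTR on failure; a generic sketch of the idiom (in the patch itself, buf is handed on to virtio_gpu_cmd_submit rather than freed locally):

    buf = memdup_user(user_ptr, size);
    if (IS_ERR(buf))
            return PTR_ERR(buf);   /* -ENOMEM or -EFAULT */
    /* ... use buf ... */
    kfree(buf);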
index 4150873d432efb1fb05cb2129e93267436620d0f..036b0fbae0fb7d2ce57245eb870390df81aa13e5 100644 (file)
@@ -159,6 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
        virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
        virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
 
+       vgdev->fence_drv.context = fence_context_alloc(1);
        spin_lock_init(&vgdev->fence_drv.lock);
        INIT_LIST_HEAD(&vgdev->fence_drv.fences);
        INIT_LIST_HEAD(&vgdev->cap_cache);
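
This pairs with the virtgpu_fence.c hunk above: fence seqnos are only meaningfully comparable within one fence context, so the driver now allocates its own context at load time instead of sharing context 0. The two halves of the idiom:

    drv->context = fence_context_alloc(1);          /* once, at init */
    fence_init(&f->f, &virtio_fence_ops, &drv->lock,
               drv->context, ++drv->sync_seq);      /* per fence */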
index 925ca25209dfa12a60128b333d21d19fad9c03af..ba28c0f6f28a352b3f6f030b09be90d76eca6cec 100644 (file)
@@ -76,7 +76,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
-       WARN_ON(!output);
+       if (WARN_ON(!output))
+               return;
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
@@ -129,7 +130,8 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
-       WARN_ON(!output);
+       if (WARN_ON(!output))
+               return;
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
index b49445df8a7e6c672f5ec7e590ded51647288b62..fb7b82aad763c4ae1b4f9cb954ecba2bc7344e6a 100644 (file)
@@ -6,6 +6,7 @@ config DRM_VMWGFX
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
        select DRM_TTM
+       select FB
        # Only needed for the transitional use of drm_crtc_init - can be removed
        # again once vmwgfx sets up the primary plane itself.
        select DRM_KMS_HELPER
index 74304b03f9d4811973c27e0259cd4cbbf9417ae6..070d750af16d33022f8b3496f38d3667d77169ca 100644 (file)
                        VMWGFX_NUM_GB_SURFACE +\
                        VMWGFX_NUM_GB_SCREEN_TARGET)
 
-#define VMW_PL_GMR TTM_PL_PRIV0
-#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_PL_MOB TTM_PL_PRIV1
-#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
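
TTM here replaces the fixed TTM_PL_PRIV0/TTM_PL_PRIV1 slots with a single TTM_PL_PRIV base, so drivers derive their private memory types and placement flags by offset. A hypothetical third vmwgfx pool (name invented for illustration) would continue the pattern:

    #define VMW_PL_FOO      (TTM_PL_PRIV + 2)
    #define VMW_PL_FLAG_FOO (TTM_PL_FLAG_PRIV << 2)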
index 63ccd9871ec94266a25ee435b05ef3dd7b20e1b0..23ec673d5e16290108c7ec30a8dbdbe62f8c87a1 100644 (file)
@@ -376,9 +376,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_object_attach_property(&connector->base,
-                                  dev->mode_config.dirty_info_property,
-                                  1);
        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
@@ -421,10 +418,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
        if (ret != 0)
                goto err_free;
 
-       ret = drm_mode_create_dirty_info_property(dev);
-       if (ret != 0)
-               goto err_vblank_cleanup;
-
        vmw_kms_create_implicit_placement_property(dev_priv, true);
 
        if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
@@ -439,8 +432,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 
        return 0;
 
-err_vblank_cleanup:
-       drm_vblank_cleanup(dev);
 err_free:
        kfree(dev_priv->ldu_priv);
        dev_priv->ldu_priv = NULL;
index b74eae2b8594d54a6083311fae8dd9fd8854e2d9..f42359084adc4e9890427c7d8a1ac5fcfce6507f 100644 (file)
@@ -537,9 +537,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_object_attach_property(&connector->base,
-                                  dev->mode_config.dirty_info_property,
-                                  1);
        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
@@ -574,10 +571,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
        if (unlikely(ret != 0))
                return ret;
 
-       ret = drm_mode_create_dirty_info_property(dev);
-       if (unlikely(ret != 0))
-               goto err_vblank_cleanup;
-
        vmw_kms_create_implicit_placement_property(dev_priv, false);
 
        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
@@ -588,10 +581,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
        DRM_INFO("Screen Objects Display Unit initialized\n");
 
        return 0;
-
-err_vblank_cleanup:
-       drm_vblank_cleanup(dev);
-       return ret;
 }
 
 int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
index 41932a7c4f79516da86f9e35d1223c8e033cf248..94ad8d2acf9a2106cafdc9349c4f1518d5d85503 100644 (file)
@@ -1130,9 +1130,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_object_attach_property(&connector->base,
-                                  dev->mode_config.dirty_info_property,
-                                  1);
        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
@@ -1202,10 +1199,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
        if (unlikely(ret != 0))
                return ret;
 
-       ret = drm_mode_create_dirty_info_property(dev);
-       if (unlikely(ret != 0))
-               goto err_vblank_cleanup;
-
        dev_priv->active_display_unit = vmw_du_screen_target;
 
        vmw_kms_create_implicit_placement_property(dev_priv, false);
index 107ec236a4a6fb633d5c6974c037c02360dc7eab..5f961416c4eeb44213140cb04b3c3f552f5170b8 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
 
 imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
-               ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
+               ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+               ipu-smfc.o ipu-vdi.o
index 99dcacf05b9986f97c44dbb1b962d47db85bf818..b9539f7c5e9adc43f4acc91db35226371699e919 100644 (file)
@@ -45,6 +45,12 @@ static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
        writel(value, ipu->cm_reg + offset);
 }
 
+int ipu_get_num(struct ipu_soc *ipu)
+{
+       return ipu->id;
+}
+EXPORT_SYMBOL_GPL(ipu_get_num);
+
 void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
 {
        u32 val;
@@ -724,6 +730,137 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
 }
 EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
 
+
+/* Frame Synchronization Unit Channel Linking */
+
+struct fsu_link_reg_info {
+       int chno;
+       u32 reg;
+       u32 mask;
+       u32 val;
+};
+
+struct fsu_link_info {
+       struct fsu_link_reg_info src;
+       struct fsu_link_reg_info sink;
+};
+
+static const struct fsu_link_info fsu_link_info[] = {
+       {
+               .src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+                         FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+       }, {
+               .src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+                         FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+       }, {
+               .src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+                         FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+       }, {
+               .src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+               .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+                         FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+       },
+};
+
+static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+               if (src == fsu_link_info[i].src.chno &&
+                   sink == fsu_link_info[i].sink.chno)
+                       return &fsu_link_info[i];
+       }
+
+       return NULL;
+}
+
+/*
+ * Links a source channel to a sink channel in the FSU.
+ */
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+       const struct fsu_link_info *link;
+       u32 src_reg, sink_reg;
+       unsigned long flags;
+
+       link = find_fsu_link_info(src_ch, sink_ch);
+       if (!link)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ipu->lock, flags);
+
+       if (link->src.mask) {
+               src_reg = ipu_cm_read(ipu, link->src.reg);
+               src_reg &= ~link->src.mask;
+               src_reg |= link->src.val;
+               ipu_cm_write(ipu, src_reg, link->src.reg);
+       }
+
+       if (link->sink.mask) {
+               sink_reg = ipu_cm_read(ipu, link->sink.reg);
+               sink_reg &= ~link->sink.mask;
+               sink_reg |= link->sink.val;
+               ipu_cm_write(ipu, sink_reg, link->sink.reg);
+       }
+
+       spin_unlock_irqrestore(&ipu->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_link);
+
+/*
+ * Unlinks source and sink channels in the FSU.
+ */
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+       const struct fsu_link_info *link;
+       u32 src_reg, sink_reg;
+       unsigned long flags;
+
+       link = find_fsu_link_info(src_ch, sink_ch);
+       if (!link)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ipu->lock, flags);
+
+       if (link->src.mask) {
+               src_reg = ipu_cm_read(ipu, link->src.reg);
+               src_reg &= ~link->src.mask;
+               ipu_cm_write(ipu, src_reg, link->src.reg);
+       }
+
+       if (link->sink.mask) {
+               sink_reg = ipu_cm_read(ipu, link->sink.reg);
+               sink_reg &= ~link->sink.mask;
+               ipu_cm_write(ipu, sink_reg, link->sink.reg);
+       }
+
+       spin_unlock_irqrestore(&ipu->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+
+/* Link IDMAC channels in the FSU */
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+       return ipu_fsu_link(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_link);
+
+/* Unlink IDMAC channels in the FSU */
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+       return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
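+
+/*
+ * Hypothetical usage (pairing taken from fsu_link_info[] above): route
+ * the IC post-processor output through the rotator and undo it later.
+ * ipu_fsu_link() returns -EINVAL for pairings with no table entry.
+ *
+ *   ret = ipu_fsu_link(ipu, IPUV3_CHANNEL_IC_PP_MEM,
+ *                      IPUV3_CHANNEL_MEM_ROT_PP);
+ *   ...
+ *   ipu_fsu_unlink(ipu, IPUV3_CHANNEL_IC_PP_MEM,
+ *                  IPUV3_CHANNEL_MEM_ROT_PP);
+ */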
+
 struct ipu_devtype {
        const char *name;
        unsigned long cm_ofs;
@@ -833,6 +970,20 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
                goto err_ic;
        }
 
+       ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+                          IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+                          IPU_CONF_IC_INPUT);
+       if (ret) {
+               unit = "vdi";
+               goto err_vdi;
+       }
+
+       ret = ipu_image_convert_init(ipu, dev);
+       if (ret) {
+               unit = "image_convert";
+               goto err_image_convert;
+       }
+
        ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
                          IPU_CONF_DI0_EN, ipu_clk);
        if (ret) {
@@ -887,6 +1038,10 @@ err_dc:
 err_di_1:
        ipu_di_exit(ipu, 0);
 err_di_0:
+       ipu_image_convert_exit(ipu);
+err_image_convert:
+       ipu_vdi_exit(ipu);
+err_vdi:
        ipu_ic_exit(ipu);
 err_ic:
        ipu_csi_exit(ipu, 1);
@@ -971,6 +1126,8 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
        ipu_dc_exit(ipu);
        ipu_di_exit(ipu, 1);
        ipu_di_exit(ipu, 0);
+       ipu_image_convert_exit(ipu);
+       ipu_vdi_exit(ipu);
        ipu_ic_exit(ipu);
        ipu_csi_exit(ipu, 1);
        ipu_csi_exit(ipu, 0);
@@ -1004,14 +1161,14 @@ static struct ipu_platform_reg client_reg[] = {
                        .dma[0] = IPUV3_CHANNEL_CSI0,
                        .dma[1] = -EINVAL,
                },
-               .name = "imx-ipuv3-camera",
+               .name = "imx-ipuv3-csi",
        }, {
                .pdata = {
                        .csi = 1,
                        .dma[0] = IPUV3_CHANNEL_CSI1,
                        .dma[1] = -EINVAL,
                },
-               .name = "imx-ipuv3-camera",
+               .name = "imx-ipuv3-csi",
        }, {
                .pdata = {
                        .di = 0,
@@ -1207,15 +1364,16 @@ EXPORT_SYMBOL_GPL(ipu_dump);
 
 static int ipu_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-                       of_match_device(imx_ipu_dt_ids, &pdev->dev);
+       struct device_node *np = pdev->dev.of_node;
        struct ipu_soc *ipu;
        struct resource *res;
        unsigned long ipu_base;
        int i, ret, irq_sync, irq_err;
        const struct ipu_devtype *devtype;
 
-       devtype = of_id->data;
+       devtype = of_device_get_match_data(&pdev->dev);
+       if (!devtype)
+               return -EINVAL;
 
        irq_sync = platform_get_irq(pdev, 0);
        irq_err = platform_get_irq(pdev, 1);
@@ -1237,6 +1395,7 @@ static int ipu_probe(struct platform_device *pdev)
                ipu->channel[i].ipu = ipu;
        ipu->devtype = devtype;
        ipu->ipu_type = devtype->type;
+       ipu->id = of_alias_get_id(np, "ipu");
 
        spin_lock_init(&ipu->lock);
        mutex_init(&ipu->channel_lock);
index 6494a4d2817149e06e1ab241ccb10552d52dc553..fcb7dc86167b385dee4c866f7c20c07fb361f195 100644 (file)
@@ -253,6 +253,13 @@ void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
 
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
+{
+       ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
+       ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
+
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
 {
        ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
@@ -268,6 +275,12 @@ void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
 
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
+{
+       return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
+
 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
 {
        ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
index 06631ac61b045ca35de0fd54b78095252096c9cd..d6e5ded2441813949709f230a389a812851eac84 100644 (file)
@@ -258,12 +258,8 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
                cfg->data_width = IPU_CSI_DATA_WIDTH_8;
                break;
        case MEDIA_BUS_FMT_UYVY8_1X16:
-               cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
-               cfg->mipi_dt = MIPI_DT_YUV422;
-               cfg->data_width = IPU_CSI_DATA_WIDTH_16;
-               break;
        case MEDIA_BUS_FMT_YUYV8_1X16:
-               cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+               cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
                cfg->mipi_dt = MIPI_DT_YUV422;
                cfg->data_width = IPU_CSI_DATA_WIDTH_16;
                break;
@@ -365,10 +361,14 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
 {
        struct ipu_csi_bus_config cfg;
        unsigned long flags;
-       u32 data = 0;
+       u32 width, height, data = 0;
 
        fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
 
+       /* set default sensor frame width and height */
+       width = mbus_fmt->width;
+       height = mbus_fmt->height;
+
        /* Set the CSI_SENS_CONF register remaining fields */
        data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
                cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
@@ -386,11 +386,6 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
 
        ipu_csi_write(csi, data, CSI_SENS_CONF);
 
-       /* Setup sensor frame size */
-       ipu_csi_write(csi,
-                     (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
-                     CSI_SENS_FRM_SIZE);
-
        /* Set CCIR registers */
 
        switch (cfg.clk_mode) {
@@ -408,11 +403,12 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
                         * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
                         * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
                         */
+                       height = 625; /* framelines for PAL */
+
                        ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
                                          CSI_CCIR_CODE_1);
                        ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
                        ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
-
                } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
                        /*
                         * NTSC case
@@ -422,6 +418,8 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
                         * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
                         * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
                         */
+                       height = 525; /* framelines for NTSC */
+
                        ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
                                          CSI_CCIR_CODE_1);
                        ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
@@ -447,6 +445,10 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
                break;
        }
 
+       /* Setup sensor frame size */
+       ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
+                     CSI_SENS_FRM_SIZE);
+
        dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
                ipu_csi_read(csi, CSI_SENS_CONF));
        dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
index 42705bb5aaa36b6e42d495032df6758ef9a0ddcd..a40f211f382faf86678e3419ecd0bf4014b95ac6 100644 (file)
@@ -123,20 +123,6 @@ int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
 
-static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-       while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
-               if (time_after(jiffies, timeout)) {
-                       dev_warn(priv->dev,
-                                "Timeout waiting for DMFC FIFOs to clear\n");
-                       break;
-               }
-               cpu_relax();
-       }
-}
-
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 {
        struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -145,10 +131,8 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 
        priv->use_count--;
 
-       if (!priv->use_count) {
-               ipu_dmfc_wait_fifos(priv);
+       if (!priv->use_count)
                ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
-       }
 
        if (priv->use_count < 0)
                priv->use_count = 0;
index 1dcb96ccda660f17a2f6fdd436d6474a115ac156..321eb983c2f59e4a3841e4a979eb1f15c2285235 100644 (file)
@@ -160,6 +160,7 @@ struct ipu_ic_priv {
        spinlock_t lock;
        struct ipu_soc *ipu;
        int use_count;
+       int irt_use_count;
        struct ipu_ic task[IC_NUM_TASKS];
 };
 
@@ -379,8 +380,6 @@ void ipu_ic_task_disable(struct ipu_ic *ic)
 
        ipu_ic_write(ic, ic_conf, IC_CONF);
 
-       ic->rotation = ic->graphics = false;
-
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
@@ -620,7 +619,7 @@ int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
        ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
        ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
 
-       if (rot >= IPU_ROTATE_90_RIGHT)
+       if (ipu_rot_mode_is_irt(rot))
                ic->rotation = true;
 
 unlock:
@@ -629,22 +628,41 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
 
+static void ipu_irt_enable(struct ipu_ic *ic)
+{
+       struct ipu_ic_priv *priv = ic->priv;
+
+       if (!priv->irt_use_count)
+               ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
+
+       priv->irt_use_count++;
+}
+
+static void ipu_irt_disable(struct ipu_ic *ic)
+{
+       struct ipu_ic_priv *priv = ic->priv;
+
+       if (priv->irt_use_count) {
+               if (!--priv->irt_use_count)
+                       ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
+       }
+}
+
 int ipu_ic_enable(struct ipu_ic *ic)
 {
        struct ipu_ic_priv *priv = ic->priv;
        unsigned long flags;
-       u32 module = IPU_CONF_IC_EN;
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       if (ic->rotation)
-               module |= IPU_CONF_ROT_EN;
-
        if (!priv->use_count)
-               ipu_module_enable(priv->ipu, module);
+               ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
 
        priv->use_count++;
 
+       if (ic->rotation)
+               ipu_irt_enable(ic);
+
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return 0;
@@ -655,18 +673,22 @@ int ipu_ic_disable(struct ipu_ic *ic)
 {
        struct ipu_ic_priv *priv = ic->priv;
        unsigned long flags;
-       u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
 
        spin_lock_irqsave(&priv->lock, flags);
 
        priv->use_count--;
 
        if (!priv->use_count)
-               ipu_module_disable(priv->ipu, module);
+               ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
 
        if (priv->use_count < 0)
                priv->use_count = 0;
 
+       if (ic->rotation)
+               ipu_irt_disable(ic);
+
+       ic->rotation = ic->graphics = false;
+
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return 0;
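
The new irt_use_count exists because the old code only OR'd IPU_CONF_ROT_EN into the enable mask when the overall IC use count was zero: if a non-rotating context enabled the IC first, a later rotating context never powered the rotator. The per-IRT refcount, reduced to its core:

    if (!priv->irt_use_count++)                        /* first rotating user */
            ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
    /* ... */
    if (priv->irt_use_count && !--priv->irt_use_count) /* last rotating user */
            ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);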
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644 (file)
index 0000000..2ba7d43
--- /dev/null
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <video/imx-ipu-image-convert.h>
+#include "ipu-prv.h"
+
+/*
+ * The IC Resizer has a restriction that the output frame from the
+ * resizer must be 1024 or less in both width (pixels) and height
+ * (lines).
+ *
+ * The image converter attempts to split up a conversion when
+ * the desired output (converted) frame resolution exceeds the
+ * IC resizer limit of 1024 in either dimension.
+ *
+ * If either dimension of the output frame exceeds the limit, the
+ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+ * of 4*4 or 16 tiles. A conversion is then carried out for each
+ * tile (but taking care to pass the full frame stride length to
+ * the DMA channel's parameter memory!). IDMA double-buffering is used
+ * to convert each tile back-to-back when possible (see note below
+ * when double_buffering boolean is set).
+ *
+ * Note that the input frame must be split up into the same number
+ * of tiles as the output frame.
+ *
+ * FIXME: at this point there is no attempt to deal with visible seams
+ * at the tile boundaries when upscaling. The seams are caused by a reset
+ * of the bilinear upscale interpolation when starting a new tile. The
+ * seams are barely visible for small upscale factors, but become
+ * increasingly visible as the upscale factor gets larger, since more
+ * interpolated pixels get thrown out at the tile boundaries. A possible
+ * fix might be to overlap tiles of different sizes, but this must be done
+ * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
+ * alignment restrictions of each tile.
+ */
+
+#define MAX_STRIPES_W    4
+#define MAX_STRIPES_H    4
+#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+
+#define MIN_W     16
+#define MIN_H     8
+#define MAX_W     4096
+#define MAX_H     4096
+
+enum ipu_image_convert_type {
+       IMAGE_CONVERT_IN = 0,
+       IMAGE_CONVERT_OUT,
+};
+
+struct ipu_image_convert_dma_buf {
+       void          *virt;
+       dma_addr_t    phys;
+       unsigned long len;
+};
+
+struct ipu_image_convert_dma_chan {
+       int in;
+       int out;
+       int rot_in;
+       int rot_out;
+       int vdi_in_p;
+       int vdi_in;
+       int vdi_in_n;
+};
+
+/* dimensions of one tile */
+struct ipu_image_tile {
+       u32 width;
+       u32 height;
+       /* size and strides are in bytes */
+       u32 size;
+       u32 stride;
+       u32 rot_stride;
+       /* start Y or packed offset of this tile */
+       u32 offset;
+       /* offset from start to tile in U plane, for planar formats */
+       u32 u_off;
+       /* offset from start to tile in V plane, for planar formats */
+       u32 v_off;
+};
+
+struct ipu_image_convert_image {
+       struct ipu_image base;
+       enum ipu_image_convert_type type;
+
+       const struct ipu_image_pixfmt *fmt;
+       unsigned int stride;
+
+       /* # of rows (horizontal stripes) if dest height is > 1024 */
+       unsigned int num_rows;
+       /* # of columns (vertical stripes) if dest width is > 1024 */
+       unsigned int num_cols;
+
+       struct ipu_image_tile tile[MAX_TILES];
+};
+
+struct ipu_image_pixfmt {
+       u32     fourcc;        /* V4L2 fourcc */
+       int     bpp;           /* total bpp */
+       int     uv_width_dec;  /* decimation in width for U/V planes */
+       int     uv_height_dec; /* decimation in height for U/V planes */
+       bool    planar;        /* planar format */
+       bool    uv_swapped;    /* U and V planes are swapped */
+       bool    uv_packed;     /* partial planar (U and V in same plane) */
+};
+
+struct ipu_image_convert_ctx;
+struct ipu_image_convert_chan;
+struct ipu_image_convert_priv;
+
+struct ipu_image_convert_ctx {
+       struct ipu_image_convert_chan *chan;
+
+       ipu_image_convert_cb_t complete;
+       void *complete_context;
+
+       /* Source/destination image data and rotation mode */
+       struct ipu_image_convert_image in;
+       struct ipu_image_convert_image out;
+       enum ipu_rotate_mode rot_mode;
+
+       /* intermediate buffer for rotation */
+       struct ipu_image_convert_dma_buf rot_intermediate[2];
+
+       /* current buffer number for double buffering */
+       int cur_buf_num;
+
+       bool aborting;
+       struct completion aborted;
+
+       /* can we use double-buffering for this conversion operation? */
+       bool double_buffering;
+       /* num_rows * num_cols */
+       unsigned int num_tiles;
+       /* next tile to process */
+       unsigned int next_tile;
+       /* where to place converted tile in dest image */
+       unsigned int out_tile_map[MAX_TILES];
+
+       struct list_head list;
+};
+
+struct ipu_image_convert_chan {
+       struct ipu_image_convert_priv *priv;
+
+       enum ipu_ic_task ic_task;
+       const struct ipu_image_convert_dma_chan *dma_ch;
+
+       struct ipu_ic *ic;
+       struct ipuv3_channel *in_chan;
+       struct ipuv3_channel *out_chan;
+       struct ipuv3_channel *rotation_in_chan;
+       struct ipuv3_channel *rotation_out_chan;
+
+       /* the IPU end-of-frame irqs */
+       int out_eof_irq;
+       int rot_out_eof_irq;
+
+       spinlock_t irqlock;
+
+       /* list of convert contexts */
+       struct list_head ctx_list;
+       /* queue of conversion runs */
+       struct list_head pending_q;
+       /* queue of completed runs */
+       struct list_head done_q;
+
+       /* the current conversion run */
+       struct ipu_image_convert_run *current_run;
+};
+
+struct ipu_image_convert_priv {
+       struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+       struct ipu_soc *ipu;
+};
+
+static const struct ipu_image_convert_dma_chan
+image_convert_dma_chan[IC_NUM_TASKS] = {
+       [IC_TASK_VIEWFINDER] = {
+               .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+               .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+               .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+               .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+               .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+               .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+               .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+       },
+       [IC_TASK_POST_PROCESSOR] = {
+               .in = IPUV3_CHANNEL_MEM_IC_PP,
+               .out = IPUV3_CHANNEL_IC_PP_MEM,
+               .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+               .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+       },
+};
+
+static const struct ipu_image_pixfmt image_convert_formats[] = {
+       {
+               .fourcc = V4L2_PIX_FMT_RGB565,
+               .bpp    = 16,
+       }, {
+               .fourcc = V4L2_PIX_FMT_RGB24,
+               .bpp    = 24,
+       }, {
+               .fourcc = V4L2_PIX_FMT_BGR24,
+               .bpp    = 24,
+       }, {
+               .fourcc = V4L2_PIX_FMT_RGB32,
+               .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_BGR32,
+               .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUYV,
+               .bpp    = 16,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_UYVY,
+               .bpp    = 16,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUV420,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YVU420,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+               .uv_swapped = true,
+       }, {
+               .fourcc = V4L2_PIX_FMT_NV12,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+               .uv_packed = true,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUV422P,
+               .bpp    = 16,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_NV16,
+               .bpp    = 16,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+               .uv_packed = true,
+       },
+};
+
+static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+{
+       const struct ipu_image_pixfmt *ret = NULL;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+               if (image_convert_formats[i].fourcc == fourcc) {
+                       ret = &image_convert_formats[i];
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void dump_format(struct ipu_image_convert_ctx *ctx,
+                       struct ipu_image_convert_image *ic_image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev,
+               "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+               chan->ic_task, ctx,
+               ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+               ic_image->base.pix.width, ic_image->base.pix.height,
+               ic_image->num_cols, ic_image->num_rows,
+               ic_image->tile[0].width, ic_image->tile[0].height,
+               ic_image->fmt->fourcc & 0xff,
+               (ic_image->fmt->fourcc >> 8) & 0xff,
+               (ic_image->fmt->fourcc >> 16) & 0xff,
+               (ic_image->fmt->fourcc >> 24) & 0xff);
+}
+
+int ipu_image_convert_enum_format(int index, u32 *fourcc)
+{
+       const struct ipu_image_pixfmt *fmt;
+
+       if (index < 0 ||
+           index >= (int)ARRAY_SIZE(image_convert_formats))
+               return -EINVAL;
+
+       /* Format found */
+       fmt = &image_convert_formats[index];
+       *fourcc = fmt->fourcc;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+
+static void free_dma_buf(struct ipu_image_convert_priv *priv,
+                        struct ipu_image_convert_dma_buf *buf)
+{
+       if (buf->virt)
+               dma_free_coherent(priv->ipu->dev,
+                                 buf->len, buf->virt, buf->phys);
+       buf->virt = NULL;
+       buf->phys = 0;
+}
+
+static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+                        struct ipu_image_convert_dma_buf *buf,
+                        int size)
+{
+       buf->len = PAGE_ALIGN(size);
+       buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+                                      GFP_DMA | GFP_KERNEL);
+       if (!buf->virt) {
+               dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
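+/*
+ * Pick the number of stripes (1, 2 or 4) needed so that each tile
+ * dimension stays at or below 1024 pixels, the largest size handled
+ * per pass here.
+ */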
+static inline int num_stripes(int dim)
+{
+       if (dim <= 1024)
+               return 1;
+       else if (dim <= 2048)
+               return 2;
+       else
+               return 4;
+}
+
+static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+                                struct ipu_image_convert_image *image)
+{
+       int i;
+
+       for (i = 0; i < ctx->num_tiles; i++) {
+               struct ipu_image_tile *tile = &image->tile[i];
+
+               tile->height = image->base.pix.height / image->num_rows;
+               tile->width = image->base.pix.width / image->num_cols;
+               tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+                       tile->width;
+
+               if (image->fmt->planar) {
+                       tile->stride = tile->width;
+                       tile->rot_stride = tile->height;
+               } else {
+                       tile->stride =
+                               (image->fmt->bpp * tile->width) >> 3;
+                       tile->rot_stride =
+                               (image->fmt->bpp * tile->height) >> 3;
+               }
+       }
+}
+
+/*
+ * Use the rotation transformation to find the tile coordinates
+ * (row, col) of a tile in the destination frame that corresponds
+ * to the given tile coordinates of a source frame. The destination
+ * coordinate is then converted to a tile index.
+ */
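+/*
+ * For example, with a 2x2 grid and only IPU_ROT_BIT_90 set, source
+ * tile (row 0, col 0) becomes (row -1, col -1) relative to the image
+ * center, rotates to (row -1, col 1), and translates back to
+ * destination tile (row 0, col 1), i.e. output tile index 1.
+ */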
+static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+                               int src_row, int src_col)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       int dst_row, dst_col;
+
+       /* with no rotation it's a 1:1 mapping */
+       if (ctx->rot_mode == IPU_ROTATE_NONE)
+               return src_row * s_image->num_cols + src_col;
+
+       /*
+        * before doing the transform, first we have to translate
+        * source row,col for an origin in the center of s_image
+        */
+       src_row = src_row * 2 - (s_image->num_rows - 1);
+       src_col = src_col * 2 - (s_image->num_cols - 1);
+
+       /* do the rotation transform */
+       if (ctx->rot_mode & IPU_ROT_BIT_90) {
+               dst_col = -src_row;
+               dst_row = src_col;
+       } else {
+               dst_col = src_col;
+               dst_row = src_row;
+       }
+
+       /* apply flip */
+       if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+               dst_col = -dst_col;
+       if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+               dst_row = -dst_row;
+
+       dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+               chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+
+       /*
+        * finally translate dest row,col using an origin in upper
+        * left of d_image
+        */
+       dst_row += d_image->num_rows - 1;
+       dst_col += d_image->num_cols - 1;
+       dst_row /= 2;
+       dst_col /= 2;
+
+       return dst_row * d_image->num_cols + dst_col;
+}
+
+/*
+ * Fill the out_tile_map[] with transformed destination tile indices.
+ */
+static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       unsigned int row, col, tile = 0;
+
+       for (row = 0; row < s_image->num_rows; row++) {
+               for (col = 0; col < s_image->num_cols; col++) {
+                       ctx->out_tile_map[tile] =
+                               transform_tile_index(ctx, row, col);
+                       tile++;
+               }
+       }
+}
+
+static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+                                    struct ipu_image_convert_image *image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       const struct ipu_image_pixfmt *fmt = image->fmt;
+       unsigned int row, col, tile = 0;
+       u32 H, w, h, y_stride, uv_stride;
+       u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
+       u32 y_row_off, y_col_off, y_off;
+       u32 y_size, uv_size;
+
+       /* set up some convenience vars */
+       H = image->base.pix.height;
+
+       y_stride = image->stride;
+       uv_stride = y_stride / fmt->uv_width_dec;
+       if (fmt->uv_packed)
+               uv_stride *= 2;
+
+       y_size = H * y_stride;
+       uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
+
+       for (row = 0; row < image->num_rows; row++) {
+               w = image->tile[tile].width;
+               h = image->tile[tile].height;
+               y_row_off = row * h * y_stride;
+               uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+
+               for (col = 0; col < image->num_cols; col++) {
+                       y_col_off = col * w;
+                       uv_col_off = y_col_off / fmt->uv_width_dec;
+                       if (fmt->uv_packed)
+                               uv_col_off *= 2;
+
+                       y_off = y_row_off + y_col_off;
+                       uv_off = uv_row_off + uv_col_off;
+
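+                       /*
+                        * The per-tile U/V offsets are taken relative
+                        * to the tile's Y base address (y_off): the U
+                        * plane starts y_size bytes into the buffer,
+                        * so the tile's U data sits at
+                        * y_size - y_off + uv_off from the tile base.
+                        */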
+                       u_off = y_size - y_off + uv_off;
+                       v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
+                       if (fmt->uv_swapped) {
+                               tmp = u_off;
+                               u_off = v_off;
+                               v_off = tmp;
+                       }
+
+                       image->tile[tile].offset = y_off;
+                       image->tile[tile].u_off = u_off;
+                       image->tile[tile++].v_off = v_off;
+
+                       dev_dbg(priv->ipu->dev,
+                               "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
+                               chan->ic_task, ctx,
+                               image->type == IMAGE_CONVERT_IN ?
+                               "Input" : "Output", row, col,
+                               y_off, u_off, v_off);
+               }
+       }
+}
+
+static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+                                    struct ipu_image_convert_image *image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       const struct ipu_image_pixfmt *fmt = image->fmt;
+       unsigned int row, col, tile = 0;
+       u32 w, h, bpp, stride;
+       u32 row_off, col_off;
+
+       /* set up some convenience vars */
+       stride = image->stride;
+       bpp = fmt->bpp;
+
+       for (row = 0; row < image->num_rows; row++) {
+               w = image->tile[tile].width;
+               h = image->tile[tile].height;
+               row_off = row * h * stride;
+
+               for (col = 0; col < image->num_cols; col++) {
+                       col_off = (col * w * bpp) >> 3;
+
+                       image->tile[tile].offset = row_off + col_off;
+                       image->tile[tile].u_off = 0;
+                       image->tile[tile++].v_off = 0;
+
+                       dev_dbg(priv->ipu->dev,
+                               "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
+                               chan->ic_task, ctx,
+                               image->type == IMAGE_CONVERT_IN ?
+                               "Input" : "Output", row, col,
+                               row_off + col_off);
+               }
+       }
+}
+
+static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+                             struct ipu_image_convert_image *image)
+{
+       if (image->fmt->planar)
+               calc_tile_offsets_planar(ctx, image);
+       else
+               calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * return the number of runs in the given queue (pending_q or done_q)
+ * for this context. hold irqlock when calling.
+ */
+static int get_run_count(struct ipu_image_convert_ctx *ctx,
+                        struct list_head *q)
+{
+       struct ipu_image_convert_run *run;
+       int count = 0;
+
+       lockdep_assert_held(&ctx->chan->irqlock);
+
+       list_for_each_entry(run, q, list) {
+               if (run->ctx == ctx)
+                       count++;
+       }
+
+       return count;
+}
+
+static void convert_stop(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
+               __func__, chan->ic_task, ctx, run);
+
+       /* disable IC tasks and the channels */
+       ipu_ic_task_disable(chan->ic);
+       ipu_idmac_disable_channel(chan->in_chan);
+       ipu_idmac_disable_channel(chan->out_chan);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_idmac_disable_channel(chan->rotation_in_chan);
+               ipu_idmac_disable_channel(chan->rotation_out_chan);
+               ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
+       }
+
+       ipu_ic_disable(chan->ic);
+}
+
+static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
+                              struct ipuv3_channel *channel,
+                              struct ipu_image_convert_image *image,
+                              enum ipu_rotate_mode rot_mode,
+                              bool rot_swap_width_height)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       unsigned int burst_size;
+       u32 width, height, stride;
+       dma_addr_t addr0, addr1 = 0;
+       struct ipu_image tile_image;
+       unsigned int tile_idx[2];
+
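+       /*
+        * Tiles are processed in source scan order, so for the output
+        * image the first two buffer addresses come from out_tile_map
+        * rather than from tile indices 0 and 1 directly.
+        */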
+       if (image->type == IMAGE_CONVERT_OUT) {
+               tile_idx[0] = ctx->out_tile_map[0];
+               tile_idx[1] = ctx->out_tile_map[1];
+       } else {
+               tile_idx[0] = 0;
+               tile_idx[1] = 1;
+       }
+
+       if (rot_swap_width_height) {
+               width = image->tile[0].height;
+               height = image->tile[0].width;
+               stride = image->tile[0].rot_stride;
+               addr0 = ctx->rot_intermediate[0].phys;
+               if (ctx->double_buffering)
+                       addr1 = ctx->rot_intermediate[1].phys;
+       } else {
+               width = image->tile[0].width;
+               height = image->tile[0].height;
+               stride = image->stride;
+               addr0 = image->base.phys0 +
+                       image->tile[tile_idx[0]].offset;
+               if (ctx->double_buffering)
+                       addr1 = image->base.phys0 +
+                               image->tile[tile_idx[1]].offset;
+       }
+
+       ipu_cpmem_zero(channel);
+
+       memset(&tile_image, 0, sizeof(tile_image));
+       tile_image.pix.width = tile_image.rect.width = width;
+       tile_image.pix.height = tile_image.rect.height = height;
+       tile_image.pix.bytesperline = stride;
+               tile_image.pix.pixelformat = image->fmt->fourcc;
+       tile_image.phys0 = addr0;
+       tile_image.phys1 = addr1;
+       ipu_cpmem_set_image(channel, &tile_image);
+
+       if (image->fmt->planar && !rot_swap_width_height)
+               ipu_cpmem_set_uv_offset(channel,
+                                       image->tile[tile_idx[0]].u_off,
+                                       image->tile[tile_idx[0]].v_off);
+
+       if (rot_mode)
+               ipu_cpmem_set_rotation(channel, rot_mode);
+
+       if (channel == chan->rotation_in_chan ||
+           channel == chan->rotation_out_chan) {
+               burst_size = 8;
+               ipu_cpmem_set_block_mode(channel);
+       } else {
+               burst_size = (width % 16) ? 8 : 16;
+       }
+
+       ipu_cpmem_set_burstsize(channel, burst_size);
+
+       ipu_ic_task_idma_init(chan->ic, channel, width, height,
+                             burst_size, rot_mode);
+
+       ipu_cpmem_set_axi_id(channel, 1);
+
+       ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
+}
+
+static int convert_start(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       enum ipu_color_space src_cs, dest_cs;
+       unsigned int dest_width, dest_height;
+       int ret;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
+               __func__, chan->ic_task, ctx, run);
+
+       src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
+       dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* swap width/height for resizer */
+               dest_width = d_image->tile[0].height;
+               dest_height = d_image->tile[0].width;
+       } else {
+               dest_width = d_image->tile[0].width;
+               dest_height = d_image->tile[0].height;
+       }
+
+       /* setup the IC resizer and CSC */
+       ret = ipu_ic_task_init(chan->ic,
+                              s_image->tile[0].width,
+                              s_image->tile[0].height,
+                              dest_width,
+                              dest_height,
+                              src_cs, dest_cs);
+       if (ret) {
+               dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
+               return ret;
+       }
+
+       /* init the source MEM-->IC PP IDMAC channel */
+       init_idmac_channel(ctx, chan->in_chan, s_image,
+                          IPU_ROTATE_NONE, false);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* init the IC PP-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->out_chan, d_image,
+                                  IPU_ROTATE_NONE, true);
+
+               /* init the MEM-->IC PP ROT IDMAC channel */
+               init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
+                                  ctx->rot_mode, true);
+
+               /* init the destination IC PP ROT-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
+                                  IPU_ROTATE_NONE, false);
+
+               /* now link IC PP-->MEM to MEM-->IC PP ROT */
+               ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
+       } else {
+               /* init the destination IC PP-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->out_chan, d_image,
+                                  ctx->rot_mode, false);
+       }
+
+       /* enable the IC */
+       ipu_ic_enable(chan->ic);
+
+       /* set buffers ready */
+       ipu_idmac_select_buffer(chan->in_chan, 0);
+       ipu_idmac_select_buffer(chan->out_chan, 0);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode))
+               ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
+       if (ctx->double_buffering) {
+               ipu_idmac_select_buffer(chan->in_chan, 1);
+               ipu_idmac_select_buffer(chan->out_chan, 1);
+               if (ipu_rot_mode_is_irt(ctx->rot_mode))
+                       ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
+       }
+
+       /* enable the channels! */
+       ipu_idmac_enable_channel(chan->in_chan);
+       ipu_idmac_enable_channel(chan->out_chan);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_idmac_enable_channel(chan->rotation_in_chan);
+               ipu_idmac_enable_channel(chan->rotation_out_chan);
+       }
+
+       ipu_ic_task_enable(chan->ic);
+
+       ipu_cpmem_dump(chan->in_chan);
+       ipu_cpmem_dump(chan->out_chan);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_cpmem_dump(chan->rotation_in_chan);
+               ipu_cpmem_dump(chan->rotation_out_chan);
+       }
+
+       ipu_dump(priv->ipu);
+
+       return 0;
+}
+
+/* hold irqlock when calling */
+static int do_run(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       ctx->in.base.phys0 = run->in_phys;
+       ctx->out.base.phys0 = run->out_phys;
+
+       ctx->cur_buf_num = 0;
+       ctx->next_tile = 1;
+
+       /* remove run from pending_q and set as current */
+       list_del(&run->list);
+       chan->current_run = run;
+
+       return convert_start(run);
+}
+
+/* hold irqlock when calling */
+static void run_next(struct ipu_image_convert_chan *chan)
+{
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run, *tmp;
+       int ret;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+               /* skip contexts that are aborting */
+               if (run->ctx->aborting) {
+                       dev_dbg(priv->ipu->dev,
+                               "%s: task %u: skipping aborting ctx %p run %p\n",
+                               __func__, chan->ic_task, run->ctx, run);
+                       continue;
+               }
+
+               ret = do_run(run);
+               if (!ret)
+                       break;
+
+               /*
+                * something went wrong with start, add the run
+                * to done q and continue to the next run in the
+                * pending q.
+                */
+               run->status = ret;
+               list_add_tail(&run->list, &chan->done_q);
+               chan->current_run = NULL;
+       }
+}
+
+static void empty_done_q(struct ipu_image_convert_chan *chan)
+{
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       while (!list_empty(&chan->done_q)) {
+               run = list_entry(chan->done_q.next,
+                                struct ipu_image_convert_run,
+                                list);
+
+               list_del(&run->list);
+
+               dev_dbg(priv->ipu->dev,
+                       "%s: task %u: completing ctx %p run %p with %d\n",
+                       __func__, chan->ic_task, run->ctx, run, run->status);
+
+               /* call the completion callback and free the run */
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               run->ctx->complete(run, run->ctx->complete_context);
+               spin_lock_irqsave(&chan->irqlock, flags);
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+}
+
+/*
+ * the bottom half thread clears out the done_q, calling the
+ * completion handler for each.
+ */
+static irqreturn_t do_bh(int irq, void *dev_id)
+{
+       struct ipu_image_convert_chan *chan = dev_id;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
+               chan->ic_task);
+
+       empty_done_q(chan);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /*
+        * the done_q is cleared out, signal any contexts
+        * that are aborting that abort can complete.
+        */
+       list_for_each_entry(ctx, &chan->ctx_list, list) {
+               if (ctx->aborting) {
+                       dev_dbg(priv->ipu->dev,
+                               "%s: task %u: signaling abort for ctx %p\n",
+                               __func__, chan->ic_task, ctx);
+                       complete(&ctx->aborted);
+               }
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
+               chan->ic_task);
+
+       return IRQ_HANDLED;
+}
+
+/* hold irqlock when calling */
+static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_tile *src_tile, *dst_tile;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       struct ipuv3_channel *outch;
+       unsigned int dst_idx;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
+               chan->rotation_out_chan : chan->out_chan;
+
+       /*
+        * It is difficult to stop the channel DMA before the channels
+        * enter the paused state. Without double-buffering the channels
+        * are always in a paused state when the EOF irq occurs, so it
+        * is safe to stop the channels now. For double-buffering we
+        * just ignore the abort until the operation completes, when it
+        * is safe to shut down.
+        */
+       if (ctx->aborting && !ctx->double_buffering) {
+               convert_stop(run);
+               run->status = -EIO;
+               goto done;
+       }
+
+       if (ctx->next_tile == ctx->num_tiles) {
+               /*
+                * the conversion is complete
+                */
+               convert_stop(run);
+               run->status = 0;
+               goto done;
+       }
+
+       /*
+        * not done, place the next tile buffers.
+        */
+       if (!ctx->double_buffering) {
+
+               src_tile = &s_image->tile[ctx->next_tile];
+               dst_idx = ctx->out_tile_map[ctx->next_tile];
+               dst_tile = &d_image->tile[dst_idx];
+
+               ipu_cpmem_set_buffer(chan->in_chan, 0,
+                                    s_image->base.phys0 + src_tile->offset);
+               ipu_cpmem_set_buffer(outch, 0,
+                                    d_image->base.phys0 + dst_tile->offset);
+               if (s_image->fmt->planar)
+                       ipu_cpmem_set_uv_offset(chan->in_chan,
+                                               src_tile->u_off,
+                                               src_tile->v_off);
+               if (d_image->fmt->planar)
+                       ipu_cpmem_set_uv_offset(outch,
+                                               dst_tile->u_off,
+                                               dst_tile->v_off);
+
+               ipu_idmac_select_buffer(chan->in_chan, 0);
+               ipu_idmac_select_buffer(outch, 0);
+
+       } else if (ctx->next_tile < ctx->num_tiles - 1) {
+
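+               /*
+                * With double-buffering, the hardware has already
+                * started on tile next_tile in the other buffer, so
+                * prepare tile next_tile + 1 in the buffer that just
+                * completed.
+                */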
+               src_tile = &s_image->tile[ctx->next_tile + 1];
+               dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
+               dst_tile = &d_image->tile[dst_idx];
+
+               ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
+                                    s_image->base.phys0 + src_tile->offset);
+               ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
+                                    d_image->base.phys0 + dst_tile->offset);
+
+               ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
+               ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
+
+               ctx->cur_buf_num ^= 1;
+       }
+
+       ctx->next_tile++;
+       return IRQ_HANDLED;
+done:
+       list_add_tail(&run->list, &chan->done_q);
+       chan->current_run = NULL;
+       run_next(chan);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t norotate_irq(int irq, void *data)
+{
+       struct ipu_image_convert_chan *chan = data;
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+       irqreturn_t ret;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* get current run and its context */
+       run = chan->current_run;
+       if (!run) {
+               ret = IRQ_NONE;
+               goto out;
+       }
+
+       ctx = run->ctx;
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* this is a rotation operation, just ignore */
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               return IRQ_HANDLED;
+       }
+
+       ret = do_irq(run);
+out:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+
+static irqreturn_t rotate_irq(int irq, void *data)
+{
+       struct ipu_image_convert_chan *chan = data;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+       irqreturn_t ret;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* get current run and its context */
+       run = chan->current_run;
+       if (!run) {
+               ret = IRQ_NONE;
+               goto out;
+       }
+
+       ctx = run->ctx;
+
+       if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* this was NOT a rotation operation, shouldn't happen */
+               dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               return IRQ_HANDLED;
+       }
+
+       ret = do_irq(run);
+out:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+
+/*
+ * try to force the completion of runs for this ctx. Called when
+ * abort wait times out in ipu_image_convert_abort().
+ */
+static void force_abort(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       run = chan->current_run;
+       if (run && run->ctx == ctx) {
+               convert_stop(run);
+               run->status = -EIO;
+               list_add_tail(&run->list, &chan->done_q);
+               chan->current_run = NULL;
+               run_next(chan);
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       empty_done_q(chan);
+}
+
+static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+       if (chan->out_eof_irq >= 0)
+               free_irq(chan->out_eof_irq, chan);
+       if (chan->rot_out_eof_irq >= 0)
+               free_irq(chan->rot_out_eof_irq, chan);
+
+       if (!IS_ERR_OR_NULL(chan->in_chan))
+               ipu_idmac_put(chan->in_chan);
+       if (!IS_ERR_OR_NULL(chan->out_chan))
+               ipu_idmac_put(chan->out_chan);
+       if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
+               ipu_idmac_put(chan->rotation_in_chan);
+       if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
+               ipu_idmac_put(chan->rotation_out_chan);
+       if (!IS_ERR_OR_NULL(chan->ic))
+               ipu_ic_put(chan->ic);
+
+       chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+               chan->rotation_out_chan = NULL;
+       chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+}
+
+static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+       const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       int ret;
+
+       /* get IC */
+       chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
+       if (IS_ERR(chan->ic)) {
+               dev_err(priv->ipu->dev, "could not acquire IC\n");
+               ret = PTR_ERR(chan->ic);
+               goto err;
+       }
+
+       /* get IDMAC channels */
+       chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
+       chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
+       if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
+               dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
+       chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
+       if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
+               dev_err(priv->ipu->dev,
+                       "could not acquire idmac rotation channels\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       /* acquire the EOF interrupts */
+       chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+                                                 chan->out_chan,
+                                                 IPU_IRQ_EOF);
+
+       ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+                                  0, "ipu-ic", chan);
+       if (ret < 0) {
+               dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+                        chan->out_eof_irq);
+               chan->out_eof_irq = -1;
+               goto err;
+       }
+
+       chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+                                                    chan->rotation_out_chan,
+                                                    IPU_IRQ_EOF);
+
+       ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+                                  0, "ipu-ic", chan);
+       if (ret < 0) {
+               dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+                       chan->rot_out_eof_irq);
+               chan->rot_out_eof_irq = -1;
+               goto err;
+       }
+
+       return 0;
+err:
+       release_ipu_resources(chan);
+       return ret;
+}
+
+static int fill_image(struct ipu_image_convert_ctx *ctx,
+                     struct ipu_image_convert_image *ic_image,
+                     struct ipu_image *image,
+                     enum ipu_image_convert_type type)
+{
+       struct ipu_image_convert_priv *priv = ctx->chan->priv;
+
+       ic_image->base = *image;
+       ic_image->type = type;
+
+       ic_image->fmt = get_format(image->pix.pixelformat);
+       if (!ic_image->fmt) {
+               dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
+                       type == IMAGE_CONVERT_OUT ? "Output" : "Input");
+               return -EINVAL;
+       }
+
+       if (ic_image->fmt->planar)
+               ic_image->stride = ic_image->base.pix.width;
+       else
+               ic_image->stride = ic_image->base.pix.bytesperline;
+
+       calc_tile_dimensions(ctx, ic_image);
+       calc_tile_offsets(ctx, ic_image);
+
+       return 0;
+}
+
+/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
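+/*
+ * Clamps x to the aligned [min, max] range and rounds it to the
+ * nearest multiple of 2^align; e.g. clamp_align(1283, 128, 4096, 4)
+ * rounds to the nearest multiple of 16, giving 1280.
+ */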
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+                               unsigned int max, unsigned int align)
+{
+       /* Bits that must be zero to be aligned */
+       unsigned int mask = ~((1 << align) - 1);
+
+       /* Clamp to aligned min and max */
+       x = clamp(x, (min + ~mask) & mask, max & mask);
+
+       /* Round to nearest aligned value */
+       if (align)
+               x = (x + (1 << (align - 1))) & mask;
+
+       return x;
+}
+
+/*
+ * We have to adjust the tile width such that the tile physaddrs and
+ * U and V plane offsets are multiples of 8 bytes as required by
+ * the IPU DMA Controller. For the planar formats, this corresponds
+ * to a pixel alignment of 16 (but use a more formal equation since
+ * the variables are available). For all the packed formats, 8 is
+ * good enough.
+ */
+static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
+{
+       return fmt->planar ? 8 * fmt->uv_width_dec : 8;
+}
+
+/*
+ * For tile height alignment, we have to ensure that the output tile
+ * heights are multiples of 8 lines if the IRT is required by the
+ * given rotation mode (the IRT performs rotations on 8x8 blocks
+ * at a time). If the IRT is not used, or for input image tiles,
+ * 2 lines are good enough.
+ */
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+                                   enum ipu_rotate_mode rot_mode)
+{
+       return (type == IMAGE_CONVERT_OUT &&
+               ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
+}
+
+/* Adjusts input/output images to IPU restrictions */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+                             enum ipu_rotate_mode rot_mode)
+{
+       const struct ipu_image_pixfmt *infmt, *outfmt;
+       unsigned int num_in_rows, num_in_cols;
+       unsigned int num_out_rows, num_out_cols;
+       u32 w_align, h_align;
+
+       infmt = get_format(in->pix.pixelformat);
+       outfmt = get_format(out->pix.pixelformat);
+
+       /* set some default pixel formats if needed */
+       if (!infmt) {
+               in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+               infmt = get_format(V4L2_PIX_FMT_RGB24);
+       }
+       if (!outfmt) {
+               out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+               outfmt = get_format(V4L2_PIX_FMT_RGB24);
+       }
+
+       /* image converter does not handle fields */
+       in->pix.field = out->pix.field = V4L2_FIELD_NONE;
+
+       /* resizer cannot downsize more than 4:1 */
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               out->pix.height = max_t(__u32, out->pix.height,
+                                       in->pix.width / 4);
+               out->pix.width = max_t(__u32, out->pix.width,
+                                      in->pix.height / 4);
+       } else {
+               out->pix.width = max_t(__u32, out->pix.width,
+                                      in->pix.width / 4);
+               out->pix.height = max_t(__u32, out->pix.height,
+                                       in->pix.height / 4);
+       }
+
+       /* get tiling rows/cols from output format */
+       num_out_rows = num_stripes(out->pix.height);
+       num_out_cols = num_stripes(out->pix.width);
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               num_in_rows = num_out_cols;
+               num_in_cols = num_out_rows;
+       } else {
+               num_in_rows = num_out_rows;
+               num_in_cols = num_out_cols;
+       }
+
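+       /*
+        * The tile alignments and stripe counts are all powers of two,
+        * so their products are too and ilog2() below is exact.
+        */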
+       /* align input width/height */
+       w_align = ilog2(tile_width_align(infmt) * num_in_cols);
+       h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
+                       num_in_rows);
+       in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
+       in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
+
+       /* align output width/height */
+       w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
+       h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
+                       num_out_rows);
+       out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
+       out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
+
+       /* set input/output strides and image sizes */
+       in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
+       in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
+       out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
+       out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
+
+/*
+ * this is used by ipu_image_convert_prepare() to verify that the input
+ * and output images are valid before starting the conversion. Clients
+ * can also call it before calling ipu_image_convert_prepare().
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+                            enum ipu_rotate_mode rot_mode)
+{
+       struct ipu_image testin, testout;
+
+       testin = *in;
+       testout = *out;
+
+       ipu_image_convert_adjust(&testin, &testout, rot_mode);
+
+       if (testin.pix.width != in->pix.width ||
+           testin.pix.height != in->pix.height ||
+           testout.pix.width != out->pix.width ||
+           testout.pix.height != out->pix.height)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
+
+/*
+ * Call ipu_image_convert_prepare() to prepare for the conversion of the
+ * given images and rotation mode. Returns a new conversion context.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                         struct ipu_image *in, struct ipu_image *out,
+                         enum ipu_rotate_mode rot_mode,
+                         ipu_image_convert_cb_t complete,
+                         void *complete_context)
+{
+       struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
+       struct ipu_image_convert_image *s_image, *d_image;
+       struct ipu_image_convert_chan *chan;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+       bool get_res;
+       int ret;
+
+       if (!in || !out || !complete ||
+           (ic_task != IC_TASK_VIEWFINDER &&
+            ic_task != IC_TASK_POST_PROCESSOR))
+               return ERR_PTR(-EINVAL);
+
+       /* verify the in/out images before continuing */
+       ret = ipu_image_convert_verify(in, out, rot_mode);
+       if (ret) {
+               dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
+                       __func__);
+               return ERR_PTR(ret);
+       }
+
+       chan = &priv->chan[ic_task];
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
+               chan->ic_task, ctx);
+
+       ctx->chan = chan;
+       init_completion(&ctx->aborted);
+
+       s_image = &ctx->in;
+       d_image = &ctx->out;
+
+       /* set tiling and rotation */
+       d_image->num_rows = num_stripes(out->pix.height);
+       d_image->num_cols = num_stripes(out->pix.width);
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               s_image->num_rows = d_image->num_cols;
+               s_image->num_cols = d_image->num_rows;
+       } else {
+               s_image->num_rows = d_image->num_rows;
+               s_image->num_cols = d_image->num_cols;
+       }
+
+       ctx->num_tiles = d_image->num_cols * d_image->num_rows;
+       ctx->rot_mode = rot_mode;
+
+       ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
+       if (ret)
+               goto out_free;
+       ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
+       if (ret)
+               goto out_free;
+
+       calc_out_tile_map(ctx);
+
+       dump_format(ctx, s_image);
+       dump_format(ctx, d_image);
+
+       ctx->complete = complete;
+       ctx->complete_context = complete_context;
+
+       /*
+        * Can we use double-buffering for this operation? If there is
+        * only one tile (the whole image can be converted in a single
+        * operation) there's no point in using double-buffering. Also,
+        * the IPU's IDMAC channels allow only a single U and V plane
+        * offset shared between both buffers, but these offsets change
+        * for every tile, and therefore would have to be updated for
+        * each buffer which is not possible. So double-buffering is
+        * impossible when either the source or destination images are
+        * a planar format (YUV420, YUV422P, etc.).
+        */
+       ctx->double_buffering = (ctx->num_tiles > 1 &&
+                                !s_image->fmt->planar &&
+                                !d_image->fmt->planar);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
+                                   d_image->tile[0].size);
+               if (ret)
+                       goto out_free;
+               if (ctx->double_buffering) {
+                       ret = alloc_dma_buf(priv,
+                                           &ctx->rot_intermediate[1],
+                                           d_image->tile[0].size);
+                       if (ret)
+                               goto out_free_dmabuf0;
+               }
+       }
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       get_res = list_empty(&chan->ctx_list);
+
+       list_add_tail(&ctx->list, &chan->ctx_list);
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
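+       /*
+        * The first context added to a channel acquires the IPU
+        * resources for that channel; they are released again when
+        * the last context is unprepared.
+        */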
+       if (get_res) {
+               ret = get_ipu_resources(chan);
+               if (ret)
+                       goto out_free_dmabuf1;
+       }
+
+       return ctx;
+
+out_free_dmabuf1:
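+       /* free_dma_buf() is a no-op for a buffer that was never allocated */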
+       free_dma_buf(priv, &ctx->rot_intermediate[1]);
+       spin_lock_irqsave(&chan->irqlock, flags);
+       list_del(&ctx->list);
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+out_free_dmabuf0:
+       free_dma_buf(priv, &ctx->rot_intermediate[0]);
+out_free:
+       kfree(ctx);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
+
+/*
+ * Carry out a single image conversion run. Only the physaddrs of the input
+ * and output image buffers are needed. The conversion context must have
+ * been created previously with ipu_image_convert_prepare().
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_chan *chan;
+       struct ipu_image_convert_priv *priv;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+       int ret = 0;
+
+       if (!run || !run->ctx || !run->in_phys || !run->out_phys)
+               return -EINVAL;
+
+       ctx = run->ctx;
+       chan = ctx->chan;
+       priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
+               chan->ic_task, ctx, run);
+
+       INIT_LIST_HEAD(&run->list);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       if (ctx->aborting) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       list_add_tail(&run->list, &chan->pending_q);
+
+       if (!chan->current_run) {
+               ret = do_run(run);
+               if (ret)
+                       chan->current_run = NULL;
+       }
+unlock:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
+
+/* Abort any active or pending conversions for this context */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run, *active_run, *tmp;
+       unsigned long flags;
+       int run_count, ret;
+       bool need_abort;
+
+       reinit_completion(&ctx->aborted);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* move all remaining pending runs in this context to done_q */
+       list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+               if (run->ctx != ctx)
+                       continue;
+               run->status = -EIO;
+               list_move_tail(&run->list, &chan->done_q);
+       }
+
+       run_count = get_run_count(ctx, &chan->done_q);
+       active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
+               chan->current_run : NULL;
+
+       need_abort = (run_count || active_run);
+
+       ctx->aborting = need_abort;
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       if (!need_abort) {
+               dev_dbg(priv->ipu->dev,
+                       "%s: task %u: no abort needed for ctx %p\n",
+                       __func__, chan->ic_task, ctx);
+               return;
+       }
+
+       dev_dbg(priv->ipu->dev,
+               "%s: task %u: wait for completion: %d runs, active run %p\n",
+               __func__, chan->ic_task, run_count, active_run);
+
+       ret = wait_for_completion_timeout(&ctx->aborted,
+                                         msecs_to_jiffies(10000));
+       if (ret == 0) {
+               dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+               force_abort(ctx);
+       }
+
+       ctx->aborting = false;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+
+/* Unprepare image conversion context */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       unsigned long flags;
+       bool put_res;
+
+       /* make sure no runs are hanging around */
+       ipu_image_convert_abort(ctx);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+               chan->ic_task, ctx);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       list_del(&ctx->list);
+
+       put_res = list_empty(&chan->ctx_list);
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       if (put_res)
+               release_ipu_resources(chan);
+
+       free_dma_buf(priv, &ctx->rot_intermediate[1]);
+       free_dma_buf(priv, &ctx->rot_intermediate[0]);
+
+       kfree(ctx);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
+
+/*
+ * "Canned" asynchronous single image conversion. Allocates and returns
+ * a new conversion run.  On successful return the caller must free the
+ * run and call ipu_image_convert_unprepare() after conversion completes.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                 struct ipu_image *in, struct ipu_image *out,
+                 enum ipu_rotate_mode rot_mode,
+                 ipu_image_convert_cb_t complete,
+                 void *complete_context)
+{
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       int ret;
+
+       ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
+                                       complete, complete_context);
+       if (IS_ERR(ctx))
+               return ERR_CAST(ctx);
+
+       run = kzalloc(sizeof(*run), GFP_KERNEL);
+       if (!run) {
+               ipu_image_convert_unprepare(ctx);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       run->ctx = ctx;
+       run->in_phys = in->phys0;
+       run->out_phys = out->phys0;
+
+       ret = ipu_image_convert_queue(run);
+       if (ret) {
+               ipu_image_convert_unprepare(ctx);
+               kfree(run);
+               return ERR_PTR(ret);
+       }
+
+       return run;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert);
+
+/* "Canned" synchronous single image conversion */
+static void image_convert_sync_complete(struct ipu_image_convert_run *run,
+                                       void *data)
+{
+       struct completion *comp = data;
+
+       complete(comp);
+}
+
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                          struct ipu_image *in, struct ipu_image *out,
+                          enum ipu_rotate_mode rot_mode)
+{
+       struct ipu_image_convert_run *run;
+       struct completion comp;
+       int ret;
+
+       init_completion(&comp);
+
+       run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
+                               image_convert_sync_complete, &comp);
+       if (IS_ERR(run))
+               return PTR_ERR(run);
+
+       ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
+       ret = (ret == 0) ? -ETIMEDOUT : 0;
+
+       ipu_image_convert_unprepare(run->ctx);
+       kfree(run);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+{
+       struct ipu_image_convert_priv *priv;
+       int i;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       ipu->image_convert_priv = priv;
+       priv->ipu = ipu;
+
+       for (i = 0; i < IC_NUM_TASKS; i++) {
+               struct ipu_image_convert_chan *chan = &priv->chan[i];
+
+               chan->ic_task = i;
+               chan->priv = priv;
+               chan->dma_ch = &image_convert_dma_chan[i];
+               chan->out_eof_irq = -1;
+               chan->rot_out_eof_irq = -1;
+
+               spin_lock_init(&chan->irqlock);
+               INIT_LIST_HEAD(&chan->ctx_list);
+               INIT_LIST_HEAD(&chan->pending_q);
+               INIT_LIST_HEAD(&chan->done_q);
+       }
+
+       return 0;
+}
+
+void ipu_image_convert_exit(struct ipu_soc *ipu)
+{
+}
index bfb1e8a4483fdbf7afbcc15311b90e3be986ffb1..22e47b68b14a230a6ad00814a827e4d3e4d11a51 100644 (file)
@@ -75,6 +75,33 @@ struct ipu_soc;
 #define IPU_INT_CTRL(n)                IPU_CM_REG(0x003C + 4 * (n))
 #define IPU_INT_STAT(n)                IPU_CM_REG(0x0200 + 4 * (n))
 
+/* FS_PROC_FLOW1 */
+#define FS_PRPENC_ROT_SRC_SEL_MASK     (0xf << 0)
+#define FS_PRPENC_ROT_SRC_SEL_ENC              (0x7 << 0)
+#define FS_PRPVF_ROT_SRC_SEL_MASK      (0xf << 8)
+#define FS_PRPVF_ROT_SRC_SEL_VF                        (0x8 << 8)
+#define FS_PP_SRC_SEL_MASK             (0xf << 12)
+#define FS_PP_ROT_SRC_SEL_MASK         (0xf << 16)
+#define FS_PP_ROT_SRC_SEL_PP                   (0x5 << 16)
+#define FS_VDI1_SRC_SEL_MASK           (0x3 << 20)
+#define FS_VDI3_SRC_SEL_MASK           (0x3 << 20)
+#define FS_PRP_SRC_SEL_MASK            (0xf << 24)
+#define FS_VDI_SRC_SEL_MASK            (0x3 << 28)
+#define FS_VDI_SRC_SEL_CSI_DIRECT              (0x1 << 28)
+#define FS_VDI_SRC_SEL_VDOA                    (0x2 << 28)
+
+/* FS_PROC_FLOW2 */
+#define FS_PRP_ENC_DEST_SEL_MASK       (0xf << 0)
+#define FS_PRP_ENC_DEST_SEL_IRT_ENC            (0x1 << 0)
+#define FS_PRPVF_DEST_SEL_MASK         (0xf << 4)
+#define FS_PRPVF_DEST_SEL_IRT_VF               (0x1 << 4)
+#define FS_PRPVF_ROT_DEST_SEL_MASK     (0xf << 8)
+#define FS_PP_DEST_SEL_MASK            (0xf << 12)
+#define FS_PP_DEST_SEL_IRT_PP                  (0x3 << 12)
+#define FS_PP_ROT_DEST_SEL_MASK                (0xf << 16)
+#define FS_PRPENC_ROT_DEST_SEL_MASK    (0xf << 20)
+#define FS_PRP_DEST_SEL_MASK           (0xf << 24)
+
 #define IPU_DI0_COUNTER_RELEASE                        (1 << 24)
 #define IPU_DI1_COUNTER_RELEASE                        (1 << 25)
 
@@ -138,6 +165,8 @@ struct ipu_dc_priv;
 struct ipu_dmfc_priv;
 struct ipu_di;
 struct ipu_ic_priv;
+struct ipu_vdi;
+struct ipu_image_convert_priv;
 struct ipu_smfc_priv;
 
 struct ipu_devtype;
@@ -152,6 +181,7 @@ struct ipu_soc {
        void __iomem            *cm_reg;
        void __iomem            *idmac_reg;
 
+       int                     id;
        int                     usecount;
 
        struct clk              *clk;
@@ -169,6 +199,8 @@ struct ipu_soc {
        struct ipu_di           *di_priv[2];
        struct ipu_csi          *csi_priv[2];
        struct ipu_ic_priv      *ic_priv;
+       struct ipu_vdi          *vdi_priv;
+       struct ipu_image_convert_priv *image_convert_priv;
        struct ipu_smfc_priv    *smfc_priv;
 };
 
@@ -199,6 +231,13 @@ int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
                unsigned long base, unsigned long tpmem_base);
 void ipu_ic_exit(struct ipu_soc *ipu);
 
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+                unsigned long base, u32 module);
+void ipu_vdi_exit(struct ipu_soc *ipu);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+void ipu_image_convert_exit(struct ipu_soc *ipu);
+
 int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
                unsigned long base, u32 module, struct clk *ipu_clk);
 void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
new file mode 100644 (file)
index 0000000..f27bf5a
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <linux/io.h>
+#include "ipu-prv.h"
+
+struct ipu_vdi {
+       void __iomem *base;
+       u32 module;
+       spinlock_t lock;
+       int use_count;
+       struct ipu_soc *ipu;
+};
+
+/* VDI Register Offsets */
+#define VDI_FSIZE 0x0000
+#define VDI_C     0x0004
+
+/* VDI Register Fields */
+#define VDI_C_CH_420             (0 << 1)
+#define VDI_C_CH_422             (1 << 1)
+#define VDI_C_MOT_SEL_MASK       (0x3 << 2)
+#define VDI_C_MOT_SEL_FULL       (2 << 2)
+#define VDI_C_MOT_SEL_LOW        (1 << 2)
+#define VDI_C_MOT_SEL_MED        (0 << 2)
+#define VDI_C_BURST_SIZE1_4      (3 << 4)
+#define VDI_C_BURST_SIZE2_4      (3 << 8)
+#define VDI_C_BURST_SIZE3_4      (3 << 12)
+#define VDI_C_BURST_SIZE_MASK    0xF
+#define VDI_C_BURST_SIZE1_OFFSET 4
+#define VDI_C_BURST_SIZE2_OFFSET 8
+#define VDI_C_BURST_SIZE3_OFFSET 12
+#define VDI_C_VWM1_SET_1         (0 << 16)
+#define VDI_C_VWM1_SET_2         (1 << 16)
+#define VDI_C_VWM1_CLR_2         (1 << 19)
+#define VDI_C_VWM3_SET_1         (0 << 22)
+#define VDI_C_VWM3_SET_2         (1 << 22)
+#define VDI_C_VWM3_CLR_2         (1 << 25)
+#define VDI_C_TOP_FIELD_MAN_1    (1 << 30)
+#define VDI_C_TOP_FIELD_AUTO_1   (1 << 31)
+
+static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
+{
+       return readl(vdi->base + offset);
+}
+
+static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
+                                unsigned int offset)
+{
+       writel(value, vdi->base + offset);
+}
+
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
+{
+       bool top_field_0 = false;
+       unsigned long flags;
+       u32 reg;
+
+       switch (field) {
+       case V4L2_FIELD_INTERLACED_TB:
+       case V4L2_FIELD_SEQ_TB:
+       case V4L2_FIELD_TOP:
+               top_field_0 = true;
+               break;
+       case V4L2_FIELD_INTERLACED_BT:
+       case V4L2_FIELD_SEQ_BT:
+       case V4L2_FIELD_BOTTOM:
+               top_field_0 = false;
+               break;
+       default:
+               top_field_0 = !!(std & V4L2_STD_525_60);
+               break;
+       }
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+       if (top_field_0)
+               reg &= ~VDI_C_TOP_FIELD_MAN_1;
+       else
+               reg |= VDI_C_TOP_FIELD_MAN_1;
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
+
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
+{
+       unsigned long flags;
+       u32 reg;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+
+       reg &= ~VDI_C_MOT_SEL_MASK;
+
+       switch (motion_sel) {
+       case MED_MOTION:
+               reg |= VDI_C_MOT_SEL_MED;
+               break;
+       case HIGH_MOTION:
+               reg |= VDI_C_MOT_SEL_FULL;
+               break;
+       default:
+               reg |= VDI_C_MOT_SEL_LOW;
+               break;
+       }
+
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
+
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
+{
+       unsigned long flags;
+       u32 pixel_fmt, reg;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ((yres - 1) << 16) | (xres - 1);
+       ipu_vdi_write(vdi, reg, VDI_FSIZE);
+
+       /*
+        * Full motion: only the vertical filter is used.
+        * Burst size is 4 accesses.
+        */
+       if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+           code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+           code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+           code == MEDIA_BUS_FMT_YUYV8_1X16)
+               pixel_fmt = VDI_C_CH_422;
+       else
+               pixel_fmt = VDI_C_CH_420;
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+       reg |= pixel_fmt;
+       reg |= VDI_C_BURST_SIZE2_4;
+       reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
+       reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_setup);
+
+void ipu_vdi_unsetup(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+       ipu_vdi_write(vdi, 0, VDI_FSIZE);
+       ipu_vdi_write(vdi, 0, VDI_C);
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
+
+int ipu_vdi_enable(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       if (!vdi->use_count)
+               ipu_module_enable(vdi->ipu, vdi->module);
+
+       vdi->use_count++;
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_enable);
+
+int ipu_vdi_disable(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       if (vdi->use_count) {
+               if (!--vdi->use_count)
+                       ipu_module_disable(vdi->ipu, vdi->module);
+       }
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_disable);
+
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
+{
+       return ipu->vdi_priv;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_get);
+
+void ipu_vdi_put(struct ipu_vdi *vdi)
+{
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_put);
+
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+                unsigned long base, u32 module)
+{
+       struct ipu_vdi *vdi;
+
+       vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
+       if (!vdi)
+               return -ENOMEM;
+
+       ipu->vdi_priv = vdi;
+
+       spin_lock_init(&vdi->lock);
+       vdi->module = module;
+       vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
+       if (!vdi->base)
+               return -ENOMEM;
+
+       dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
+       vdi->ipu = ipu;
+
+       return 0;
+}
+
+void ipu_vdi_exit(struct ipu_soc *ipu)
+{
+}
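A minimal calling sketch for the new VDI API above (a hypothetical capture-pipeline caller; the ipu pointer, the PAL format/field values and MED_MOTION are illustrative assumptions, and error handling is elided):

	struct ipu_vdi *vdi = ipu_vdi_get(ipu);

	/* 720x576 UYVY input, PAL field order, medium-motion deinterlacing */
	ipu_vdi_setup(vdi, MEDIA_BUS_FMT_UYVY8_2X8, 720, 576);
	ipu_vdi_set_field_order(vdi, V4L2_STD_PAL, V4L2_FIELD_INTERLACED_TB);
	ipu_vdi_set_motion(vdi, MED_MOTION);
	ipu_vdi_enable(vdi);

	/* ... run the deinterlacing conversion ... */

	ipu_vdi_disable(vdi);
	ipu_vdi_unsetup(vdi);
	ipu_vdi_put(vdi);

Note that ipu_vdi_enable()/ipu_vdi_disable() are reference counted, so nested users of the shared VDI block stay balanced.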
index f17cb04318337688b0d5223dd60ad4f687b0cfc5..1887f199ccb787ad4209995b637caa300e5b944e 100644 (file)
@@ -131,7 +131,24 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
        return NULL;
 }
 
-/* Returns the default VGA device (vgacon's babe) */
+/**
+ * vga_default_device - return the default VGA device, for vgacon
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * VGA card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two arch
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can their I/Os be disabled at all? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed with
+ * a vga_get()...
+ */
 struct pci_dev *vga_default_device(void)
 {
        return vga_default;
@@ -356,6 +373,40 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
                wake_up_all(&vga_wait_queue);
 }
 
+/**
+ * vga_get - acquire & lock VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: whether blocking should be interruptible by signals
+ *
+ * This function acquires VGA resources for the given card and marks those
+ * resources as locked. If the resources requested are "normal" (and not
+ * legacy) resources, the arbiter will first check whether the card is doing
+ * legacy decoding for that type of resource. If yes, the lock is "converted"
+ * into a legacy resource lock.
+ *
+ * The arbiter will first look for all VGA cards that might conflict and disable
+ * their IOs and/or Memory access, including VGA forwarding on P2P bridges if
+ * necessary, so that the requested resources can be used. Then, the card is
+ * marked as locking these resources and the IO and/or Memory accesses are
+ * enabled on the card (including VGA forwarding on parent P2P bridges if any).
+ *
+ * This function will block if some conflicting card is already locking one of
+ * the required resources (or any resource on a different bus segment, since
+ * P2P bridges don't differentiate VGA memory and IO as far as we know). You
+ * can indicate whether this blocking should be interruptible by a signal (for
+ * a userland interface) or not.
+ *
+ * Must not be called at interrupt time or in atomic context. If the card
+ * already owns the resources, the function succeeds. Nested calls are
+ * supported (a per-resource counter is maintained).
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
        struct vga_device *vgadev, *conflict;
@@ -408,6 +459,21 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 }
 EXPORT_SYMBOL(vga_get);
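For reference, a minimal sketch of the acquire/release pattern this kerneldoc describes (a hypothetical driver function; only the vgaarb calls themselves are real API):

	static int my_driver_touch_vga(struct pci_dev *pdev)
	{
		int ret;

		/* Block (interruptibly) until legacy VGA I/O and memory are ours */
		ret = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
		if (ret)
			return ret;

		/* ... safely poke legacy VGA registers here ... */

		vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
		return 0;
	}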
 
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but will return an
+ * error (-EBUSY) instead of blocking if the resources are already locked by
+ * another card. It can be called in any context.
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
 {
        struct vga_device *vgadev;
@@ -435,6 +501,16 @@ bail:
 }
 EXPORT_SYMBOL(vga_tryget);
 
+/**
+ * vga_put - release lock on legacy VGA resources
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to release
+ *
+ * This function releases resources previously locked by vga_get() or
+ * vga_tryget(). The resources aren't disabled right away, so that a
+ * subsequent vga_get() on the same card will succeed immediately.
+ * Resources have a counter, so locks are only released if the counter
+ * reaches 0.
+ */
 void vga_put(struct pci_dev *pdev, unsigned int rsrc)
 {
        struct vga_device *vgadev;
@@ -716,7 +792,37 @@ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
 }
 EXPORT_SYMBOL(vga_set_legacy_decoding);
 
-/* call with NULL to unregister */
+/**
+ * vga_client_register - register or unregister a VGA arbitration client
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * Clients have two callback mechanisms they can use.
+ *
+ * @irq_set_state callback: If a client can't disable its GPU's VGA
+ * resources, then we need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ *
+ * @set_vga_decode callback: If a client can disable its GPU's VGA resources,
+ * it will get a callback from this to set the encode/decode state.
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally, since
+ * some single-GPU laptops seem to require ACPI or BIOS access to the VGA
+ * registers to control things like backlights etc. Hopefully newer multi-GPU
+ * laptops do something saner, and desktops won't have any special ACPI for
+ * this. The driver will get a callback when VGA arbitration is first used by
+ * userspace, since some older X servers have issues.
+ *
+ * This function does not check whether a client for @pdev has been registered
+ * already.
+ *
+ * To unregister just call this function with @irq_set_state and @set_vga_decode
+ * both set to NULL for the same @pdev as originally used to register them.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
                        void (*irq_set_state)(void *cookie, bool state),
                        unsigned int (*set_vga_decode)(void *cookie,
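A registration sketch matching the kerneldoc above (my_gpu and both callbacks are hypothetical; the decode callback's return value reports which resources the client still decodes):

	static void my_gpu_irq_set_state(void *cookie, bool state)
	{
		/* mask or unmask the GPU's interrupts while decoding is off */
	}

	static unsigned int my_gpu_set_vga_decode(void *cookie, bool state)
	{
		/* after disabling legacy VGA decode, only normal resources remain */
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	}

	/* on probe */
	vga_client_register(pdev, my_gpu, my_gpu_irq_set_state,
			    my_gpu_set_vga_decode);
	/* on remove, unregister by passing NULL callbacks */
	vga_client_register(pdev, NULL, NULL, NULL);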
index 730d840282603160ffad847fe938dc2e23319e7f..4667012b46b7fb35119fab2eaeb17b65e8afa980 100644 (file)
@@ -491,7 +491,7 @@ struct it87_sio_data {
 struct it87_data {
        const struct attribute_group *groups[7];
        enum chips type;
-       u16 features;
+       u32 features;
        u8 peci_mask;
        u8 old_peci_mask;
 
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = {
        &sensor_dev_attr_in10_input.dev_attr.attr,      /* 41 */
        &sensor_dev_attr_in11_input.dev_attr.attr,      /* 42 */
        &sensor_dev_attr_in12_input.dev_attr.attr,      /* 43 */
+       NULL
 };
 
 static const struct attribute_group it87_group_in = {
index f23372669f770fe81154d116843d34e1b40c9b6a..1bb97f658b47a1fa4c5b15e85779fa5160c6c3e6 100644 (file)
@@ -38,6 +38,7 @@
 #define AT91_I2C_TIMEOUT       msecs_to_jiffies(100)   /* transfer timeout */
 #define AT91_I2C_DMA_THRESHOLD 8                       /* enable DMA if transfer size is bigger than this threshold */
 #define AUTOSUSPEND_TIMEOUT            2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
 
 /* AT91 TWI register definitions */
 #define        AT91_TWI_CR             0x0000  /* Control Register */
@@ -141,6 +142,7 @@ struct at91_twi_dev {
        unsigned twi_cwgr_reg;
        struct at91_twi_pdata *pdata;
        bool use_dma;
+       bool use_alt_cmd;
        bool recv_len_abort;
        u32 fifo_size;
        struct at91_twi_dma dma;
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
 
        /* send stop when last byte has been written */
        if (--dev->buf_len == 0)
-               if (!dev->pdata->has_alt_cmd)
+               if (!dev->use_alt_cmd)
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
        dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data)
         * we just have to enable TXCOMP one.
         */
        at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
-       if (!dev->pdata->has_alt_cmd)
+       if (!dev->use_alt_cmd)
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
 
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
        }
 
        /* send stop if second but last byte has been read */
-       if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+       if (!dev->use_alt_cmd && dev->buf_len == 1)
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
        dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data)
        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
                         dev->buf_len, DMA_FROM_DEVICE);
 
-       if (!dev->pdata->has_alt_cmd) {
+       if (!dev->use_alt_cmd) {
                /* The last two bytes have to be read without using dma */
                dev->buf += dev->buf_len - 2;
                dev->buf_len = 2;
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
        struct dma_chan *chan_rx = dma->chan_rx;
        size_t buf_len;
 
-       buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+       buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
        dma->direction = DMA_FROM_DEVICE;
 
        /* Keep in mind that we won't use dma to read the last two bytes */
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                unsigned start_flags = AT91_TWI_START;
 
                /* if only one byte is to be read, immediately stop transfer */
-               if (!has_alt_cmd && dev->buf_len <= 1 &&
+               if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
                    !(dev->msg->flags & I2C_M_RECV_LEN))
                        start_flags |= AT91_TWI_STOP;
                at91_twi_write(dev, AT91_TWI_CR, start_flags);
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        int ret;
        unsigned int_addr_flag = 0;
        struct i2c_msg *m_start = msg;
-       bool is_read, use_alt_cmd = false;
+       bool is_read;
 
        dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
 
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
                at91_twi_write(dev, AT91_TWI_IADR, internal_address);
        }
 
+       dev->use_alt_cmd = false;
        is_read = (m_start->flags & I2C_M_RD);
        if (dev->pdata->has_alt_cmd) {
-               if (m_start->len > 0) {
+               if (m_start->len > 0 &&
+                   m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
                        at91_twi_write(dev, AT91_TWI_ACR,
                                       AT91_TWI_ACR_DATAL(m_start->len) |
                                       ((is_read) ? AT91_TWI_ACR_DIR : 0));
-                       use_alt_cmd = true;
+                       dev->use_alt_cmd = true;
                } else {
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
                }
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        at91_twi_write(dev, AT91_TWI_MMR,
                       (m_start->addr << 16) |
                       int_addr_flag |
-                      ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+                      ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
 
        dev->buf_len = m_start->len;
        dev->buf = m_start->buf;
index 19c843828fe2ca504d9616d3d0fcad48d089c8a1..95f7cac76f89bfeac68ad22521d8c38c2df348c8 100644 (file)
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
 
        if (status & BIT(IS_M_START_BUSY_SHIFT)) {
                iproc_i2c->xfer_is_done = 1;
-               complete_all(&iproc_i2c->done);
+               complete(&iproc_i2c->done);
        }
 
        writel(status, iproc_i2c->base + IS_OFFSET);
index ac9f47679c3a4b18eabfb48c76420ca5871cc34d..258cb9a40ab3491b7c872432e07a2a1a435da78b 100644 (file)
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
                       dev->base + TXFCR_OFFSET);
 
        writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
-       complete_all(&dev->done);
+       complete(&dev->done);
 
        return IRQ_HANDLED;
 }
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
                        if (rc < 0) {
                                dev_err(dev->device,
                                        "restart cmd failed rc = %d\n", rc);
-                                       goto xfer_send_stop;
+                               goto xfer_send_stop;
                        }
                }
 
index 3f5a4d71d3bf32289997371955e6297a26dc1d20..385b57bfcb386ce8754892741584fd30a10b3b47 100644 (file)
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid)
                return IRQ_NONE;
 
        brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
-       complete_all(&dev->done);
+       complete(&dev->done);
 
        dev_dbg(dev->device, "isr handled\n");
        return IRQ_HANDLED;
index 90bbd9f9dd8f778422d54d1cc988b67409588f08..3c16a2f7c673cb808b4113a65d41493bd167ca89 100644 (file)
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
  * depending on the scaling direction.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
index a0d95ff682ae120186a689dd0dcdd74f3d65b0f3..2d5ff86398d0911cd0c3a261fb689cf2c9e04609 100644 (file)
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
        msg->outsize = request_len;
        msg->insize = response_len;
 
-       result = cros_ec_cmd_xfer(bus->ec, msg);
+       result = cros_ec_cmd_xfer_status(bus->ec, msg);
        if (result < 0) {
                dev_err(dev, "Error transferring EC i2c message %d\n", result);
                goto exit;
index c6922b806fb72516726816214134a69120f386f2..fcd973d5131e8f767cbe2af45cfac7bad74aed24 100644 (file)
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
        /* Configure SDA Hold Time if required */
-       if (dev->sda_hold_time) {
-               reg = dw_readl(dev, DW_IC_COMP_VERSION);
-               if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+       reg = dw_readl(dev, DW_IC_COMP_VERSION);
+       if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+               if (dev->sda_hold_time) {
                        dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
-               else
-                       dev_warn(dev->dev,
-                               "Hardware too old to adjust SDA hold time.");
+               } else {
+                       /* Preserve the existing hold time setting if none was configured */
+                       dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+               }
+       } else {
+               dev_warn(dev->dev,
+                       "Hardware too old to adjust SDA hold time.\n");
        }
 
        /* Configure Tx/Rx FIFO threshold levels */
index 137125b5eae77ca07fae38d52475341b01df5c76..5ce71ce7b6c43b7ce8fd7e7586127650c5e2e508 100644 (file)
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_request_irq;
-       }
-
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.of_node = pdev->dev.of_node;
                pch_adap->dev.parent = &pdev->dev;
+       }
+
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
+       for (i = 0; i < adap_info->ch_num; i++) {
+               pch_adap = &adap_info->pch_data[i].pch_adapter;
 
                pch_i2c_init(&adap_info->pch_data[i]);
 
index 71d3929adf54ed074e1600fcfd0b844f6e6574d6..76e28980904f0b166971ffa09688f4302f067c5b 100644 (file)
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c)
                meson_i2c_add_token(i2c, TOKEN_STOP);
        } else {
                i2c->state = STATE_IDLE;
-               complete_all(&i2c->done);
+               complete(&i2c->done);
        }
 }
 
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
                dev_dbg(i2c->dev, "error bit set\n");
                i2c->error = -ENXIO;
                i2c->state = STATE_IDLE;
-               complete_all(&i2c->done);
+               complete(&i2c->done);
                goto out;
        }
 
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
                break;
        case STATE_STOP:
                i2c->state = STATE_IDLE;
-               complete_all(&i2c->done);
+               complete(&i2c->done);
                break;
        case STATE_IDLE:
                break;
index dfa7a4b4a91d78ec827c074089e035f973c585eb..ac88a524143e07406bc2e4837a80a8ac17a4bd91 100644 (file)
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
                        if (!clock_frequency_present) {
                                dev_err(&pdev->dev,
                                        "Missing required parameter 'opencores,ip-clock-frequency'\n");
+                               clk_disable_unprepare(i2c->clk);
                                return -ENODEV;
                        }
                        i2c->ip_clock_khz = clock_frequency / 1000;
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev)
                default:
                        dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
                                i2c->reg_io_width);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_clk;
                }
        }
 
        ret = ocores_init(&pdev->dev, i2c);
        if (ret)
-               return ret;
+               goto err_clk;
 
        init_waitqueue_head(&i2c->wait);
        ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
                               pdev->name, i2c);
        if (ret) {
                dev_err(&pdev->dev, "Cannot claim IRQ\n");
-               return ret;
+               goto err_clk;
        }
 
        /* hook up driver to tree */
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        ret = i2c_add_adapter(&i2c->adap);
        if (ret) {
                dev_err(&pdev->dev, "Failed to add adapter\n");
-               return ret;
+               goto err_clk;
        }
 
        /* add in known devices to the bus */
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        }
 
        return 0;
+
+err_clk:
+       clk_disable_unprepare(i2c->clk);
+       return ret;
 }
 
 static int ocores_i2c_remove(struct platform_device *pdev)
index 501bd15cb78e0eddab711efcb3ea6cd86515f5bb..a8497cfdae6f40a4de6bd4c57cac4d8f3789967f 100644 (file)
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-       qup_i2c_pm_suspend_runtime(device);
+       if (!pm_runtime_suspended(device))
+               return qup_i2c_pm_suspend_runtime(device);
        return 0;
 }
 
index 52407f3c9e1cce3f9a4c0e9d010ad0dddb6eee7c..9bd849dacee8724f1cbc3607f3bca013ee91e94b 100644 (file)
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        }
 
        dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
-       if (dma_mapping_error(dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(dev, "dma map failed, using PIO\n");
                return;
        }
index 2bc8b01153d61985814591a962851b19bb0bb2b1..5c5b7cada8beef28d2d9e5c94bfa5d0c355d8af0 100644 (file)
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
  * Code adapted from i2c-cadence.c.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
        return ret < 0 ? ret : num;
 }
 
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+       struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+       rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+       return 0;
+}
+
 static u32 rk3x_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
 static struct platform_driver rk3x_i2c_driver = {
        .probe   = rk3x_i2c_probe,
        .remove  = rk3x_i2c_remove,
        .driver  = {
                .name  = "rk3x-i2c",
                .of_match_table = rk3x_i2c_match,
+               .pm = &rk3x_i2c_pm_ops,
        },
 };
 
index 6fb3e2645992297937635c0ffd0f791ed5c92b1e..05b1eeab9cf5f6a58b8af543a2c515cf7ee07498 100644 (file)
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
                return;
 
        dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
-       if (dma_mapping_error(pd->dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(pd->dev, "dma map failed, using PIO\n");
                return;
        }
index 8de073aed001482461b3ad12398c00fa9417fc95..b3893f6282ba5b38920388657d5d1a70129a0148 100644 (file)
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
        struct i2c_demux_pinctrl_chan chan[];
 };
 
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
 static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
        struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -68,7 +66,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
        adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
        if (!adap) {
                ret = -ENODEV;
-               goto err;
+               goto err_with_revert;
        }
 
        p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
@@ -103,8 +101,11 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
 
  err_with_put:
        i2c_put_adapter(adap);
+ err_with_revert:
+       of_changeset_revert(&priv->chan[new_chan].chgset);
  err:
        dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+       priv->cur_chan = -EINVAL;
        return ret;
 }
 
@@ -190,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct i2c_demux_pinctrl_priv *priv;
+       struct property *props;
        int num_chan, i, j, err;
 
        num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -200,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
                           + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
-       if (!priv)
+
+       props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+       if (!priv || !props)
                return -ENOMEM;
 
        err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -218,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
                }
                priv->chan[i].parent_np = adap_np;
 
+               props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+               props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+               props[i].length = 3;
+
                of_changeset_init(&priv->chan[i].chgset);
-               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
        }
 
        priv->num_chan = num_chan;
index 528e755c468f36a8cf7cf1f693878e4482b17a93..3278ebf1cc5ccda15321f46bd658a03f51273557 100644 (file)
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
        /* Only select the channel if it's different from the last channel */
        if (data->last_chan != regval) {
                ret = pca954x_reg_write(muxc->parent, client, regval);
-               data->last_chan = regval;
+               data->last_chan = ret ? 0 : regval;
        }
 
        return ret;
index 89d78208de3f30d6396162a208eb0f29460087b7..78f148ea9d9f92e42164c26fe980e53871ed1815 100644 (file)
@@ -20,6 +20,8 @@ config BMA180
 config BMA220
     tristate "Bosch BMA220 3-Axis Accelerometer Driver"
        depends on SPI
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
     help
       Say yes here to add support for the Bosch BMA220 triaxial
       acceleration sensor.
@@ -234,7 +236,8 @@ config STK8312
 config STK8BA50
        tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
        depends on I2C
-       depends on IIO_TRIGGER
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to get support for the Sensortek STK8BA50 3-axis
          accelerometer.
index 1098d10df8e8e2bb6e6b36530a95655ac6d40b8a..5099f295dd378db1236ae5a07c288c2932cbb309 100644 (file)
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi)
        if (ret < 0)
                return ret;
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         bma220_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&spi->dev, "iio triggered buffer setup failed\n");
index bf17aae6614535b0ab3d68d3bfd84aec653a2f45..59b380dbf27f918297d83bd00ae585149baba5a2 100644 (file)
@@ -67,6 +67,9 @@
 #define BMC150_ACCEL_REG_PMU_BW                0x10
 #define BMC150_ACCEL_DEF_BW                    125
 
+#define BMC150_ACCEL_REG_RESET                 0x14
+#define BMC150_ACCEL_RESET_VAL                 0xB6
+
 #define BMC150_ACCEL_REG_INT_MAP_0             0x19
 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE       BIT(2)
 
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
        int ret, i;
        unsigned int val;
 
+       /*
+        * Reset chip to get it in a known good state. A delay of 1.8ms after
+        * reset is required according to the data sheets of supported chips.
+        */
+       regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
+                    BMC150_ACCEL_RESET_VAL);
+       usleep_range(1800, 2500);
+
        ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
        if (ret < 0) {
                dev_err(dev, "Error: Reading chip id\n");
index 3a9f106787d28b2f85248402b68628fd02604460..9d72d4bcf5e9b316034219bb1b880fc55a72c076 100644 (file)
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
                if (ret < 0)
                        goto error_ret;
                *val = ret;
+               ret = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SCALE:
                ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
                if (ret < 0)
                        goto error_ret;
+               *val = 0;
                *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
                ret = IIO_VAL_INT_PLUS_MICRO;
                break;
index 1de31bdd4ce4e2f12cda1c3e9c597df5c323038e..767577298ee3bd9f2cc06814f91a889d503aa589 100644 (file)
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC
 config ROCKCHIP_SARADC
        tristate "Rockchip SARADC driver"
        depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+       depends on RESET_CONTROLLER
        help
          Say yes here to build support for the SARADC found in SoCs from
          Rockchip.
index b6163764489c677a2ec45dd5536464734ac641c6..9704090b79084f3206703c248aed3d774831744f 100644 (file)
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = {
 static const struct iio_info ad7991_info = {
        .read_raw = &ad799x_read_raw,
        .driver_module = THIS_MODULE,
+       .update_scan_mode = ad799x_update_scan_mode,
 };
 
 static const struct iio_info ad7993_4_7_8_noirq_info = {
index 52430ba171f3893729701bbd0dd0c6cc2fb73dab..0438c68015e8fc77404633d1e6a16fa6982487e4 100644 (file)
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
                st->ts_bufferedmeasure = false;
                input_report_key(st->ts_input, BTN_TOUCH, 0);
                input_sync(st->ts_input);
-       } else if (status & AT91_ADC_EOC(3)) {
-               /* Conversion finished */
+       } else if (status & AT91_ADC_EOC(3) && st->ts_input) {
+               /* Conversion finished and we have a touchscreen */
                if (st->ts_bufferedmeasure) {
                        /*
                         * Last measurement is always discarded, since it can
index f9ad6c2d68219234e250dcbdeb6d5909fb06bbe1..85d701291654074e1fa07978331eb01d89abd04a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
 #include <linux/regulator/consumer.h>
 #include <linux/iio/iio.h>
 
@@ -53,6 +55,7 @@ struct rockchip_saradc {
        struct clk              *clk;
        struct completion       completion;
        struct regulator        *vref;
+       struct reset_control    *reset;
        const struct rockchip_saradc_data *data;
        u16                     last_val;
 };
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
 };
 MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
 
+/*
+ * Reset the SARADC controller.
+ */
+static void rockchip_saradc_reset_controller(struct reset_control *reset)
+{
+       reset_control_assert(reset);
+       usleep_range(10, 20);
+       reset_control_deassert(reset);
+}
+
 static int rockchip_saradc_probe(struct platform_device *pdev)
 {
        struct rockchip_saradc *info = NULL;
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
        if (IS_ERR(info->regs))
                return PTR_ERR(info->regs);
 
+       /*
+        * The reset should be an optional property, as it should work
+        * with old devicetrees as well
+        */
+       info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+       if (IS_ERR(info->reset)) {
+               ret = PTR_ERR(info->reset);
+               if (ret != -ENOENT)
+                       return ret;
+
+               dev_dbg(&pdev->dev, "no reset control found\n");
+               info->reset = NULL;
+       }
+
        init_completion(&info->completion);
 
        irq = platform_get_irq(pdev, 0);
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
                return PTR_ERR(info->vref);
        }
 
+       if (info->reset)
+               rockchip_saradc_reset_controller(info->reset);
+
        /*
         * Use a default value for the converter clock.
         * This may become user-configurable in the future.
index 1ef398770a1f2c2dcbc760f911b5064ea4950954..066abaf8020157f2f247dc27d4ce55aec84a362d 100644 (file)
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = {
 #ifdef CONFIG_OF
 static int ads1015_get_channels_config_of(struct i2c_client *client)
 {
-       struct ads1015_data *data = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ads1015_data *data = iio_priv(indio_dev);
        struct device_node *node;
 
        if (!client->dev.of_node ||
index 8a368756881b82431404f86766d70b4b8acdd236..c3cfacca25410c5660aaac5a13947f770fda05c0 100644 (file)
@@ -32,6 +32,7 @@
 
 struct tiadc_device {
        struct ti_tscadc_dev *mfd_tscadc;
+       struct mutex fifo1_lock; /* to protect fifo access */
        int channels;
        u8 channel_line[8];
        u8 channel_step[8];
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                int *val, int *val2, long mask)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       int ret = IIO_VAL_INT;
        int i, map_val;
        unsigned int fifo1count, read, stepid;
        bool found = false;
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        if (!step_en)
                return -EINVAL;
 
+       mutex_lock(&adc_dev->fifo1_lock);
        fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
        while (fifo1count--)
                tiadc_readl(adc_dev, REG_FIFO1);
 
        am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
 
-       timeout = jiffies + usecs_to_jiffies
+       timeout = jiffies + msecs_to_jiffies
                                (IDLE_TIMEOUT * adc_dev->channels);
        /* Wait for Fifo threshold interrupt */
        while (1) {
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 
                if (time_after(jiffies, timeout)) {
                        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
-                       return -EAGAIN;
+                       ret = -EAGAIN;
+                       goto err_unlock;
                }
        }
        map_val = adc_dev->channel_step[chan->scan_index];
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 
        if (found == false)
-               return -EBUSY;
-       return IIO_VAL_INT;
+               ret = -EBUSY;
+
+err_unlock:
+       mutex_unlock(&adc_dev->fifo1_lock);
+       return ret;
 }
 
 static const struct iio_info tiadc_info = {
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev)
 
        tiadc_step_config(indio_dev);
        tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+       mutex_init(&adc_dev->fifo1_lock);
 
        err = tiadc_channel_init(indio_dev, adc_dev->channels);
        if (err < 0)
index ae038a59d256c1c9f6ff89acb65819090e66a829..407f141a1eee9785435cddb1d27b7922434295f3 100644 (file)
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
                        break;
                case IIO_ELECTRICALCONDUCTIVITY:
                        *val = 1; /* 0.00001 */
-                       *val = 100000;
+                       *val2 = 100000;
                        break;
                case IIO_CONCENTRATION:
                        *val = 0; /* 0.000000001 */
index e81f434760f4c778c604c7d2e102bfa79adef9b2..dc33c1dd5191a57aaa8c3c66cdaa75a31866463c 100644 (file)
@@ -56,8 +56,8 @@ static struct {
        {HID_USAGE_SENSOR_ALS, 0, 1, 0},
        {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
 
-       {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
-       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+       {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
 };
 
 static int pow_10(unsigned power)
index 792a97164cb28e04dd25d44cdffd8541b05f19c4..bebbd00304ce8486f0e3e5d6e03340806861925f 100644 (file)
@@ -65,6 +65,16 @@ struct stx104_gpio {
        unsigned int out_state;
 };
 
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip:      instance of the gpio_chip
+ */
+struct stx104_dev {
+       struct iio_dev *indio_dev;
+       struct gpio_chip *chip;
+};
+
 static int stx104_read_raw(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan, int *val, int *val2, long mask)
 {
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
 static int stx104_gpio_get_direction(struct gpio_chip *chip,
        unsigned int offset)
 {
+       /* GPIO 0-3 are input only, while the rest are output only */
        if (offset < 4)
                return 1;
 
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
        struct iio_dev *indio_dev;
        struct stx104_iio *priv;
        struct stx104_gpio *stx104gpio;
+       struct stx104_dev *stx104dev;
        int err;
 
        indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id)
        if (!stx104gpio)
                return -ENOMEM;
 
+       stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+       if (!stx104dev)
+               return -ENOMEM;
+
        if (!devm_request_region(dev, base[id], STX104_EXTENT,
                dev_name(dev))) {
                dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id)
        outw(0, base[id] + 4);
        outw(0, base[id] + 6);
 
-       err = devm_iio_device_register(dev, indio_dev);
-       if (err) {
-               dev_err(dev, "IIO device registering failed (%d)\n", err);
-               return err;
-       }
-
        stx104gpio->chip.label = dev_name(dev);
        stx104gpio->chip.parent = dev;
        stx104gpio->chip.owner = THIS_MODULE;
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id)
 
        spin_lock_init(&stx104gpio->lock);
 
-       dev_set_drvdata(dev, stx104gpio);
+       stx104dev->indio_dev = indio_dev;
+       stx104dev->chip = &stx104gpio->chip;
+       dev_set_drvdata(dev, stx104dev);
 
        err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
        if (err) {
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id)
                return err;
        }
 
+       err = iio_device_register(indio_dev);
+       if (err) {
+               dev_err(dev, "IIO device registering failed (%d)\n", err);
+               gpiochip_remove(&stx104gpio->chip);
+               return err;
+       }
+
        return 0;
 }
 
 static int stx104_remove(struct device *dev, unsigned int id)
 {
-       struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+       struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
 
-       gpiochip_remove(&stx104gpio->chip);
+       iio_device_unregister(stx104dev->indio_dev);
+       gpiochip_remove(stx104dev->chip);
 
        return 0;
 }
index 738a86d9e4a9f8b17efc91a987a840169fe47f99..d04124345992c9bfcf857a17cdac19894df21a70 100644 (file)
@@ -6,6 +6,8 @@ menu "Humidity sensors"
 config AM2315
     tristate "Aosong AM2315 relative humidity and temperature sensor"
     depends on I2C
+    select IIO_BUFFER
+    select IIO_TRIGGERED_BUFFER
     help
       If you say yes here you get support for the Aosong AM2315
       relative humidity and ambient temperature sensor.
index 3e200f69e88657488a19de758c318f69ec314759..ff96b6d0fdae436f83e9620abcddcd60a958366e 100644 (file)
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
        indio_dev->channels = am2315_channels;
        indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         am2315_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&client->dev, "iio triggered buffer setup failed\n");
index a03832a5fc95aafd81b075710d32d57acf9f2677..e0c9c70c2a4ae6dcc1723abc114ad036511a0c97 100644 (file)
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        struct i2c_client *client = data->client;
        int delay = data->adc_int_us[chan->address];
        int ret;
-       int val;
+       __be16 val;
 
        /* start measurement */
        ret = i2c_smbus_write_byte(client, chan->address);
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        /* wait for integration time to pass */
        usleep_range(delay, delay + 1000);
 
-       /*
-        * i2c_smbus_read_word_data cannot() be used here due to the command
-        * value not being understood and causes NAKs preventing any reading
-        * from being accessed.
-        */
-       ret = i2c_smbus_read_byte(client);
+       /* read measurement */
+       ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
        if (ret < 0) {
-               dev_err(&client->dev, "cannot read high byte measurement");
+               dev_err(&client->dev, "cannot read sensor data\n");
                return ret;
        }
-       val = ret << 8;
-
-       ret = i2c_smbus_read_byte(client);
-       if (ret < 0) {
-               dev_err(&client->dev, "cannot read low byte measurement");
-               return ret;
-       }
-       val |= ret;
-
-       return val;
+       return be16_to_cpu(val);
 }
 
 static int hdc100x_get_heater_status(struct hdc100x_data *data)
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
        struct iio_dev *indio_dev;
        struct hdc100x_data *data;
 
-       if (!i2c_check_functionality(client->adapter,
-                               I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+                                    I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
                return -EOPNOTSUPP;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
index 90462fcf543698bab68378f5a0cb4b8d7262ef2c..158aaf44dd951047f7d6212b522dd8b921395b21 100644 (file)
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        size_t datum_size;
        size_t to_wait;
-       int ret;
+       int ret = 0;
 
        if (!indio_dev->info)
                return -ENODEV;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
        else
                to_wait = min_t(size_t, n / datum_size, rb->watermark);
 
+       add_wait_queue(&rb->pollq, &wait);
        do {
-               ret = wait_event_interruptible(rb->pollq,
-                     iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
-               if (ret)
-                       return ret;
+               if (!indio_dev->info) {
+                       ret = -ENODEV;
+                       break;
+               }
 
-               if (!indio_dev->info)
-                       return -ENODEV;
+               if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE,
+                                  MAX_SCHEDULE_TIMEOUT);
+                       continue;
+               }
 
                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
-        } while (ret == 0);
+       } while (ret == 0);
+       remove_wait_queue(&rb->pollq, &wait);
 
        return ret;
 }
index f914d5d140e4014f5a31011c7783959fe8022755..d2b889918c3e7ae99028cbb6d4e67961aa3a941a 100644 (file)
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
                        return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
        case IIO_VAL_FRACTIONAL:
                tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
-               vals[1] = do_div(tmp, 1000000000LL);
-               vals[0] = tmp;
-               return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+               vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
+               return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
        case IIO_VAL_FRACTIONAL_LOG2:
                tmp = (s64)vals[0] * 1000000000LL >> vals[1];
                vals[1] = do_div(tmp, 1000000000LL);
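A standalone sketch of why div_s64_rem() is the right helper in the IIO_VAL_FRACTIONAL case above: do_div() is u64-only, so the old code misformatted negative fractional values (buf is an assumed caller buffer; the values are illustrative):

	/* format -5/4: expect "-1.250000000" */
	s64 tmp = div_s64((s64)-5 * 1000000000LL, 4);	/* -1250000000 */
	s32 rem;
	int integer = (int)div_s64_rem(tmp, 1000000000, &rem);	/* -1, rem = -250000000 */

	sprintf(buf, "%d.%09u\n", integer, abs(rem));	/* "-1.250000000" */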
index 7c566f516572957e7a6ab36764bc5ad79c16f058..3574945183fe8258fcb2bb98d6bc188b9aea5b9e 100644 (file)
@@ -76,7 +76,6 @@ config BH1750
 config BH1780
        tristate "ROHM BH1780 ambient light sensor"
        depends on I2C
-       depends on !SENSORS_BH1780
        help
         Say Y here to build support for the ROHM BH1780GLI ambient
         light sensor.
@@ -238,6 +237,8 @@ config MAX44000
        tristate "MAX44000 Ambient and Infrared Proximity Sensor"
        depends on I2C
        select REGMAP_I2C
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
         Say Y here if you want to build support for Maxim Integrated's
         MAX44000 ambient and infrared proximity sensor device.
index 6943688e66dfbebc5edae2959f4f43e558786c0a..e5a533cbd53fa58bbee3de2a7c624e2b252c62cc 100644 (file)
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev,
        data->vdda = devm_regulator_get(dev, "vdda");
        if (IS_ERR(data->vdda)) {
                dev_err(dev, "failed to get VDDA regulator\n");
-               ret = PTR_ERR(data->vddd);
+               ret = PTR_ERR(data->vdda);
                goto out_disable_vddd;
        }
        ret = regulator_enable(data->vdda);
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove);
 #ifdef CONFIG_PM
 static int bmp280_runtime_suspend(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_disable(data->vdda);
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev)
 
 static int bmp280_runtime_resume(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_enable(data->vddd);
index 2e3a70e1b24541677d80661d9dedb7742d1f3341..5656deb17261c63963bffddecaf8c48a98cd16e5 100644 (file)
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi)
                return ret;
        }
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                &as3935_trigger_handler, NULL);
 
        if (ret) {
index e6dfa1bd3defae37e1ee549e10886c2edeece697..5f65a78b27c9c56c52414dddbe2d16e12f123905 100644 (file)
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
        if (addr->dev_addr.bound_dev_if) {
                ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
-               if (!ndev)
-                       return -ENODEV;
+               if (!ndev) {
+                       ret = -ENODEV;
+                       goto err2;
+               }
 
                if (ndev->flags & IFF_LOOPBACK) {
                        dev_put(ndev);
-                       if (!id_priv->id.device->get_netdev)
-                               return -EOPNOTSUPP;
+                       if (!id_priv->id.device->get_netdev) {
+                               ret = -EOPNOTSUPP;
+                               goto err2;
+                       }
 
                        ndev = id_priv->id.device->get_netdev(id_priv->id.device,
                                                              id_priv->id.port_num);
-                       if (!ndev)
-                               return -ENODEV;
+                       if (!ndev) {
+                               ret = -ENODEV;
+                               goto err2;
+                       }
                }
 
                route->path_rec->net = &init_net;
index 3a3c5d73bbfc833a89c6143f0e732441c8488ab8..51c79b2fb0b837f4fbfe5a9672e98dd1dd304f61 100644 (file)
@@ -106,7 +106,6 @@ struct mcast_group {
        atomic_t                refcount;
        enum mcast_group_state  state;
        struct ib_sa_query      *query;
-       int                     query_id;
        u16                     pkey_index;
        u8                      leave_state;
        int                     retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
index 3aca7f6171b428eac865567978ff21d59ab39dbf..80f988984f4448ca42d53c1b9e2bba890948ec78 100644 (file)
@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
 
        spin_lock_irqsave(&ep->com.dev->lock, flags);
        _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+       if (idr_is_empty(&ep->com.dev->hwtid_idr))
+               wake_up(&ep->com.dev->wait);
        spin_unlock_irqrestore(&ep->com.dev->lock, flags);
 }
 
@@ -1827,8 +1829,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
                                (ep->mpa_pkt + sizeof(*mpa));
                        ep->ird = ntohs(mpa_v2_params->ird) &
                                MPA_V2_IRD_ORD_MASK;
+                       ep->ird = min_t(u32, ep->ird,
+                                       cur_max_read_depth(ep->com.dev));
                        ep->ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       ep->ord = min_t(u32, ep->ord,
+                                       cur_max_read_depth(ep->com.dev));
                        PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
                             ep->ord);
                        if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -2113,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
                }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, pdev, rt_tos2priority(tos));
-               if (!ep->l2t)
+               if (!ep->l2t) {
+                       dev_put(pdev);
                        goto out;
+               }
                ep->mtu = pdev->mtu;
                ep->tx_chan = cxgb4_port_chan(pdev);
                ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -3136,7 +3144,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
                        if (RELAXED_IRD_NEGOTIATION) {
-                               ep->ord = ep->ird;
+                               conn_param->ord = ep->ird;
                        } else {
                                ep->ird = conn_param->ird;
                                ep->ord = conn_param->ord;
index 812ab7278b8eec477183f451100b7c900adbeaec..ac926c942fee7bc72036e13f269e84156ded95d4 100644 (file)
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
        struct c4iw_cq *chp;
-       int ret;
+       int ret = 0;
        unsigned long flag;
 
        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
-       ret = t4_arm_cq(&chp->cq,
-                       (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+       t4_arm_cq(&chp->cq,
+                 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+       if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+               ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
-       if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
-               ret = 0;
        return ret;
 }
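
For context on the c4iw_arm_cq() rework: the verbs contract for ib_req_notify_cq() is that a caller passing IB_CQ_REPORT_MISSED_EVENTS gets a return value greater than zero when completions may already be pending, so it can re-poll instead of sleeping. A sketch of the consumer side of that contract (illustrative, not part of this series; handle_wc is hypothetical):

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        handle_wc(&wc);         /* hypothetical handler */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
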
index 071d7332ec061489080a9ef55c1a6cbd9622af4d..3c4b2126e0d12e319a7779505673a651af689211 100644 (file)
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
        c4iw_rdev_close(&ctx->dev->rdev);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
        idr_destroy(&ctx->dev->cqidr);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
        idr_destroy(&ctx->dev->qpidr);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
        idr_destroy(&ctx->dev->mmidr);
+       wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
        idr_destroy(&ctx->dev->hwtid_idr);
        idr_destroy(&ctx->dev->stid_idr);
        idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);
        INIT_LIST_HEAD(&devp->db_fc_list);
+       init_waitqueue_head(&devp->wait);
        devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
        if (c4iw_debugfs_root) {
index aa47e0ae80bc4c42d7a0009714a26be8e0ff7bbb..4b83b84f7ddf4fe56f6c9d573b0a7f00a629db74 100644 (file)
@@ -263,6 +263,7 @@ struct c4iw_dev {
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
+       wait_queue_head_t wait;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
index edb1172b6f54c14b991c1c3f76b36b612c841ca2..690435229be7a9afc17db2eb8c9a220b59546456 100644 (file)
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
index 6126bbe36095c21021f9cc5cdca3b75da299dc85..02173f4315fa62bec5e67171a45ab864a1e096da 100644 (file)
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
        return (CQE_GENBIT(cqe) == cq->gen);
 }
 
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+       return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
        int ret;
index 79575ee873f21af3361264b711419c267b6a23d0..0566393e5aba689c8ea73563a7e29bf54003da07 100644 (file)
@@ -47,7 +47,6 @@
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
-#include <linux/cpumask.h>
 
 #include "hfi.h"
 #include "affinity.h"
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
                           size_t count)
 {
        struct hfi1_affinity_node *entry;
-       struct cpumask mask;
+       cpumask_var_t mask;
        int ret, i;
 
        spin_lock(&node_affinity.lock);
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
        if (!entry)
                return -EINVAL;
 
-       ret = cpulist_parse(buf, &mask);
+       ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+       if (!ret)
+               return -ENOMEM;
+
+       ret = cpulist_parse(buf, mask);
        if (ret)
-               return ret;
+               goto out;
 
-       if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+       if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
                dd_dev_warn(dd, "Invalid CPU mask\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        mutex_lock(&sdma_affinity_mutex);
        /* reset the SDMA interrupt affinity details */
        init_cpu_mask_set(&entry->def_intr);
-       cpumask_copy(&entry->def_intr.mask, &mask);
+       cpumask_copy(&entry->def_intr.mask, mask);
        /*
         * Reassign the affinity for each SDMA interrupt.
         */
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
                if (ret)
                        break;
        }
-
        mutex_unlock(&sdma_affinity_mutex);
+out:
+       free_cpumask_var(mask);
        return ret ? ret : strnlen(buf, PAGE_SIZE);
 }
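
Switching from struct cpumask on the stack to cpumask_var_t matters because, with CONFIG_CPUMASK_OFFSTACK=y, a large NR_CPUS makes the mask big enough to threaten the kernel stack; the var form then allocates it from the heap. The alloc/parse/free shape, reduced (the consumer is hypothetical):

        cpumask_var_t mask;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = cpulist_parse("0-3,8", mask);     /* example CPU list */
        if (!ret && !cpumask_empty(mask))
                use_mask(mask);                 /* hypothetical */

        free_cpumask_var(mask);                 /* always pairs with the alloc */
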
 
index b32638d58ae82c73f920afee4a7fd0a4386581a8..cc38004cea42d2b56ce187fc1ba132b4e24398ea 100644 (file)
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, a negative
+ * errno on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+       int ret;
+       u8 status;
+
+       /* report success if not a QSFP */
+       if (ppd->port_type != PORT_TYPE_QSFP)
+               return 0;
+
+       /* read byte 2, the status byte */
+       ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+       if (ret < 0)
+               return ret;
+       if (ret != 1)
+               return -EIO;
+
+       return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was arrived at
+ * empirically on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+       if (test_qsfp_read(ppd)) {
+               /* read failed */
+               if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+                       dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+                       return;
+               }
+               dd_dev_info(ppd->dd,
+                           "QSFP not responding, waiting and retrying %d\n",
+                           (int)ppd->qsfp_retry_count);
+               ppd->qsfp_retry_count++;
+               queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+                                  msecs_to_jiffies(QSFP_RETRY_WAIT));
+               return;
+       }
+       ppd->qsfp_retry_count = 0;
+
+       /*
+        * Tune the SerDes to a ballpark setting for optimal signal and bit
+        * error rate.  Needs to be done before starting the link.
+        */
+       tune_serdes(ppd);
+       start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+       struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+                                                 start_link_work.work);
+       try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
        struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
                set_qsfp_int_n(ppd, 1);
        }
 
-       /*
-        * Tune the SerDes to a ballpark setting for
-        * optimal signal and bit error rate
-        * Needs to be done before starting the link
-        */
-       tune_serdes(ppd);
-
-       return start_link(ppd);
+       try_start_link(ppd);
+       return 0;
 }
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
 
+       ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+       flush_delayed_work(&ppd->start_link_work);
+       cancel_delayed_work_sync(&ppd->start_link_work);
+
        ppd->offline_disabled_reason =
                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-       int num_kernel_contexts;
+       unsigned long num_kernel_contexts;
        int total_contexts;
        int ret;
        unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
         */
        if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
                dd_dev_err(dd,
-                          "Reducing # kernel rcv contexts to: %d, from %d\n",
+                          "Reducing # kernel rcv contexts to: %d, from %lu\n",
                           (int)(dd->chip_send_contexts - num_vls - 1),
-                          (int)num_kernel_contexts);
+                          num_kernel_contexts);
                num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
        }
        /*
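
try_start_link() is the usual bounded-retry shape built on delayed work: probe, and on failure re-queue yourself until the budget runs out. Its skeleton, with hypothetical names throughout:

        static void retry_work(struct work_struct *work)
        {
                struct obj *o = container_of(work, struct obj, dwork.work);

                if (probe_hw(o)) {              /* hypothetical probe */
                        if (o->retry_count++ >= MAX_RETRIES)
                                return;         /* budget exhausted */
                        queue_delayed_work(o->wq, &o->dwork,
                                           msecs_to_jiffies(WAIT_MS));
                        return;
                }
                o->retry_count = 0;
                start_hw(o);                    /* hypothetical */
        }

The quiesce path in hfi1_quiet_serdes() pairs with this by raising the count to the budget before flushing and cancelling the work, so no further retry can be queued.
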
index ed11107c50fe50614efc39b469a31cb7e240291c..e29573769efc04a6a059db8c06ae2b54a45d1d70 100644 (file)
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
index dbab9d9cc288c7edb296ab25d890d3d79c93ef22..5e9be16f6cd3bc9fabd8f741972a24b1e00bb1e7 100644 (file)
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce SRCU protection in seq_file reads and seeks */
+static ssize_t hfi1_seq_read(
+       struct file *file,
+       char __user *buf,
+       size_t size,
+       loff_t *ppos)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       ssize_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_read(file, buf, size, ppos);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
+static loff_t hfi1_seq_lseek(
+       struct file *file,
+       loff_t offset,
+       int whence)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       loff_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_lseek(file, offset, whence);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
        .owner   = THIS_MODULE, \
        .open    = _##name##_open, \
-       .read    = seq_read, \
-       .llseek  = seq_lseek, \
+       .read    = hfi1_seq_read, \
+       .llseek  = hfi1_seq_lseek, \
        .release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
        DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_opcode_stats_perctx *opstats;
 
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(opstats->stats))
                return NULL;
        return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -223,28 +253,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
 DEBUGFS_FILE_OPS(ctx_stats);
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+       __acquires(RCU)
 {
        struct qp_iter *iter;
        loff_t n = *pos;
 
-       rcu_read_lock();
        iter = qp_iter_init(s->private);
+
+       /* the matching rcu_read_unlock() is in _qp_stats_seq_stop() */
+       rcu_read_lock();
+
        if (!iter)
                return NULL;
 
-       while (n--) {
+       do {
                if (qp_iter_next(iter)) {
                        kfree(iter);
                        return NULL;
                }
-       }
+       } while (n--);
 
        return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
                                loff_t *pos)
+       __must_hold(RCU)
 {
        struct qp_iter *iter = iter_ptr;
 
@@ -259,7 +293,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
@@ -281,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_ibdev *ibd;
        struct hfi1_devdata *dd;
 
-       rcu_read_lock();
        ibd = (struct hfi1_ibdev *)s->private;
        dd = dd_from_dev(ibd);
        if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -306,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -335,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, NULL, &counters);
        rval =  simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -352,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, &names, NULL);
        rval =  simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -379,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
        rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -396,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
        struct hfi1_pportdata *ppd;
        ssize_t rval;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        avail = hfi1_read_portcntrs(ppd, NULL, &counters);
        rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -430,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        int used;
        int i;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
        size = PAGE_SIZE;
        used = 0;
        tmp = kmalloc(size, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        used += scnprintf(tmp + used, size - used,
@@ -466,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
        ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -482,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
        u64 scratch0;
        u64 clear;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
 
        buff = kmalloc(count + 1, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto do_return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -523,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
  do_free:
        kfree(buff);
- do_return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -538,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
        char *tmp;
        int ret;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
        if (ret > 0)
                ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -565,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        int offset;
        int total_written;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -573,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -602,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -632,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        int offset;
        int total_read;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -640,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
        if (total_read < 0) {
@@ -669,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -697,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
        int ret;
        int total_written;
 
-       rcu_read_lock();
-       if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+               return -EINVAL;
 
        ppd = private2ppd(file);
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
                ret = -EFAULT;
                goto _free;
        }
-
        total_written = qsfp_write(ppd, target, *ppos, buff, count);
        if (total_written < 0) {
                ret = total_written;
@@ -729,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -757,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
        int ret;
        int total_read;
 
-       rcu_read_lock();
        if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
                ret = -EINVAL;
                goto _return;
@@ -790,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
  _free:
        kfree(buff);
  _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1006,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
        debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
 out:
        ibd->hfi1_ibdev_dbg = NULL;
-       synchronize_rcu();
 }
 
 /*
@@ -1031,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
 };
 
 static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1051,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
 }
 
 static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1069,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
 DEBUGFS_FILE_OPS(driver_stats_names);
 
 static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1086,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static u64 hfi1_sps_ints(void)
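
The theme of this debugfs.c patch is that the old code slept inside RCU read-side critical sections (kmalloc(..., GFP_KERNEL) and copy_to_user() under rcu_read_lock()), which is illegal; the reads are now guarded only by the debugfs SRCU API of this kernel era, whose read side may sleep. Reduced before/after:

        /* before (removed above): may sleep under rcu_read_lock() -> bug */
        rcu_read_lock();
        tmp = kmalloc(size, GFP_KERNEL);
        rcu_read_unlock();

        /* after: SRCU read sections may sleep, so the guard can cover the
         * whole, possibly sleeping, read */
        r = debugfs_use_file_start(dentry, &srcu_idx);
        if (likely(!r))
                r = seq_read(file, buf, size, ppos);
        debugfs_use_file_finish(srcu_idx);
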
index 8246dc7d0573a1be768fb7d65fab2fba01c7c686..303f10555729715a9a21fd5b240eb8daba39efc0 100644 (file)
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
 }
 
 static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
-                                     struct hfi1_packet packet,
+                                     struct hfi1_packet *packet,
                                      struct hfi1_devdata *dd)
 {
        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
-       struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
-                                                            packet.rhf_addr);
+       struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+                                                            packet->rhf_addr);
+       u8 etype = rhf_rcv_type(packet->rhf);
 
-       if (hdr2sc(hdr, packet.rhf) != 0xf) {
+       if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
                int hwstate = read_logical_state(dd);
 
                if (hwstate != LSTATE_ACTIVE) {
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
                        /* Auto activate link on non-SC15 packet receive */
                        if (unlikely(rcd->ppd->host_link_state ==
                                     HLS_UP_ARMED) &&
-                           set_armed_to_active(rcd, packet, dd))
+                           set_armed_to_active(rcd, &packet, dd))
                                goto bail;
                        last = process_rcv_packet(&packet, thread);
                }
index 1ecbec1923589c3ec96d0958ec45541aa2b5724b..7e03ccd2554db3a63f5f79966f0a21c1617e45c7 100644 (file)
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
        if (fd) {
                fd->rec_cpu_num = -1; /* no cpu affinity by default */
                fd->mm = current->mm;
+               atomic_inc(&fd->mm->mm_count);
        }
 
        fp->private_data = fd;
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                ret = assign_ctxt(fp, &uinfo);
                if (ret < 0)
                        return ret;
-               setup_ctxt(fp);
+               ret = setup_ctxt(fp);
                if (ret)
                        return ret;
                ret = user_init(fp);
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
        mutex_unlock(&hfi1_mutex);
        hfi1_free_ctxtdata(dd, uctxt);
 done:
+       mmdrop(fdata->mm);
        kobject_put(&dd->kobj);
        kfree(fdata);
        return 0;
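
fd->mm is cached at open time and used until close, so the open path now takes a reference on mm_count (which keeps struct mm_struct itself alive; mm_users is the separate reference that pins the address space) and the close path releases it with mmdrop(). On v4.11 and later kernels the increment is spelled mmgrab(). Reduced:

        /* open: pin the structure so the cached pointer stays valid */
        fd->mm = current->mm;
        atomic_inc(&fd->mm->mm_count);  /* mmgrab(fd->mm) on newer kernels */

        /* close: drop the pin; this may free the mm_struct */
        mmdrop(fdata->mm);
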
index 1000e0fd96d9b4972cec3327739f5ca7ff59b7b3..325ec211370fa233aed23ebc2480d4a091ad5214 100644 (file)
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
        struct work_struct freeze_work;
        struct work_struct link_downgrade_work;
        struct work_struct link_bounce_work;
+       struct delayed_work start_link_work;
        /* host link state variables */
        struct mutex hls_lock;
        u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
        u8 linkinit_reason;
        u8 local_tx_rate;       /* rate given to 8051 firmware */
        u8 last_pstate;         /* info only */
+       u8 qsfp_retry_count;
 
        /* placeholders for IB MAD packet settings */
        u8 overrun_threshold;
@@ -1272,9 +1274,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
               ((!!(rhf_dc_info(rhf))) << 4);
 }
 
+#define HFI1_JKEY_WIDTH       16
+#define HFI1_JKEY_MASK        (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ *   0 - 31    - users with administrator privileges
+ *  32 - 63    - kernel protocols using KDETH packets
+ *  64 - 65535 - all other users using KDETH packets
+ */
 static inline u16 generate_jkey(kuid_t uid)
 {
-       return from_kuid(current_user_ns(), uid) & 0xffff;
+       u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+       if (capable(CAP_SYS_ADMIN))
+               jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+       else if (jkey < 64)
+               jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+       return jkey;
 }
 
 /*
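
Concrete values make the new J_KEY partitioning easier to check; the UIDs below are examples only:

        /*
         * generate_jkey() worked examples:
         *   uid 0, CAP_SYS_ADMIN:   0 & (32 - 1)  = 0x0000  (admin range 0-31)
         *   uid 40, unprivileged:   40 | BIT(15)  = 0x8028  (lifted out of the
         *                           kernel range 32-63 by setting the top bit)
         *   uid 1000, unprivileged: 1000, already >= 64     = 0x03e8 (unchanged)
         */
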
@@ -1656,7 +1675,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
                                  const struct pci_device_id *);
 void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
 
 /* LED beaconing functions */
@@ -1788,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
 extern unsigned int hfi1_cu;
 extern unsigned int user_credit_return_threshold;
 extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
 extern uint kdeth_qp;
index a358d23ecd54d563d05a14e10c0cd885f6546b8f..384b43d2fd49937f2390ca05da1782864396e306 100644 (file)
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
 
 /* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
 
 static unsigned hfi1_rcvarr_split = 25;
 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+       INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
 
@@ -1333,7 +1334,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
                spin_unlock(&ppd->cc_state_lock);
 
                if (cc_state)
-                       call_rcu(&cc_state->rcu, cc_state_reclaim);
+                       kfree_rcu(cc_state, rcu);
        }
 
        free_credit_return(dd);
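
Since cc_state_reclaim() did nothing but kfree() the enclosing object, it can be replaced wholesale by kfree_rcu(), which takes the object pointer and the name of its embedded struct rcu_head field:

        /* before: a hand-written callback that only frees */
        call_rcu(&cc_state->rcu, cc_state_reclaim);

        /* after: same grace-period semantics, no callback needed */
        kfree_rcu(cc_state, rcu);       /* 'rcu' names the rcu_head member */
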
index 1263abe01999e3e84306b5594921bc4b11019fca..7ffc14f2152312f11d4a52e70b9bd9c084cdfd9a 100644 (file)
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
        u32 len = OPA_AM_CI_LEN(am) + 1;
        int ret;
 
+       if (dd->pport->port_type != PORT_TYPE_QSFP) {
+               smp->status |= IB_SMP_INVALID_FIELD;
+               return reply((struct ib_mad_hdr *)smp);
+       }
+
 #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
 #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
 #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -2599,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
        u8 lq, num_vls;
        u8 res_lli, res_ler;
        u64 port_mask;
-       unsigned long port_num;
+       u8 port_num;
        unsigned long vl;
        u32 vl_select_mask;
        int vfi;
@@ -2633,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
-       if ((u8)port_num != port) {
+       if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)pmp);
        }
@@ -2837,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3010,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3247,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
 
        spin_unlock(&ppd->cc_state_lock);
 
-       call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+       kfree_rcu(old_cc_state, rcu);
 }
 
 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
        return reply((struct ib_mad_hdr *)smp);
 }
 
-void cc_state_reclaim(struct rcu_head *rcu)
-{
-       struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
-       kfree(cc_state);
-}
-
 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
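
The repeated sizeof(port_mask) -> sizeof(port_mask) * 8 change fixes a units bug: find_first_bit() takes its size argument in bits, not bytes, so the old code searched only bits 0-7 of the 64-bit port-select mask. Reduced:

        u64 port_mask = BIT_ULL(9);             /* only bit 9 set */

        /* bytes passed as bits: searches bits 0..7, returns 8 (not found) */
        find_first_bit((unsigned long *)&port_mask, sizeof(port_mask));

        /* correct: searches bits 0..63, returns 9 */
        find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8);
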
index 8c25e1b58849a17a22a96d6ca8c4f54de4992098..3a1ef3056282e176f20128b37c2790690b4f850f 100644 (file)
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_extra_bytes(pbuf, from, to_fill);
                        from += to_fill;
                        nbytes -= to_fill;
+                       /* may not be enough valid bytes left to align */
+                       if (extra > nbytes)
+                               extra = nbytes;
 
                        /* ...now write carry */
                        dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_low_bytes(pbuf, from, extra);
                        from += extra;
                        nbytes -= extra;
+                       /*
+                        * If no bytes are left, return early - we are done.
+                        * NOTE: This short-circuit is *required* because
+                        * "extra" may have been reduced in size, leaving
+                        * "from" unaligned, while the code that follows
+                        * this if block requires QW alignment.
+                        */
+                       if (nbytes == 0)
+                               return;
                }
 
                /* at this point, from is QW aligned */
index a5aa3517e7d5c537d36786fd0d512cef6d60544c..4e4d8317c281cde45278c34104e29aca00b284f2 100644 (file)
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
 
        iter->dev = dev;
        iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
-       if (qp_iter_next(iter)) {
-               kfree(iter);
-               return NULL;
-       }
 
        return iter;
 }
index a207717ade2aac0da5fbf34e8adb70a53efc2a42..4e95ad810847ab91b76829b00e305bba48dd2057 100644 (file)
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
                   u8 *data)
 {
        struct hfi1_pportdata *ppd;
-       u32 excess_len = 0;
-       int ret = 0;
+       u32 excess_len = len;
+       int ret = 0, offset = 0;
 
        if (port_num > dd->num_pports || port_num < 1) {
                dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
        }
 
        memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+       if (addr <= QSFP_MONITOR_VAL_END &&
+           (addr + len) >= QSFP_MONITOR_VAL_START) {
+               /* Overlap with the dynamic channel monitor range */
+               if (addr < QSFP_MONITOR_VAL_START) {
+                       if (addr + len <= QSFP_MONITOR_VAL_END)
+                               len = addr + len - QSFP_MONITOR_VAL_START;
+                       else
+                               len = QSFP_MONITOR_RANGE;
+                       offset = QSFP_MONITOR_VAL_START - addr;
+                       addr = QSFP_MONITOR_VAL_START;
+               } else if (addr == QSFP_MONITOR_VAL_START) {
+                       offset = 0;
+                       if (addr + len > QSFP_MONITOR_VAL_END)
+                               len = QSFP_MONITOR_RANGE;
+               } else {
+                       offset = 0;
+                       if (addr + len > QSFP_MONITOR_VAL_END)
+                               len = QSFP_MONITOR_VAL_END - addr + 1;
+               }
+               /* Refresh the values of the dynamic monitors from the cable */
+               ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+               if (ret != len) {
+                       ret = -EAGAIN;
+                       goto set_zeroes;
+               }
+       }
+
        return 0;
 
 set_zeroes:
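
The clamping in get_cable_info() is easiest to verify with numbers; with the monitor window spanning bytes 22-81 (QSFP_MONITOR_RANGE = 60):

        /*
         * addr=10, len=100: starts below, ends past the window
         *   -> offset = 22 - 10 = 12, addr = 22, len = 60 (whole window)
         * addr=10, len=20:  starts below, ends inside (at 30)
         *   -> offset = 12, addr = 22, len = 30 - 22 = 8
         * addr=50, len=100: starts inside, ends past the window
         *   -> offset = 0, len = 81 - 50 + 1 = 32
         */
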
index 69275ebd9597322b599e97fce5804d17efa526f1..36cf52359848afdc5180d03602d587d3813f5b71 100644 (file)
@@ -74,6 +74,9 @@
 /* Defined fields that Intel requires of qualified cables */
 /* Byte 0 is Identifier, not checked */
 /* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
 #define QSFP_TX_CTRL_BYTE_OFFS 86
 #define QSFP_PWR_CTRL_BYTE_OFFS 93
 #define QSFP_CDR_CTRL_BYTE_OFFS 98
index 0ecf27903dc20f62ab2bd26c307d8be7afcfab5e..1694037d1eee287b34559cadf11ab2bf5019ea80 100644 (file)
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_HCRC_LOWER_SHIFT    24
 #define KDETH_HCRC_LOWER_MASK     0xff
 
+#define AHG_KDETH_INTR_SHIFT 12
+
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
                /* Clear KDETH.SH on last packet */
                if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
                        val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
-                                                               INTR) >> 16);
+                                                    INTR) <<
+                                          AHG_KDETH_INTR_SHIFT);
                        val &= cpu_to_le16(~(1U << 13));
                        AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
                } else {
index b738acdb9b027705f11257c8ab25cbaeba652159..8ec09e470f848334e746b50326c6eadcf57785cd 100644 (file)
@@ -232,7 +232,7 @@ struct i40iw_device {
        struct i40e_client *client;
        struct i40iw_hw hw;
        struct i40iw_cm_core cm_core;
-       unsigned long *mem_resources;
+       u8 *mem_resources;
        unsigned long *allocated_qps;
        unsigned long *allocated_cqs;
        unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
        *next = resource_num + 1;
        if (*next == max_resources)
                *next = 0;
-       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
        *req_resource_num = resource_num;
+       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
 
        return 0;
 }
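
Moving the *req_resource_num store above the unlock follows the general rule that a result computed under a lock should also be published under it; otherwise a concurrent allocate/free on the same bitmap can leave the caller holding a stale number. The shape, with a hypothetical allocator:

        spin_lock_irqsave(&lock, flags);
        n = claim_free_slot();          /* hypothetical */
        *out = n;                       /* publish while still protected */
        spin_unlock_irqrestore(&lock, flags);
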
index 5026dc79978a7c2ea2a1040861913a3aeaf8d7d9..7ca0638579c0b31d9cd0fdc71ea6858b77dba7ea 100644 (file)
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
                buf += hdr_len;
        }
 
-       if (pd_len)
-               memcpy(buf, pdata->addr, pd_len);
+       if (pdata && pdata->addr)
+               memcpy(buf, pdata->addr, pdata->size);
 
        atomic_set(&sqbuf->refcount, 1);
 
@@ -3346,26 +3346,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
        return 0;
 }
 
-/**
- * i40iw_loopback_nop - Send a nop
- * @qp: associated hw qp
- */
-static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
-{
-       u64 *wqe;
-       u64 header;
-
-       wqe = qp->qp_uk.sq_base->elem;
-       set_64bit_val(wqe, 0, 0);
-       set_64bit_val(wqe, 8, 0);
-       set_64bit_val(wqe, 16, 0);
-
-       header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
-           LS_64(0, I40IWQPSQ_SIGCOMPL) |
-           LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-       set_64bit_val(wqe, 24, header);
-}
-
 /**
  * i40iw_qp_disconnect - free qp and close cm
  * @iwqp: associate qp for the connection
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        } else {
                if (iwqp->page)
                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
-               i40iw_loopback_nop(&iwqp->sc_qp);
+               dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
        }
 
        if (iwqp->page)
index 3ee0cad96bc688457475a1f727969e7f05336e62..0c92a40b3e8699f6d14c24bc51b0ac98e3e8ad45 100644 (file)
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
                info.dont_send_fin = false;
        if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
                info.reset_tcp_conn = true;
+       iwqp->hw_iwarp_state = state;
        i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
 }
 
index 6e9081380a276cbb78da8687820b9c7092f684af..445e230d5ff88f6de4e1b0e5d0d81160028842cf 100644 (file)
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
        .notifier_call = i40iw_net_event
 };
 
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
 
 /**
  * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
  */
 static void i40iw_register_notifiers(void)
 {
-       if (!i40iw_notifiers_registered) {
+       if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
                register_inetaddr_notifier(&i40iw_inetaddr_notifier);
                register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
                register_netevent_notifier(&i40iw_net_notifier);
        }
-       i40iw_notifiers_registered++;
 }
 
 /**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
                        i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
                /* fallthrough */
        case INET_NOTIFIER:
-               if (i40iw_notifiers_registered > 0) {
-                       i40iw_notifiers_registered--;
+               if (!atomic_dec_return(&i40iw_notifiers_registered)) {
                        unregister_netevent_notifier(&i40iw_net_notifier);
                        unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
                        unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
@@ -1558,6 +1556,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
        enum i40iw_status_code status;
        struct i40iw_handler *hdl;
 
+       hdl = i40iw_find_netdev(ldev->netdev);
+       if (hdl)
+               return 0;
+
        hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
        if (!hdl)
                return -ENOMEM;
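
Converting i40iw_notifiers_registered to an atomic_t turns register/unregister into the standard first-user/last-user idiom without needing a dedicated lock; a sketch, with hypothetical register helpers:

        static atomic_t registered;

        static void maybe_register(void)
        {
                if (atomic_inc_return(&registered) == 1)
                        do_register();          /* hypothetical: first user */
        }

        static void maybe_unregister(void)
        {
                if (!atomic_dec_return(&registered))
                        do_unregister();        /* hypothetical: last user */
        }
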
index 0e8db0a3514153d7659977ff31d4dbaf45a103d5..6fd043b1d71413fbcebe8a209121a9cee50a5586 100644 (file)
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
 {
        if (!mem)
                return I40IW_ERR_PARAM;
+       /*
+        * mem->va points to the parent of mem, so neither mem nor mem->va
+        * can be touched once mem->va is freed
+        */
        kfree(mem->va);
-       mem->va = NULL;
        return 0;
 }
 
index 2360338877bf68ca4a809d153f83a64326fa467c..6329c971c22fc383330099fa07819b4666ee48b8 100644 (file)
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
        return &iwqp->ibqp;
 error:
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
-       kfree(mem);
        return ERR_PTR(err_code);
 }
 
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
                }
                if (iwpbl->pbl_allocated)
                        i40iw_free_pble(iwdev->pble_rsrc, palloc);
-               kfree(iwpbl->iwmr);
-               iwpbl->iwmr = NULL;
+               kfree(iwmr);
                return 0;
        }
 
index d6fc8a6e8c3324fcef4d9081167cce9cc998f13b..5df63dacaaa32f2b0a4944559181e3f877bb94b5 100644 (file)
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
                checksum == cpu_to_be16(0xffff);
 }
 
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
-                          unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+                           unsigned tail, struct mlx4_cqe *cqe, int is_eth)
 {
        struct mlx4_ib_proxy_sqp_hdr *hdr;
 
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
                wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
                wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        }
-
-       return 0;
 }
 
 static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -689,12 +687,6 @@ repoll:
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;
 
-       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
-                    is_send)) {
-               pr_warn("Completion for NOP opcode detected!\n");
-               return -EINVAL;
-       }
-
        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
@@ -720,12 +712,6 @@ repoll:
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
-               if (unlikely(!mqp)) {
-                       pr_warn("CQ %06x with entry for unknown QPN %06x\n",
-                              cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-                       return -EINVAL;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -738,11 +724,6 @@ repoll:
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
-               if (unlikely(!msrq)) {
-                       pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
-                               cq->mcq.cqn, srq_num);
-                       return -EINVAL;
-               }
        }
 
        if (is_send) {
@@ -852,9 +833,11 @@ repoll:
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
-                            MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
-                               return use_tunnel_data(*cur_qp, cq, wc, tail,
-                                                      cqe, is_eth);
+                            MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+                               use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+                                               is_eth);
+                               return 0;
+                       }
                }
 
                wc->slid           = be16_to_cpu(cqe->rlid);
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
-       int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
        spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        }
 
        for (npolled = 0; npolled < num_entries; ++npolled) {
-               err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
-               if (err)
+               if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }
 
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return npolled;
-       else
-               return err;
+       return npolled;
 }
 
 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
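
The error plumbing disappears because ib_poll_cq()'s contract is to return the number of completions harvested; consumers treat a negative value as a fatal CQ error, which a stray NOP completion or a CQE for an already-destroyed QP is not. A typical consumer loop, for reference (the handlers are hypothetical):

        struct ib_wc wc[16];
        int i, n;

        while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
                for (i = 0; i < n; i++)
                        handle_wc(&wc[i]);      /* hypothetical */
        }
        if (n < 0)
                fail_cq(cq);                    /* would be treated as fatal */
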
index 9c2e53d28f985740c3d22f9c13be776e58010567..0f21c3a25552d47b7305c0df2980a75b36a55970 100644 (file)
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
                /* Generate GUID changed event */
                if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+                       if (mlx4_is_master(dev->dev)) {
+                               union ib_gid gid;
+                               int err = 0;
+
+                               if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+                                       err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+                               else
+                                       gid.global.subnet_prefix =
+                                               eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+                               if (err) {
+                                       pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+                                               port, err);
+                               } else {
+                                       pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+                                                port,
+                                                (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+                                                be64_to_cpu(gid.global.subnet_prefix));
+                                       atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+                                                    be64_to_cpu(gid.global.subnet_prefix));
+                               }
+                       }
                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
                        /*if master, notify all slaves*/
                        if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
                if (err)
                        goto demux_err;
                dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+               atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+                            be64_to_cpu(gid.global.subnet_prefix));
                err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                                      &dev->sriov.sqps[i]);
                if (err)
index 2af44c2de2624a75d90a675727b32f914ec53e05..87ba9bca4181c2b5115b0c455a691c2e9f66d649 100644 (file)
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
        bool per_port = !!(ibdev->dev->caps.flags2 &
                MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
 
+       if (mlx4_is_slave(ibdev->dev))
+               return 0;
+
        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
                /* i == 1 means we are building port counters */
                if (i && !per_port)
index 8f7ad07915b07e27ed7f512eabc07757269332c8..097bfcc4ee997eaab178f8d8353efe79fa00d0a7 100644 (file)
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
                if (!group->members[i])
                        leave_state |= (1 << i);
 
-       return leave_state & (group->rec.scope_join_state & 7);
+       return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
                } else
                        mcg_warn_group(group, "DRIVER BUG\n");
        } else if (group->state == MCAST_LEAVE_SENT) {
-               if (group->rec.scope_join_state & 7)
-                       group->rec.scope_join_state &= 0xf8;
+               if (group->rec.scope_join_state & 0xf)
+                       group->rec.scope_join_state &= 0xf0;
                group->state = MCAST_IDLE;
                mutex_unlock(&group->lock);
                if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
                           struct mcast_req *req)
 {
-       u8 group_join_state = group->rec.scope_join_state & 7;
+       u8 group_join_state = group->rec.scope_join_state & 0xf;
        int ref = 0;
        u16 status;
        struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
                        u8 cur_join_state;
 
                        resp_join_state = ((struct ib_sa_mcmember_data *)
-                                               group->response_sa_mad.data)->scope_join_state & 7;
-                       cur_join_state = group->rec.scope_join_state & 7;
+                                               group->response_sa_mad.data)->scope_join_state & 0xf;
+                       cur_join_state = group->rec.scope_join_state & 0xf;
 
                        if (method == IB_MGMT_METHOD_GET_RESP) {
                                /* successful join */
@@ -710,7 +710,7 @@ process_requests:
                req = list_first_entry(&group->pending_list, struct mcast_req,
                                       group_list);
                sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-               req_join_state = sa_data->scope_join_state & 0x7;
+               req_join_state = sa_data->scope_join_state & 0xf;
 
                /* For a leave request, we will immediately answer the VF, and
                 * update our internal counters. The actual leave will be sent
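
The 0x7 -> 0xf mask changes throughout this file follow from the MCMemberRecord layout: scope_join_state packs Scope into the high nibble and a four-bit JoinState into the low nibble, so masking with 0x7 silently dropped the fourth join-state bit. For reference:

        /*
         * JoinState bits (low nibble of scope_join_state):
         *   0x1  Full Member
         *   0x2  Non Member
         *   0x4  Send-Only Non Member
         *   0x8  Send-Only Full Member (added in IBTA 1.3)
         */
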
index 7c5832ede4bd0cc213139900eeb3999532192858..686ab48ff644163879049533985e81b33e4a054f 100644 (file)
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
        struct workqueue_struct *wq;
        struct workqueue_struct *ud_wq;
        spinlock_t ud_lock;
-       __be64 subnet_prefix;
+       atomic64_t subnet_prefix;
        __be64 guid_cache[128];
        struct mlx4_ib_dev *dev;
        /* the following lock protects both mcg_table and mcg_mgid0_list */
index 768085f5956645869e65aa5f81edb3f0b972a99b..7fb9629bd12b9736c181d7c5e0935ff5b7524d9a 100644 (file)
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
                sqp->ud_header.grh.flow_label    =
                        ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
                sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-               if (is_eth)
+               if (is_eth) {
                        memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-               else {
-               if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-                       /* When multi-function is enabled, the ib_core gid
-                        * indexes don't necessarily match the hw ones, so
-                        * we must use our own cache */
-                       sqp->ud_header.grh.source_gid.global.subnet_prefix =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                                      subnet_prefix;
-                       sqp->ud_header.grh.source_gid.global.interface_id =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                              guid_cache[ah->av.ib.gid_index];
-               } else
-                       ib_get_cached_gid(ib_dev,
-                                         be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                         ah->av.ib.gid_index,
-                                         &sqp->ud_header.grh.source_gid, NULL);
+               } else {
+                       if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                               /* When multi-function is enabled, the ib_core gid
+                                * indexes don't necessarily match the hw ones, so
+                                * we must use our own cache
+                                */
+                               sqp->ud_header.grh.source_gid.global.subnet_prefix =
+                                       cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+                                                                   demux[sqp->qp.port - 1].
+                                                                   subnet_prefix)));
+                               sqp->ud_header.grh.source_gid.global.interface_id =
+                                       to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                                                      guid_cache[ah->av.ib.gid_index];
+                       } else {
+                               ib_get_cached_gid(ib_dev,
+                                                 be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                                 ah->av.ib.gid_index,
+                                                 &sqp->ud_header.grh.source_gid, NULL);
+                       }
                }
                memcpy(sqp->ud_header.grh.destination_gid.raw,
                       ah->av.ib.dgid, 16);
index 308a358e5b46416f42285f0738200f505495bf5c..e4fac9292e4aeb68f3a5f0eb79b55cc3cf8eba1b 100644 (file)
@@ -553,12 +553,6 @@ repoll:
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
-               if (unlikely(!mqp)) {
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
-                                    cq->mcq.cqn, qpn);
-                       return -EINVAL;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -619,13 +613,6 @@ repoll:
                read_lock(&dev->mdev->priv.mkey_table.lock);
                mmkey = __mlx5_mr_lookup(dev->mdev,
                                         mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-               if (unlikely(!mmkey)) {
-                       read_unlock(&dev->mdev->priv.mkey_table.lock);
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
-                                    cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
-                       return -EINVAL;
-               }
-
                mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        unsigned long flags;
        int soft_polled = 0;
        int npolled;
-       int err = 0;
 
        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                soft_polled = poll_soft_wc(cq, num_entries, wc);
 
        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
-               err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
-               if (err)
+               if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
                        break;
        }
 
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return soft_polled + npolled;
-       else
-               return err;
+       return soft_polled + npolled;
 }
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
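
After this change mlx5_poll_one()'s return value only means "stop polling", and mlx5_ib_poll_cq() always returns the number of completions written to wc; negative returns stay reserved for fatal errors, per the ib_poll_cq() contract. A typical consumer loop under that contract (handle_completion() is a placeholder):

    struct ib_wc wc[16];
    int i, n;

    while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
            for (i = 0; i < n; i++)
                    handle_completion(&wc[i]);      /* placeholder */
    }
    if (n < 0)
            pr_err("poll_cq failed: %d\n", n);
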
index a84bb766fc62874bc45303268c25b961e45cd4f1..e19537cf44ab3890e51f1262f0064378fecaf44b 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
-#include <linux/io-mapping.h>
 #if defined(CONFIG_X86)
 #include <asm/pat.h>
 #endif
@@ -289,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-       return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+               return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       return 0;
 }
 
 enum {
@@ -1429,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
                                             dmac_47_16),
                                ib_spec->eth.val.dst_mac);
 
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                            smac_47_16),
+                               ib_spec->eth.mask.src_mac);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                                            smac_47_16),
+                               ib_spec->eth.val.src_mac);
+
                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 vlan_tag, 1);
@@ -1850,6 +1858,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                                           int domain)
 {
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_qp *mqp = to_mqp(qp);
        struct mlx5_ib_flow_handler *handler = NULL;
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_ib_flow_prio *ft_prio;
@@ -1876,7 +1885,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
        }
 
        dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-       dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+       if (mqp->flags & MLX5_IB_QP_RSS)
+               dst->tir_num = mqp->rss_qp.tirn;
+       else
+               dst->tir_num = mqp->raw_packet_qp.rq.tirn;
 
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
index 40df2cca0609a6c3dfff09e738bec5450685f8e0..996b54e366b03d031a7791e8f80c55aba232e2f1 100644 (file)
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 
        addr = addr >> page_shift;
        tmp = (unsigned long)addr;
-       m = find_first_bit(&tmp, sizeof(tmp));
+       m = find_first_bit(&tmp, BITS_PER_LONG);
        skip = 1 << m;
        mask = skip - 1;
        i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                for (k = 0; k < len; k++) {
                        if (!(i & mask)) {
                                tmp = (unsigned long)pfn;
-                               m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+                               m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
                                skip = 1 << m;
                                mask = skip - 1;
                                base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                        } else {
                                if (base + p != pfn) {
                                        tmp = (unsigned long)p;
-                                       m = find_first_bit(&tmp, sizeof(tmp));
+                                       m = find_first_bit(&tmp, BITS_PER_LONG);
                                        skip = 1 << m;
                                        mask = skip - 1;
                                        base = pfn;
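
find_first_bit() takes the search size in bits, not bytes, so sizeof(tmp) (8 on 64-bit) limited the scan to the low byte and any page-alignment order above 2^7 went undetected. The difference in miniature:

    unsigned long tmp = 1UL << 9;           /* only bit 9 set */

    find_first_bit(&tmp, sizeof(tmp));      /* scans bits 0..7: returns 8, "not found" */
    find_first_bit(&tmp, BITS_PER_LONG);    /* scans the whole word: returns 9 */
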
index 372385d0f99384785123ba5e24fcb2ceddcc520e..95146f4aa3e3d7d2d3844b27c1a3bd8578bb4bbe 100644 (file)
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
        /* QP uses 1 as its source QP number */
        MLX5_IB_QP_SQPN_QP1                     = 1 << 6,
        MLX5_IB_QP_CAP_SCATTER_FCS              = 1 << 7,
+       MLX5_IB_QP_RSS                          = 1 << 8,
 };
 
 struct mlx5_umr_wr {
index 0dd7d93cac95b9e2bba86749b18f83846fa36245..affc3f6598cac6c0d6e794dbca0594aace42500b 100644 (file)
@@ -1449,6 +1449,7 @@ create_tir:
        kvfree(in);
        /* qpn is reserved for that QP */
        qp->trans_qp.base.mqp.qpn = 0;
+       qp->flags |= MLX5_IB_QP_RSS;
        return 0;
 
 err:
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct ib_send_wr *wr, unsigned *idx,
                     int *size, int nreq)
 {
-       int err = 0;
-
-       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
-               err = -ENOMEM;
-               return err;
-       }
+       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+               return -ENOMEM;
 
        *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
        *seg = mlx5_get_send_wqe(qp, *idx);
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
        *seg += sizeof(**ctrl);
        *size = sizeof(**ctrl) / 16;
 
-       return err;
+       return 0;
 }
 
 static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
-                       err = -ENOMEM;
+                       err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }
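
The errno change distinguishes a malformed request from resource exhaustion: -ENOMEM stays reserved for ring overflow (see begin_wqe() above), while an SGE list longer than the queue supports is a caller bug that retrying cannot fix. Condensed from the hunks, as a sketch:

    if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
            return -ENOMEM;         /* queue full: transient */

    if (unlikely(wr->num_sge > qp->sq.max_gs))
            return -EINVAL;         /* oversized request: permanent */
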
index 16740dcb876bd4f93e2c4954d61aa51ba8579343..67fc0b6857e185ca4b4a17631741c7cac8a69922 100644 (file)
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_srq =
                (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
                OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
-       attr->max_send_sge = ((rsp->max_write_send_sge &
+       attr->max_send_sge = ((rsp->max_recv_send_sge &
                               OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
-       attr->max_recv_sge = (rsp->max_write_send_sge &
-                             OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
-           OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+       attr->max_recv_sge = (rsp->max_recv_send_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
-       attr->max_rdma_sge = (rsp->max_write_send_sge &
-                             OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
-           OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+       attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
index 0efc9662c6d8758df6b269a26acb2c4f51e2b23e..37df4481bb8fe6d403fde89327216668a258825a 100644 (file)
@@ -554,9 +554,9 @@ enum {
        OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK               = 0x18,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
-       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
-       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK         = 0xFFFF <<
-                               OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+       OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+       OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK          = 0xFFFF <<
+                               OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
 
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT       = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK        = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
        OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK           = 0xFFFF <<
                                OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+       OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT           = 0,
+       OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK            = 0xFFFF,
 };
 
 struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
        struct ocrdma_mbx_rsp rsp;
        u32 qp_srq_cq_ird_ord;
        u32 max_pd_ca_ack_delay;
-       u32 max_write_send_sge;
+       u32 max_recv_send_sge;
        u32 max_ird_ord_per_qp;
        u32 max_shared_ird_ord;
        u32 max_mr;
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config {
        u32 max_wqes_rqes_per_q;
        u32 max_cq_cqes_per_cq;
        u32 max_srq_rqe_sge;
+       u32 max_wr_rd_sge;
+       u32 ird_pgsz_num_pages;
 };
 
 struct ocrdma_fw_ver_rsp {
index b1a3d91fe8b94fe292fbe09a4c3820a8592e9f6e..0aa854737e74e98b871a2b714977c4d327f7cd17 100644 (file)
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
-       attr->max_sge = dev->attr.max_send_sge;
-       attr->max_sge_rd = attr->max_sge;
+       attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+       attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
index 5e75b43c596b608adfacfeb48ea955ffa914082d..5bad8e3b40bb34517948e7a18b0497f79d4b7e16 100644 (file)
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
 DEBUGFS_FILE(ctx_stats)
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+       __acquires(RCU)
 {
        struct qib_qp_iter *iter;
        loff_t n = *pos;
 
-       rcu_read_lock();
        iter = qib_qp_iter_init(s->private);
+
+       /* stop calls rcu_read_unlock */
+       rcu_read_lock();
+
        if (!iter)
                return NULL;
 
-       while (n--) {
+       do {
                if (qib_qp_iter_next(iter)) {
                        kfree(iter);
                        return NULL;
                }
-       }
+       } while (n--);
 
        return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
                                   loff_t *pos)
+       __must_hold(RCU)
 {
        struct qib_qp_iter *iter = iter_ptr;
 
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
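
__acquires(), __must_hold() and __releases() are sparse annotations: they compile to nothing, but let static analysis check that the rcu_read_lock() taken in the seq_file ->start() callback is balanced by ->stop(), even though the critical section spans separate functions. The same pattern in miniature (first_item() is a stand-in):

    static void *seq_start(struct seq_file *s, loff_t *pos)
            __acquires(RCU)
    {
            rcu_read_lock();                        /* released in seq_stop() */
            return first_item(s->private, *pos);    /* stand-in */
    }

    static void seq_stop(struct seq_file *s, void *v)
            __releases(RCU)
    {
            rcu_read_unlock();
    }
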
index fcdf37913a264d52b5204f771523b37a2b0c4985..c3edc033f7c4ca674380930f958c3b6b60c0cb0f 100644 (file)
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
 
        pos = *ppos;
 
-       if (pos != 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (count != sizeof(struct qib_flash)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       tmp = kmalloc(count, GFP_KERNEL);
-       if (!tmp) {
-               ret = -ENOMEM;
-               goto bail;
-       }
+       if (pos != 0 || count != sizeof(struct qib_flash))
+               return -EINVAL;
 
-       if (copy_from_user(tmp, buf, count)) {
-               ret = -EFAULT;
-               goto bail_tmp;
-       }
+       tmp = memdup_user(buf, count);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
 
        dd = private2dd(file);
        if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
 
 bail_tmp:
        kfree(tmp);
-
-bail:
        return ret;
 }
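
memdup_user() folds the kmalloc()/copy_from_user() pair and both of its failure paths into a single call that returns the filled buffer or an ERR_PTR. Its contract is roughly (a sketch, not the exact kernel implementation):

    void *memdup_user(const void __user *src, size_t len)
    {
            void *p = kmalloc(len, GFP_KERNEL);

            if (!p)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(p, src, len)) {
                    kfree(p);
                    return ERR_PTR(-EFAULT);
            }
            return p;
    }
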
 
index 9cc0aae1d78191735b31eaed33bca1ee21de7d01..f9b8cd2354d1ba6f6b077aea249f00b2f3a8e8bb 100644 (file)
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
                return NULL;
 
        iter->dev = dev;
-       if (qib_qp_iter_next(iter)) {
-               kfree(iter);
-               return NULL;
-       }
 
        return iter;
 }
index c229b9f4a52da65d8559d832d930e0c73d7109d9..0a89a955550b29ce327751ce5c2274638b1eccb9 100644 (file)
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void)
                return err;
        }
 
-       if (pci_register_driver(&usnic_ib_pci_driver)) {
+       err = pci_register_driver(&usnic_ib_pci_driver);
+       if (err) {
                usnic_err("Unable to register with PCI\n");
                goto out_umem_fini;
        }
index 80c4b6b401b83af99d8a1dde1ada784daeb8b6cb..46b64970058ece11bf3c272ca5fb80d3c1cabff6 100644 (file)
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
 {
        rvt_deinit_mregion(&mr->mr);
        rvt_free_lkey(&mr->mr);
-       vfree(mr);
+       kfree(mr);
 }
 
 /**
index bdb540f25a888dcc24b24cf96268c6d8f8a3c479..870b4f212fbcfde8e09031897105614532eee244 100644 (file)
@@ -873,7 +873,8 @@ bail_qpn:
        free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-       vfree(qp->r_rq.wq);
+       if (!qp->ip)
+               vfree(qp->r_rq.wq);
 
 bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);
index 55f0e8f0ca7993f4f00993d7df9ac8525e021628..ddd59270ff6dea6ca4c5f4d032567141dcc78902 100644 (file)
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
                return err;
        }
 
-       err = rxe_net_init();
+       err = rxe_net_ipv4_init();
        if (err) {
-               pr_err("rxe: unable to init\n");
+               pr_err("rxe: unable to init ipv4 tunnel\n");
                rxe_cache_exit();
-               return err;
+               goto exit;
+       }
+
+       err = rxe_net_ipv6_init();
+       if (err) {
+               pr_err("rxe: unable to init ipv6 tunnel\n");
+               rxe_cache_exit();
+               goto exit;
        }
+
+       err = register_netdevice_notifier(&rxe_net_notifier);
+       if (err) {
+               pr_err("rxe: Failed to rigister netdev notifier\n");
+               goto exit;
+       }
+
        pr_info("rxe: loaded\n");
 
        return 0;
+
+exit:
+       rxe_release_udp_tunnel(recv_sockets.sk4);
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       return err;
 }
 
 static void __exit rxe_module_exit(void)
index 36f67de44095701ecbd6e857a7312f78fb5dbc26..1c59ef2c67aaec74c93a2c27323214446219f6f1 100644 (file)
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
                                        qp->req.need_retry = 1;
                                        rxe_run_task(&qp->req.task, 1);
                                }
+
+                               if (pkt) {
+                                       rxe_drop_ref(pkt->qp);
+                                       kfree_skb(skb);
+                               }
+
                                goto exit;
+
                        } else {
                                wqe->status = IB_WC_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
                case COMPST_ERROR:
                        do_complete(qp, wqe);
                        rxe_qp_error(qp);
+
+                       if (pkt) {
+                               rxe_drop_ref(pkt->qp);
+                               kfree_skb(skb);
+                       }
+
                        goto exit;
                }
        }
index 0b8d2ea8b41df86a4c07e57eff51b5fbca259f7a..eedf2f1cafdfa56f1db4223ebe42aaa74d51f4c9 100644 (file)
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
        return sock;
 }
 
-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
 {
-       udp_tunnel_sock_release(sk);
+       if (sk)
+               udp_tunnel_sock_release(sk);
 }
 
 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
        return NOTIFY_OK;
 }
 
-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
        .notifier_call = rxe_notify,
 };
 
-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
 {
-       int err;
-
        spin_lock_init(&dev_list_lock);
 
-       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), true);
-       if (IS_ERR(recv_sockets.sk6)) {
-               recv_sockets.sk6 = NULL;
-               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
-               return -1;
-       }
-
        recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), false);
+                               htons(ROCE_V2_UDP_DPORT), false);
        if (IS_ERR(recv_sockets.sk4)) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
                recv_sockets.sk4 = NULL;
-               recv_sockets.sk6 = NULL;
                pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
                return -1;
        }
 
-       err = register_netdevice_notifier(&rxe_net_notifier);
-       if (err) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
-               rxe_release_udp_tunnel(recv_sockets.sk4);
-               pr_err("rxe: Failed to rigister netdev notifier\n");
-       }
-
-       return err;
+       return 0;
 }
 
-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
 {
-       if (recv_sockets.sk6)
-               rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)
 
-       if (recv_sockets.sk4)
-               rxe_release_udp_tunnel(recv_sockets.sk4);
+       spin_lock_init(&dev_list_lock);
 
+       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+                                               htons(ROCE_V2_UDP_DPORT), true);
+       if (IS_ERR(recv_sockets.sk6)) {
+               recv_sockets.sk6 = NULL;
+               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+               return -1;
+       }
+#endif
+       return 0;
+}
+
+void rxe_net_exit(void)
+{
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
 }
index 7b06f76d16cc6058ab83dc4051ac0a71f4229f67..0daf7f09e5b5857e44506dc7057ab40abcc7c3b8 100644 (file)
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
 };
 
 extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
index 3d464c23e08bbbf0d55f3efbe922b44b12ca1442..144d2f129fcdf7f8a18b1fb8440ef6a597412fda 100644 (file)
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
                 * make a copy of the skb to post to the next qp
                 */
                skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
-                               skb_clone(skb, GFP_KERNEL) : NULL;
+                               skb_clone(skb, GFP_ATOMIC) : NULL;
 
                pkt->qp = qp;
                rxe_add_ref(qp);
index 33b2d9d77021716c76bee6f149fa1835e456918a..13a848a518e8855e11d69cbd24e7c908bc89b23f 100644 (file)
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 }
 
 static void update_wqe_state(struct rxe_qp *qp,
-                            struct rxe_send_wqe *wqe,
-                            struct rxe_pkt_info *pkt,
-                            enum wqe_state *prev_state)
+               struct rxe_send_wqe *wqe,
+               struct rxe_pkt_info *pkt)
 {
-       enum wqe_state prev_state_ = wqe->state;
-
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
-
-       *prev_state = prev_state_;
 }
 
-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-                        struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+                          struct rxe_send_wqe *wqe,
+                          struct rxe_pkt_info *pkt,
+                          int payload)
 {
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
 
-       qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+                      struct rxe_qp *qp,
+                      struct rxe_send_wqe *rollback_wqe,
+                      struct rxe_qp *rollback_qp)
+{
+       rollback_wqe->state     = wqe->state;
+       rollback_wqe->first_psn = wqe->first_psn;
+       rollback_wqe->last_psn  = wqe->last_psn;
+       rollback_qp->req.psn    = qp->req.psn;
+}
 
+static void rollback_state(struct rxe_send_wqe *wqe,
+                          struct rxe_qp *qp,
+                          struct rxe_send_wqe *rollback_wqe,
+                          struct rxe_qp *rollback_qp)
+{
+       wqe->state     = rollback_wqe->state;
+       wqe->first_psn = rollback_wqe->first_psn;
+       wqe->last_psn  = rollback_wqe->last_psn;
+       qp->req.psn    = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+                        struct rxe_pkt_info *pkt, int payload)
+{
+       qp->req.opcode = pkt->opcode;
 
        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
        int mtu;
        int opcode;
        int ret;
-       enum wqe_state prev_state;
+       struct rxe_qp rollback_qp;
+       struct rxe_send_wqe rollback_wqe;
 
 next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
                goto err;
        }
 
-       update_wqe_state(qp, wqe, &pkt, &prev_state);
+       /*
+        * To prevent a race on wqe access between requester and completer,
+        * the wqe's state and psn members need to be set before calling
+        * rxe_xmit_packet(). Otherwise, the completer might initiate an
+        * unjustified retry flow.
+        */
+       save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+       update_wqe_state(qp, wqe, &pkt);
+       update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;
                kfree_skb(skb);
 
-               wqe->state = prev_state;
+               rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
 
                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
index ebb03b46e2ad71e4ca881f1af4b8552eb8f331f6..3e0f0f2baace2518a92e1c7b8c6b995191730007 100644 (file)
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);
 
+       memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
-       res->first_psn = qp->resp.psn;
-       res->last_psn = qp->resp.psn;
-       res->cur_psn = qp->resp.psn;
+       res->first_psn = ack_pkt.psn;
+       res->last_psn  = ack_pkt.psn;
+       res->cur_psn   = ack_pkt.psn;
 
        rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
        if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
-                       bth_set_psn(SKB_TO_PKT(skb_copy),
-                                   qp->resp.psn - 1);
+
                        /* Resend the result. */
                        rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
                                             pkt, skb_copy);
index 4f7d9b48df643c7ef5f69b80f870a0f15c6efe87..9dbfcc0ab577694c71f853f48d784ae5b9c4592a 100644 (file)
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);
 
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
index 951d9abcca8b283e652368c7c9c96f1fa25f3df4..4ad297d3de897789141c87847d26d6fdf91a062e 100644 (file)
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        }
 }
 
+#define QPN_AND_OPTIONS_OFFSET 4
+
 static void ipoib_cm_tx_start(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
+       struct ipoib_path *path;
        int ret;
 
        struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
+
                qpn = IPOIB_QPN(neigh->daddr);
+               /*
+                * As long as the search is done under these two locks,
+                * path existence indicates its validity.
+                */
+               path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+               if (!path) {
+                       pr_info("%s ignore not valid path %pI6\n",
+                               __func__,
+                               neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+                       goto free_neigh;
+               }
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                spin_lock_irqsave(&priv->lock, flags);
 
                if (ret) {
+free_neigh:
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
index dc6d241b9406e9661394c7e985fda4b0eacbfa6b..be11d5d5b8c1d9ca84ab884bac32000e018c4c60 100644 (file)
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level == IPOIB_FLUSH_LIGHT) {
+               int oper_up;
                ipoib_mark_paths_invalid(dev);
+               /* Set IPoIB operation as down to prevent races between
+                * the flush flow, which leaves the MCG, and on-the-fly
+                * joins, which can happen during that time. The mcast
+                * restart task should deal with join requests we missed.
+                */
+               oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
+               if (oper_up)
+                       set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }
 
index 74bcaa0642261e672b6747e731d9c9ca56befd07..cc1c1b062ea58d530756586ee1ec00144c79b38e 100644 (file)
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
        return -EINVAL;
 }
 
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
index ba6be060a476b19d608c7797bed4526b7df04332..cae9bbcc27e7c81f2b3dfedc3e55987e428f43ed 100644 (file)
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
        INIT_LIST_HEAD(&isert_conn->node);
        init_completion(&isert_conn->login_comp);
        init_completion(&isert_conn->login_req_comp);
+       init_waitqueue_head(&isert_conn->rem_wait);
        kref_init(&isert_conn->kref);
        mutex_init(&isert_conn->mutex);
        INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -448,7 +449,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 
        isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
        if (!isert_conn->login_rsp_buf) {
-               isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+               ret = -ENOMEM;
                goto out_unmap_login_req_buf;
        }
 
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
        BUG_ON(!device);
 
        isert_free_rx_descriptors(isert_conn);
-       if (isert_conn->cm_id)
+       if (isert_conn->cm_id &&
+           !isert_conn->dev_removed)
                rdma_destroy_id(isert_conn->cm_id);
 
        if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        isert_device_put(device);
 
-       kfree(isert_conn);
+       if (isert_conn->dev_removed)
+               wake_up_interruptible(&isert_conn->rem_wait);
+       else
+               kfree(isert_conn);
 }
 
 static void
@@ -753,6 +758,7 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        struct isert_np *isert_np = cma_id->context;
+       struct isert_conn *isert_conn;
        int ret = 0;
 
        isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
-       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
                ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_conn = cma_id->qp->qp_context;
+               isert_conn->dev_removed = true;
+               isert_disconnected_handler(cma_id, event->event);
+               wait_event_interruptible(isert_conn->rem_wait,
+                                        isert_conn->state == ISER_CONN_DOWN);
+               kfree(isert_conn);
+               /*
+                * Return non-zero from the callback to destroy
+                * the rdma cm id.
+                */
+               return 1;
        case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
        case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
index fc791efe3a108178f1949f386548ed84ae81ebc8..c02ada57d7f5c4fa5b765a6401ac8e7bbd6096bb 100644 (file)
@@ -158,6 +158,8 @@ struct isert_conn {
        struct work_struct      release_work;
        bool                    logout_posted;
        bool                    snd_w_inv;
+       wait_queue_head_t       rem_wait;
+       bool                    dev_removed;
 };
 
 #define ISERT_MAX_CQ 64
index dfa23b075a88469b73c3f199f73c8b8ed571187f..883bbfe08e0efa64d87ab2652b0e773516d54450 100644 (file)
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
        if (ret)
                goto err_query_port;
 
+       snprintf(sport->port_guid, sizeof(sport->port_guid),
+               "0x%016llx%016llx",
+               be64_to_cpu(sport->gid.global.subnet_prefix),
+               be64_to_cpu(sport->gid.global.interface_id));
+
        if (!sport->mad_agent) {
                memset(&reg_req, 0, sizeof(reg_req));
                reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
                               sdev->device->name, i);
                        goto err_ring;
                }
-               snprintf(sport->port_guid, sizeof(sport->port_guid),
-                       "0x%016llx%016llx",
-                       be64_to_cpu(sport->gid.global.subnet_prefix),
-                       be64_to_cpu(sport->gid.global.interface_id));
        }
 
        spin_lock(&srpt_dev_lock);
index 7d61439be5f27aadb3480cee68a17b21ef17a5db..0c07e1023a4693ac705acea1b26068c65c54b39f 100644 (file)
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
        /* Reset the KBC controller to clear all previous status. */
        reset_control_assert(kbc->rst);
        udelay(100);
-       reset_control_assert(kbc->rst);
+       reset_control_deassert(kbc->rst);
        udelay(100);
 
        tegra_kbc_config_pins(kbc);
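
The original code asserted the reset line twice, so the controller was held in reset instead of being pulsed. The corrected sequence is the standard reset_control pulse; a generic helper sketch using the same API:

    static void pulse_reset(struct reset_control *rst)
    {
            reset_control_assert(rst);      /* put the block into reset */
            udelay(100);                    /* hold long enough to latch */
            reset_control_deassert(rst);    /* release; block starts clean */
    }
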
index faa295ec4f313a0f629fa3f2fa9ce5267c929b10..c83bce89028b2a2d270ffa3aed232788b8b491b6 100644 (file)
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                goto free_struct_buff;
 
        reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
-       map_offset = 0;
        for (i = 0; i < rdesc->num_registers; i++) {
                struct rmi_register_desc_item *item = &rdesc->registers[i];
                int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                item->reg = reg;
                item->reg_size = reg_size;
 
+               map_offset = 0;
+
                do {
                        for (b = 0; b < 7; b++) {
                                if (struct_buf[offset] & (0x1 << b))
index b4d34086e73f5aac0654e607028f13f59ef771be..405252a884dd41e233da4399ab109f213becb85a 100644 (file)
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
        serio->write            = i8042_aux_write;
        serio->start            = i8042_start;
        serio->stop             = i8042_stop;
+       serio->ps2_cmd_mutex    = &i8042_mutex;
        serio->port_data        = port;
        serio->dev.parent       = &i8042_platform_device->dev;
        if (idx < 0) {
index a61b2153ab8c20fb685d4af7595dba125df6023f..1ce3ecbe37f89153e7b863fab92b7d8f05f6ef14 100644 (file)
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi)
 
        ads784x_hwmon_unregister(spi, ts);
 
-       regulator_disable(ts->reg);
        regulator_put(ts->reg);
 
        if (!ts->get_pendown_state) {
index 7379fe153cf9946397203aadc2bd9767081589f9..f502c8488be86361592187acdffdecb164c4dfbd 100644 (file)
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
                data->max_fingers = 5; /* Most devices handle up to 5 fingers */
        }
 
-       error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+       error = device_property_read_string(dev, "firmware-name", &str);
        if (!error)
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s", str);
        else
                dev_dbg(dev, "Firmware file name read error. Using default.");
 }
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                if (!acpi_id)
                        return -ENODEV;
 
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       acpi_id->id);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", acpi_id->id);
 
                for (i = 0; i < strlen(data->fw_name); i++)
                        data->fw_name[i] = tolower(data->fw_name[i]);
        } else {
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       id->name);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", id->name);
        }
 
        return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                                         const struct i2c_device_id *id)
 {
-       snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+       snprintf(data->fw_name, sizeof(data->fw_name),
+                "silead/%s.fw", id->name);
        return 0;
 }
 #endif
@@ -464,7 +466,7 @@ static int silead_ts_probe(struct i2c_client *client,
                return -ENODEV;
 
        /* Power GPIO pin */
-       data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+       data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
        if (IS_ERR(data->gpio_power)) {
                if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
                        dev_err(dev, "Shutdown GPIO request failed\n");
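
The devm_ variant ties the descriptor's lifetime to the device, so the probe-error and remove paths no longer need an explicit gpiod_put(). Sketch of the resulting flow:

    data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
    if (IS_ERR(data->gpio_power))
            return PTR_ERR(data->gpio_power);       /* nothing to undo */
    /* no gpiod_put() on any exit path: devres releases it on unbind */
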
index ce801170d5f2342f1b2c750750bea720d9fc0ff2..641e887613193c76e7afb48a81c82b1fd732cc2d 100644 (file)
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
         * We may have concurrent producers, so we need to be careful
         * not to touch any of the shadow cmdq state.
         */
-       queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+       queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
        dev_err(smmu->dev, "skipping command in error state:\n");
        for (i = 0; i < ARRAY_SIZE(cmd); ++i)
                dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
                return;
        }
 
-       queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+       queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                case STRTAB_STE_0_CFG_S2_TRANS:
                        ste_live = true;
                        break;
+               case STRTAB_STE_0_CFG_ABORT:
+                       if (disable_bypass)
+                               break;
                default:
                        BUG(); /* STE corruption */
                }
index 4f49fe29f2029ad0f1fdc4f7015ed0fa13e7c2bf..2db74ebc324060683aa39fc51a94b49c60b5daab 100644 (file)
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-       int flags, ret;
-       u32 fsr, fsynr, resume;
+       u32 fsr, fsynr;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;
 
-       if (fsr & FSR_IGN)
-               dev_err_ratelimited(smmu->dev,
-                                   "Unexpected context fault (fsr 0x%x)\n",
-                                   fsr);
-
        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-       flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
        iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-       if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
-               ret = IRQ_HANDLED;
-               resume = RESUME_RETRY;
-       } else {
-               dev_err_ratelimited(smmu->dev,
-                   "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-                   iova, fsynr, cfg->cbndx);
-               ret = IRQ_NONE;
-               resume = RESUME_TERMINATE;
-       }
-
-       /* Clear the faulting FSR */
-       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
 
-       /* Retry or terminate any stalled transactions */
-       if (fsr & FSR_SS)
-               writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+       dev_err_ratelimited(smmu->dev,
+       "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+                           fsr, iova, fsynr, cfg->cbndx);
 
-       return ret;
+       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        }
 
        /* SCTLR */
-       reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+       reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
        if (stage1)
                reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
index 08a1e2f3690f148a38815239462a447321f25bb6..00c8a08d56e722349c64eff91b9ae9bb5a59a0bf 100644 (file)
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
        if (!iovad)
                return;
 
-       put_iova_domain(iovad);
+       if (iovad->granule)
+               put_iova_domain(iovad);
        kfree(iovad);
        domain->iova_cookie = NULL;
 }
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
        }
 }
 
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
                dma_addr_t dma_limit)
 {
+       struct iova_domain *iovad = domain->iova_cookie;
        unsigned long shift = iova_shift(iovad);
        unsigned long length = iova_align(iovad, size) >> shift;
 
+       if (domain->geometry.force_aperture)
+               dma_limit = min(dma_limit, domain->geometry.aperture_end);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
        if (!pages)
                return NULL;
 
-       iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+       iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
        if (!iova)
                goto out_free_pages;
 
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        phys_addr_t phys = page_to_phys(page) + offset;
        size_t iova_off = iova_offset(iovad, phys);
        size_t len = iova_align(iovad, size + iova_off);
-       struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+       struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
 
        if (!iova)
                return DMA_ERROR_CODE;
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                prev = s;
        }
 
-       iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+       iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
        if (!iova)
                goto out_restore_sg;
 
index 8c6139986d7d38f8be2ff3984978d813db56af24..def8ca1c982d5869a358ca69467a6f13763c46fb 100644 (file)
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
        int prot = IOMMU_READ;
        arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
 
-       if (attr & ARM_V7S_PTE_AP_RDONLY)
+       if (!(attr & ARM_V7S_PTE_AP_RDONLY))
                prot |= IOMMU_WRITE;
        if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
                prot |= IOMMU_MMIO;
        else if (pte & ARM_V7S_ATTR_C)
                prot |= IOMMU_CACHE;
+       if (pte & ARM_V7S_ATTR_XN(lvl))
+               prot |= IOMMU_NOEXEC;
 
        return prot;
 }
index 9ed0a8462ccf2983eaa0107e8cf8a4a81dd737b6..3dab13b4a211297fac3914e492ab709b27d25921 100644 (file)
@@ -55,19 +55,19 @@ struct mtk_iommu_data {
        bool                            enable_4GB;
 };
 
-static int compare_of(struct device *dev, void *data)
+static inline int compare_of(struct device *dev, void *data)
 {
        return dev->of_node == data;
 }
 
-static int mtk_iommu_bind(struct device *dev)
+static inline int mtk_iommu_bind(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
        return component_bind_all(dev, &data->smi_imu);
 }
 
-static void mtk_iommu_unbind(struct device *dev)
+static inline void mtk_iommu_unbind(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
index 112e17c2768be06587d340865ade9fe202adc7da..37f952dd9fc94bdc5faa6c2721822b8780dd46e1 100644 (file)
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 {
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
+       unsigned long flags;
        unsigned smr;
        int idx;
        int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
        gc = dgc->gc[idx];
 
-       irq_gc_lock(gc);
+       irq_gc_lock_irqsave(gc, flags);
        smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-       irq_gc_unlock(gc);
+       irq_gc_unlock_irqrestore(gc, flags);
 
        return ret;
 }
index 4f0d068e1abec2bc068b5a2185bef0fd6df729ff..2a624d87a0356a963e2fa1d888cdff8e0668ae41 100644 (file)
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                                 unsigned int *out_type)
 {
        struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+       unsigned long flags;
        unsigned smr;
        int ret;
 
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
        if (ret)
                return ret;
 
-       irq_gc_lock(bgc);
+       irq_gc_lock_irqsave(bgc, flags);
        irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
        smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
-       irq_gc_unlock(bgc);
+       irq_gc_unlock_irqrestore(bgc, flags);
 
        return ret;
 }
index 7ceaba81efb44964a286536da93165157832674a..36b9c28a5c917bcebed23bd896cc102ea2a754ed 100644 (file)
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
        u32 val;
 
        val = readl_relaxed(base + GITS_CTLR);
-       if (val & GITS_CTLR_QUIESCENT)
+       /*
+        * The GIC architecture specification requires the ITS to be both
+        * disabled and quiescent; otherwise writes to GITS_BASER<n> or
+        * GITS_CBASER have UNPREDICTABLE results.
+        */
+       if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
                return 0;
 
        /* Disable the generation of all interrupts to this ITS */
index 6fc56c3466b0039d6e3a51448b39a3c97ef53751..da6c0ba61d4f4c0bf73f323e735ab1386ed7f90e 100644 (file)
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
 {
-       int cpu = *base_cpu;
+       int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;
 
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 
                tlist |= 1 << (mpidr & 0xf);
 
-               cpu = cpumask_next(cpu, mask);
-               if (cpu >= nr_cpu_ids)
+               next_cpu = cpumask_next(cpu, mask);
+               if (next_cpu >= nr_cpu_ids)
                        goto out;
+               cpu = next_cpu;
 
                mpidr = cpu_logical_map(cpu);
 
@@ -667,13 +668,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 #endif
 
 #ifdef CONFIG_CPU_PM
+/* Check whether it's a single security state view */
+static bool gic_dist_security_disabled(void)
+{
+       return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
 static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
 {
        if (cmd == CPU_PM_EXIT) {
-               gic_enable_redist(true);
+               if (gic_dist_security_disabled())
+                       gic_enable_redist(true);
                gic_cpu_sys_reg_init();
-       } else if (cmd == CPU_PM_ENTER) {
+       } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
index c2cab572c5111c392c076d62232da4828cfa00a1..390fac59c6bca62fbb473e1fc7b091114b5371bf 100644 (file)
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
        int cpu;
        unsigned long flags, map = 0;
 
+       if (unlikely(nr_cpu_ids == 1)) {
+               /* Only one CPU? Let's do a self-IPI... */
+               writel_relaxed(2 << 24 | irq,
+                              gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+               return;
+       }
+
        raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
        /* Convert our logical CPU mask into a physical one. */
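
The 2 << 24 in the self-IPI path selects the GICD_SGIR target list filter value 0b10, "forward to the requesting CPU only", so no logical-to-physical CPU mask translation is needed when there is a single CPU. The encoding, per the GICv2 architecture specification (macro names here are illustrative):

    #define SGIR_TARGET_LIST        (0 << 24)       /* CPUs in bits [23:16] */
    #define SGIR_ALL_BUT_SELF       (1 << 24)       /* all except requester */
    #define SGIR_SELF               (2 << 24)       /* requesting CPU only */

    writel_relaxed(SGIR_SELF | irq,
                   gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
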
index c5f33c3bd22851a2950bd7554e5e413110d0b019..6185696405d5b64181a28d671b6797743768b9f1 100644 (file)
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
        if (!gic_local_irq_is_routable(intr))
                return -EPERM;
 
-       /*
-        * HACK: These are all really percpu interrupts, but the rest
-        * of the MIPS kernel code does not use the percpu IRQ API for
-        * the CP0 timer and performance counter interrupts.
-        */
-       switch (intr) {
-       case GIC_LOCAL_INT_TIMER:
-       case GIC_LOCAL_INT_PERFCTR:
-       case GIC_LOCAL_INT_FDC:
-               irq_set_chip_and_handler(virq,
-                                        &gic_all_vpes_local_irq_controller,
-                                        handle_percpu_irq);
-               break;
-       default:
-               irq_set_chip_and_handler(virq,
-                                        &gic_local_irq_controller,
-                                        handle_percpu_devid_irq);
-               irq_set_percpu_devid(virq);
-               break;
-       }
-
        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -713,9 +692,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        unsigned long flags;
        int i;
 
-       irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-                                handle_level_irq);
-
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
@@ -727,12 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        return 0;
 }
 
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
-                             irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+                             unsigned int hwirq)
 {
-       if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
-               return gic_local_irq_domain_map(d, virq, hw);
-       return gic_shared_irq_domain_map(d, virq, hw, 0);
+       struct irq_chip *chip;
+       int err;
+
+       if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   &gic_level_irq_controller,
+                                                   NULL);
+       } else {
+               switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+               case GIC_LOCAL_INT_TIMER:
+               case GIC_LOCAL_INT_PERFCTR:
+               case GIC_LOCAL_INT_FDC:
+                       /*
+                        * HACK: These are all really percpu interrupts, but
+                        * the rest of the MIPS kernel code does not use the
+                        * percpu IRQ API for them.
+                        */
+                       chip = &gic_all_vpes_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_irq);
+                       break;
+
+               default:
+                       chip = &gic_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_devid_irq);
+                       irq_set_percpu_devid(virq);
+                       break;
+               }
+
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   chip, NULL);
+       }
+
+       return err;
 }
 
 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -743,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
        int cpu, ret, i;
 
        if (spec->type == GIC_DEVICE) {
-               /* verify that it doesn't conflict with an IPI irq */
-               if (test_bit(spec->hwirq, ipi_resrv))
+               /* verify that shared irqs don't conflict with an IPI irq */
+               if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+                   test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
                        return -EBUSY;
 
-               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
-               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
-                                                    &gic_level_irq_controller,
-                                                    NULL);
+               return gic_setup_dev_chip(d, virq, spec->hwirq);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -771,11 +774,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
                        hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
 
                        ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
-                                                           &gic_edge_irq_controller,
+                                                           &gic_level_irq_controller,
                                                            NULL);
                        if (ret)
                                goto error;
 
+                       irq_set_handler(virq + i, handle_level_irq);
+
                        ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
                        if (ret)
                                goto error;
@@ -818,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
-       .map = gic_irq_domain_map,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .match = gic_irq_domain_match,
@@ -849,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
        struct irq_fwspec *fwspec = arg;
        struct gic_irq_spec spec = {
                .type = GIC_DEVICE,
-               .hwirq = fwspec->param[1],
        };
        int i, ret;
-       bool is_shared = fwspec->param[0] == GIC_SHARED;
 
-       if (is_shared) {
-               ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_hw_number_t hwirq;
+       if (fwspec->param[0] == GIC_SHARED)
+               spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+       else
+               spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
 
-               if (is_shared)
-                       hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
-               else
-                       hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+       ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+       if (ret)
+               return ret;
 
-               ret = irq_domain_set_hwirq_and_chip(d, virq + i,
-                                                   hwirq,
-                                                   &gic_level_irq_controller,
-                                                   NULL);
+       for (i = 0; i < nr_irqs; i++) {
+               ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
                if (ret)
                        goto error;
        }
@@ -890,10 +885,20 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
        return;
 }
 
+static void gic_dev_domain_activate(struct irq_domain *domain,
+                                   struct irq_data *d)
+{
+       if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+               gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+       else
+               gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
 static struct irq_domain_ops gic_dev_domain_ops = {
        .xlate = gic_dev_domain_xlate,
        .alloc = gic_dev_domain_alloc,
        .free = gic_dev_domain_free,
+       .activate = gic_dev_domain_activate,
 };
 
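The hunks above split the domain's work in two: gic_setup_dev_chip() runs at
allocation time and records only software state (which irq_chip, which flow
handler, percpu marking), while the new .activate callback defers the actual
hardware routing (gic_local_irq_domain_map()/gic_shared_irq_domain_map()) until
the interrupt is brought up. A minimal sketch of the same alloc/activate split,
where every example_* name is hypothetical and stands in for driver specifics:

    static int example_domain_alloc(struct irq_domain *d, unsigned int virq,
                                    unsigned int nr_irqs, void *arg)
    {
            irq_hw_number_t hwirq = example_hwirq_from_fwspec(arg);

            /* Software state only: associate hwirq, chip and handler. */
            return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
                                                 &example_chip, NULL);
    }

    static void example_domain_activate(struct irq_domain *d,
                                        struct irq_data *data)
    {
            /* Hardware state: program routing when the IRQ is enabled. */
            example_program_routing(data->hwirq);
    }

    static const struct irq_domain_ops example_domain_ops = {
            .alloc    = example_domain_alloc,
            .activate = example_domain_activate,
    };

With .alloc in place the legacy .map callback becomes redundant, which is why
gic_irq_domain_map() is deleted from gic_irq_domain_ops above.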
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
index 978eda8d6678bce573b27bfa99dee94103708ce8..8a3ba565106fd22ecdb4dd655eb1fbaa697adfe1 100644 (file)
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id);
 static struct i2c_driver ams_i2c_driver = {
        .driver = {
                .name   = "ams",
-               .owner  = THIS_MODULE,
        },
        .probe          = ams_i2c_probe,
        .remove         = ams_i2c_remove,
index 3024685e4cca553d16ad4cd893925343f0ca5ba1..96d16fca68b2469f5a73c57369deaac3ed04e8ec 100644 (file)
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = {
        .remove = wf_pm112_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 2f506b9d5a52bbc3ffa9cdf543b722641115e80d..e88cfb36a74d139b035574a9b078e16a13f7d35b 100644 (file)
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = {
        .remove = wf_pm72_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 82fc86a90c1a436d94ab8500334b62f4cce47dbc..bdfcb8a8bfbb45004090ee9672c897cf5cf02db3 100644 (file)
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = {
        .remove = wf_rm31_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 97c372908e78d23be7f0feed7c99c074f1e8ca34..7817d40d81e74ad282b1760b1dc50d090d10670d 100644 (file)
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
 config BCM_PDC_MBOX
        tristate "Broadcom PDC Mailbox"
        depends on ARM64 || COMPILE_TEST
+       depends on HAS_DMA
        default ARCH_BCM_IPROC
        help
          Mailbox implementation for the Broadcom PDC ring manager,
index cbe0c1ee4ba9cca5d442cc9e181291e0a42909ce..c19dd820ea9b4baafb1b2d15ebb5031f464af9d0 100644 (file)
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
  * this directory for a SPU.
  * @pdcs: PDC state structure
  */
-void pdc_setup_debugfs(struct pdc_state *pdcs)
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
 {
        char spu_stats_name[16];
 
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
                                                  &pdc_debugfs_stats);
 }
 
-void pdc_free_debugfs(void)
+static void pdc_free_debugfs(void)
 {
        if (debugfs_dir && simple_empty(debugfs_dir)) {
                debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
 {
        struct pdc_state *pdcs = chan->con_priv;
 
-       if (pdcs)
-               dev_dbg(&pdcs->pdev->dev,
-                       "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+       if (!pdcs)
+               return;
 
+       dev_dbg(&pdcs->pdev->dev,
+               "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
        pdc_ring_free(pdcs);
 }
 
index 95a4ca6ce6fffa34da0551199a5acde29193da46..849ad441cd76ba6b04ba2fa2aaac51471acdfd67 100644 (file)
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        if (!d->nr_stripes ||
            d->nr_stripes > INT_MAX ||
            d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
-               pr_err("nr_stripes too large");
+               pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+                       (unsigned)d->nr_stripes);
                return -ENOMEM;
        }
 
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca)
        free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
        if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
-           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                                struct block_device *bdev, struct cache *ca)
 {
        char name[BDEVNAME_SIZE];
-       const char *err = NULL;
+       const char *err = NULL; /* must be set for any error case */
        int ret = 0;
 
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                ca->discard = CACHE_DISCARD(&ca->sb);
 
        ret = cache_alloc(ca);
-       if (ret != 0)
+       if (ret != 0) {
+               if (ret == -ENOMEM)
+                       err = "cache_alloc(): -ENOMEM";
+               else
+                       err = "cache_alloc(): unknown error";
                goto err;
+       }
 
        if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
                err = "error calling kobject_add";
index 6fff794e0c723c0d8e11d1f1899ba109c0bcef29..13041ee37ad6b504a4e1c1182a10cce9d77d0f9d 100644 (file)
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
 static ssize_t
 location_store(struct mddev *mddev, const char *buf, size_t len)
 {
+       int rv;
 
+       rv = mddev_lock(mddev);
+       if (rv)
+               return rv;
        if (mddev->pers) {
-               if (!mddev->pers->quiesce)
-                       return -EBUSY;
-               if (mddev->recovery || mddev->sync_thread)
-                       return -EBUSY;
+               if (!mddev->pers->quiesce) {
+                       rv = -EBUSY;
+                       goto out;
+               }
+               if (mddev->recovery || mddev->sync_thread) {
+                       rv = -EBUSY;
+                       goto out;
+               }
        }
 
        if (mddev->bitmap || mddev->bitmap_info.file ||
            mddev->bitmap_info.offset) {
                /* bitmap already configured.  Only option is to clear it */
-               if (strncmp(buf, "none", 4) != 0)
-                       return -EBUSY;
+               if (strncmp(buf, "none", 4) != 0) {
+                       rv = -EBUSY;
+                       goto out;
+               }
                if (mddev->pers) {
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                        /* nothing to be done */;
                else if (strncmp(buf, "file:", 5) == 0) {
                        /* Not supported yet */
-                       return -EINVAL;
+                       rv = -EINVAL;
+                       goto out;
                } else {
-                       int rv;
                        if (buf[0] == '+')
                                rv = kstrtoll(buf+1, 10, &offset);
                        else
                                rv = kstrtoll(buf, 10, &offset);
                        if (rv)
-                               return rv;
-                       if (offset == 0)
-                               return -EINVAL;
+                               goto out;
+                       if (offset == 0) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        if (mddev->bitmap_info.external == 0 &&
                            mddev->major_version == 0 &&
-                           offset != mddev->bitmap_info.default_offset)
-                               return -EINVAL;
+                           offset != mddev->bitmap_info.default_offset) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        mddev->bitmap_info.offset = offset;
                        if (mddev->pers) {
                                struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                                mddev->pers->quiesce(mddev, 0);
                                if (rv) {
                                        bitmap_destroy(mddev);
-                                       return rv;
+                                       goto out;
                                }
                        }
                }
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
        }
+       rv = 0;
+out:
+       mddev_unlock(mddev);
+       if (rv)
+               return rv;
        return len;
 }
 
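The location_store() rework above is the classic single-exit locking shape:
take mddev_lock() once at the top, convert every early return into a goto, and
unlock in exactly one place. Reduced to a sketch (example_precondition() is
hypothetical):

    static ssize_t example_store(struct mddev *mddev, const char *buf,
                                 size_t len)
    {
            int rv = mddev_lock(mddev);

            if (rv)
                    return rv;      /* nothing held yet: plain return is fine */

            if (!example_precondition(mddev)) {
                    rv = -EBUSY;    /* all later failures funnel through out: */
                    goto out;
            }

            /* ... the real work, setting rv on failure ... */

            rv = 0;
    out:
            mddev_unlock(mddev);    /* exactly one unlock site */
            return rv ? rv : len;
    }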
index 6571c81465e1867772d042316a42042afb2a702c..8625040bae92fee2da914feeb544c48f2cdce595 100644 (file)
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);
 
-       dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+       dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
        if (!dm_bufio_wq)
                return -ENOMEM;
 
index 4e9784b4e0ac655659a521d7427658e44ad6bf7d..874295757caa443a16c6d66723148df91e7c459e 100644 (file)
@@ -181,7 +181,7 @@ struct crypt_config {
        u8 key[0];
 };
 
-#define MIN_IOS        16
+#define MIN_IOS        64
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
        unsigned i;
        int err;
 
-       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+       cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
+       /*
+        * Check if bio is too large, split as needed.
+        */
+       if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+           bio_data_dir(bio) == WRITE)
+               dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
        io->ctx.req = (struct skcipher_request *)(io + 1);
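To make the new cap concrete, assuming the common 4 KiB page size
(PAGE_SHIFT = 12) and BIO_MAX_PAGES = 256:

    BIO_MAX_PAGES << PAGE_SHIFT = 256 << 12 = 1048576 bytes (1 MiB)
    1048576 >> SECTOR_SHIFT     = 1048576 >> 9 = 2048 sectors

so dm_accept_partial_bio() trims an oversized write bio to 2048 sectors and the
device-mapper core resubmits the remainder; the exact figure scales with the
architecture's page size.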
index 97e446d54a151360fa1095311457ac149f426f38..6a2e8dd44a1b83b8fb45a4532458db3fb58db526 100644 (file)
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                pb->bio_submitted = true;
 
                /*
-                * Map reads as normal only if corrupt_bio_byte set.
+                * Error reads if neither corrupt_bio_byte nor drop_writes is set.
+                * Otherwise, flakey_end_io() will decide if the reads should be modified.
                 */
                if (bio_data_dir(bio) == READ) {
-                       /* If flags were specified, only corrupt those that match. */
-                       if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
-                           all_corrupt_bio_flags_match(bio, fc))
-                               goto map_bio;
-                       else
+                       if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
                                return -EIO;
+                       goto map_bio;
                }
 
                /*
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
        struct flakey_c *fc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-       /*
-        * Corrupt successful READs while in down state.
-        */
        if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
-               if (fc->corrupt_bio_byte)
+               if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+                   all_corrupt_bio_flags_match(bio, fc)) {
+                       /*
+                        * Corrupt successful matching READs while in down state.
+                        */
                        corrupt_bio_data(bio, fc);
-               else
+
+               } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+                       /*
+                        * Error read during the down_interval if drop_writes
+                        * Error reads during the down_interval if drop_writes
+                        */
                        return -EIO;
+               }
        }
 
        return error;
index 4ab68033f9d10e1d54e92fefe2bf2a916ab089fc..49e4d8d4558fb29bc170ccdf9ce07fe56a16a127 100644 (file)
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
                goto out;
        sector++;
 
-       bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+       atomic_inc(&lc->io_blocks);
+       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
        if (!bio) {
                DMERR("Couldn't alloc log bio");
                goto error;
        }
-       atomic_inc(&lc->io_blocks);
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = lc->logdev->bdev;
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
                if (ret != block->vecs[i].bv_len) {
                        atomic_inc(&lc->io_blocks);
                        submit_bio(bio);
-                       bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+                       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
                        if (!bio) {
                                DMERR("Couldn't alloc log bio");
                                goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ret = -EINVAL;
        lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
-       if (!lc->log_kthread) {
+       if (IS_ERR(lc->log_kthread)) {
+               ret = PTR_ERR(lc->log_kthread);
                ti->error = "Couldn't alloc kthread";
                dm_put_device(ti, lc->dev);
                dm_put_device(ti, lc->logdev);
index 4ca2d1df5b44c0811555d35651d59d62513a9584..07fc1ad42ec57c4c835c43990187410036e5e5bf 100644 (file)
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
        core->nr_regions = le64_to_cpu(disk->nr_regions);
 }
 
-static int rw_header(struct log_c *lc, int rw)
+static int rw_header(struct log_c *lc, int op)
 {
-       lc->io_req.bi_op = rw;
+       lc->io_req.bi_op = op;
+       lc->io_req.bi_op_flags = 0;
 
        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 }
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log)
 {
        int r;
 
-       r = rw_header(log, READ);
+       r = rw_header(log, REQ_OP_READ);
        if (r)
                return r;
 
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log)
        header_to_disk(&lc->header, lc->disk_header);
 
        /* write the new header */
-       r = rw_header(lc, WRITE);
+       r = rw_header(lc, REQ_OP_WRITE);
        if (!r) {
                r = flush_header(lc);
                if (r)
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log)
                        log_clear_bit(lc, lc->clean_bits, i);
        }
 
-       r = rw_header(lc, WRITE);
+       r = rw_header(lc, REQ_OP_WRITE);
        if (r)
                fail_log_device(lc);
        else {
index 1b9795d75ef898cd172bde67d1323ee8736606c3..8abde6b8cedc4540dac80b73256317fb671dbd69 100644 (file)
@@ -191,7 +191,6 @@ struct raid_dev {
 #define RT_FLAG_RS_BITMAP_LOADED       2
 #define RT_FLAG_UPDATE_SBS             3
 #define RT_FLAG_RESHAPE_RS             4
-#define RT_FLAG_KEEP_RS_FROZEN         5
 
 /* Array elements of 64 bit needed for rebuild/failed disk bits */
 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 {
        unsigned long min_region_size = rs->ti->len / (1 << 21);
 
+       if (rs_is_raid0(rs))
+               return 0;
+
        if (!region_size) {
                /*
                 * Choose a reasonable default.  All figures in sectors.
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs)
                        rebuild_cnt++;
 
        switch (rs->raid_type->level) {
+       case 0:
+               break;
        case 1:
                if (rebuild_cnt >= rs->md.raid_disks)
                        goto too_many;
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                case 0:
                        break;
                default:
+                       /*
+                        * We have to keep any raid0 data/metadata device pairs or
+                        * the MD raid0 personality will fail to start the array.
+                        */
+                       if (rs_is_raid0(rs))
+                               continue;
+
                        dev = container_of(rdev, struct raid_dev, rdev);
                        if (dev->meta_dev)
                                dm_put_device(ti, dev->meta_dev);
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
                } else {
                        /* Process raid1 without delta_disks */
                        mddev->raid_disks = rs->raid_disks;
-                       set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
                        reshape = false;
                }
        } else {
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
        if (reshape) {
                set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
-               set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
        } else if (mddev->raid_disks < rs->raid_disks)
                /* Create new superblocks and bitmaps, if any new disks */
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        goto bad;
 
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
-               set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
                /* Takeover ain't recovery, so disable recovery */
                rs_setup_recovery(rs, MaxSector);
                rs_set_new(rs);
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti)
 {
        struct raid_set *rs = ti->private;
 
-       if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
-               if (!rs->md.suspended)
-                       mddev_suspend(&rs->md);
-               rs->md.ro = 1;
-       }
+       if (!rs->md.suspended)
+               mddev_suspend(&rs->md);
+
+       rs->md.ro = 1;
 }
 
 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 {
        int i;
-       uint64_t failed_devices, cleared_failed_devices = 0;
+       uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
        unsigned long flags;
+       bool cleared = false;
        struct dm_raid_superblock *sb;
+       struct mddev *mddev = &rs->md;
        struct md_rdev *r;
 
+       /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
+       if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
+               return;
+
+       memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
+
        for (i = 0; i < rs->md.raid_disks; i++) {
                r = &rs->dev[i].rdev;
                if (test_bit(Faulty, &r->flags) && r->sb_page &&
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
                         * ourselves.
                         */
                        if ((r->raid_disk >= 0) &&
-                           (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+                           (mddev->pers->hot_remove_disk(mddev, r) != 0))
                                /* Failed to revive this device, try next */
                                continue;
 
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
                        clear_bit(Faulty, &r->flags);
                        clear_bit(WriteErrorSeen, &r->flags);
                        clear_bit(In_sync, &r->flags);
-                       if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+                       if (mddev->pers->hot_add_disk(mddev, r)) {
                                r->raid_disk = -1;
                                r->saved_raid_disk = -1;
                                r->flags = flags;
                        } else {
                                r->recovery_offset = 0;
-                               cleared_failed_devices |= 1 << i;
+                               set_bit(i, (void *) cleared_failed_devices);
+                               cleared = true;
                        }
                }
        }
-       if (cleared_failed_devices) {
+
+       /* If any failed devices could be cleared, update the failed_devices bits in all superblocks */
+       if (cleared) {
+               uint64_t failed_devices[DISKS_ARRAY_ELEMS];
+
                rdev_for_each(r, &rs->md) {
                        sb = page_address(r->sb_page);
-                       failed_devices = le64_to_cpu(sb->failed_devices);
-                       failed_devices &= ~cleared_failed_devices;
-                       sb->failed_devices = cpu_to_le64(failed_devices);
+                       sb_retrieve_failed_devices(sb, failed_devices);
+
+                       for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
+                               failed_devices[i] &= ~cleared_failed_devices[i];
+
+                       sb_update_failed_devices(sb, failed_devices);
                }
        }
 }
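Replacing the single uint64_t mask with a DISKS_ARRAY_ELEMS-sized array driven
by set_bit() lifts the 64-device ceiling, and it also retires the old
cleared_failed_devices |= 1 << i, an int-width shift that overflows once i
reaches 31. The same bookkeeping expressed with the generic bitmap helpers, as
a sketch (the example_* helpers are hypothetical):

    DECLARE_BITMAP(cleared, MAX_RAID_DEVICES);
    int i;

    bitmap_zero(cleared, MAX_RAID_DEVICES);

    for (i = 0; i < rs->md.raid_disks; i++)
            if (example_device_revived(rs, i))
                    set_bit(i, cleared);

    /* Clear the matching failed_devices bits in every superblock. */
    if (!bitmap_empty(cleared, MAX_RAID_DEVICES))
            example_update_superblocks(rs, cleared);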
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti)
                 * devices are reachable again.
                 */
                attempt_restore_of_faulty_devices(rs);
-       } else {
-               mddev->ro = 0;
-               mddev->in_sync = 0;
+       }
 
-               /*
-                * When passing in flags to the ctr, we expect userspace
-                * to reset them because they made it to the superblocks
-                * and reload the mapping anyway.
-                *
-                * -> only unfreeze recovery in case of a table reload or
-                *    we'll have a bogus recovery/reshape position
-                *    retrieved from the superblock by the ctr because
-                *    the ongoing recovery/reshape will change it after read.
-                */
-               if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
-                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+       mddev->ro = 0;
+       mddev->in_sync = 0;
 
-               if (mddev->suspended)
-                       mddev_resume(mddev);
-       }
+       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+       if (mddev->suspended)
+               mddev_resume(mddev);
 }
 
 static struct target_type raid_target = {
index 4ace1da17db8f4473fcb72d546a1067b943bc87b..6c25213ab38c8d271ff58e093adee523571443ed 100644 (file)
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
        struct path_info *pi = NULL;
        struct dm_path *current_path = NULL;
 
+       local_irq_save(flags);
        current_path = *this_cpu_ptr(s->current_path);
        if (current_path) {
                percpu_counter_dec(&s->repeat_count);
-               if (percpu_counter_read_positive(&s->repeat_count) > 0)
+               if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+                       local_irq_restore(flags);
                        return current_path;
+               }
        }
 
-       spin_lock_irqsave(&s->lock, flags);
+       spin_lock(&s->lock);
        if (!list_empty(&s->valid_paths)) {
                pi = list_entry(s->valid_paths.next, struct path_info, list);
                list_move_tail(&pi->list, &s->valid_paths);
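The reworked fast path keeps interrupts disabled across the this_cpu_ptr()
dereference and the repeat_count decrement, then enters the slow path with a
plain spin_lock() because interrupts are already off; the matching unlock
(outside this hunk) is expected to be spin_unlock_irqrestore(). The pattern in
isolation, with illustrative names:

    unsigned long flags;
    struct example_path *p;

    local_irq_save(flags);            /* stabilize per-CPU access */
    p = *this_cpu_ptr(state->current_path);
    if (p && example_fast_path_hit(p)) {
            local_irq_restore(flags);
            return p;                 /* lock never taken on the fast path */
    }

    spin_lock(&state->lock);          /* irqs already off: no _irqsave here */
    /* ... pick the next valid path ... */
    spin_unlock_irqrestore(&state->lock, flags);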
index 41573f1f626f4dd43dd97d5e6372e38e6987ae5d..34a840d9df7644d74d24bf44e547a9dbd735407b 100644 (file)
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
                goto err;
        }
        cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
-       if (!cinfo->ack_lockres)
+       if (!cinfo->ack_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        /* get sync CR lock on ACK. */
        if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
                pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
        pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
        snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
        cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
-       if (!cinfo->bitmap_lockres)
+       if (!cinfo->bitmap_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
                pr_err("Failed to get bitmap lock\n");
                ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
        }
 
        cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
-       if (!cinfo->resync_lockres)
+       if (!cinfo->resync_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
 
        return 0;
 err:
index d646f6e444f0d1921cf6d1df8e276f6acec76736..915e84d631a22111c2256ab2f4676875a2cc61b1 100644 (file)
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }
 
-               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
                        set_bit(MD_HAS_JOURNAL, &mddev->flags);
-                       if (mddev->recovery_cp == MaxSector)
-                               set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
-               }
        } else if (mddev->pers == NULL) {
                /* Insist on a good event counter while assembling, except for
                 * spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
                        working++;
                        if (test_bit(In_sync, &rdev->flags))
                                insync++;
+                       else if (test_bit(Journal, &rdev->flags))
+                               /* TODO: add journal count to md_u.h */
+                               ;
                        else
                                spare++;
                }
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
 
 int md_setup_cluster(struct mddev *mddev, int nodes)
 {
-       int err;
-
-       err = request_module("md-cluster");
-       if (err) {
-               pr_err("md-cluster module not found.\n");
-               return -ENOENT;
-       }
-
+       if (!md_cluster_ops)
+               request_module("md-cluster");
        spin_lock(&pers_lock);
+       /* ensure module won't be unloaded */
        if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+               pr_err("can't find md-cluster module or get its reference.\n");
                spin_unlock(&pers_lock);
                return -ENOENT;
        }
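Besides the clearer error message, md_setup_cluster() now shows the standard
module-pinning idiom: request_module() is only a best-effort load, and the ops
pointer is trusted only once a reference has been taken under the registration
lock. As a sketch (example_ops and example_owner are placeholders):

    if (!example_ops)
            request_module("example-provider");  /* may legitimately fail */

    spin_lock(&pers_lock);
    if (!example_ops || !try_module_get(example_owner)) {
            spin_unlock(&pers_lock);
            return -ENOENT;                      /* provider unavailable */
    }
    spin_unlock(&pers_lock);

    /* example_ops is now safe to call; drop the pin when done. */
    example_ops->do_work();
    module_put(example_owner);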
@@ -7862,6 +7858,7 @@ void md_do_sync(struct md_thread *thread)
         */
 
        do {
+               int mddev2_minor = -1;
                mddev->curr_resync = 2;
 
        try_again:
@@ -7891,10 +7888,14 @@ void md_do_sync(struct md_thread *thread)
                                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
-                                       printk(KERN_INFO "md: delaying %s of %s"
-                                              " until %s has finished (they"
-                                              " share one or more physical units)\n",
-                                              desc, mdname(mddev), mdname(mddev2));
+                                       if (mddev2_minor != mddev2->md_minor) {
+                                               mddev2_minor = mddev2->md_minor;
+                                               printk(KERN_INFO "md: delaying %s of %s"
+                                                      " until %s has finished (they"
+                                                      " share one or more physical units)\n",
+                                                      desc, mdname(mddev),
+                                                      mdname(mddev2));
+                                       }
                                        mddev_put(mddev2);
                                        if (signal_pending(current))
                                                flush_signals(current);
@@ -8275,16 +8276,13 @@ no_add:
 static void md_start_sync(struct work_struct *ws)
 {
        struct mddev *mddev = container_of(ws, struct mddev, del_work);
-       int ret = 0;
 
        mddev->sync_thread = md_register_thread(md_do_sync,
                                                mddev,
                                                "resync");
        if (!mddev->sync_thread) {
-               if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
-                       printk(KERN_ERR "%s: could not start resync"
-                              " thread...\n",
-                              mdname(mddev));
+               printk(KERN_ERR "%s: could not start resync thread...\n",
+                      mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
index 0e4efcd1079550faba99495bdd6db51badbfea0f..be1a9fca3b2d2ade369359d109d1a53ddf30d077 100644 (file)
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       md_write_start(mddev, bio);
+
        /*
         * Register the new request and wait if the reconstruction
         * thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                return;
        }
 
-       md_write_start(mddev, bio);
-
        do {
 
                /*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 
        while (sect_to_write) {
                struct bio *wbio;
+               sector_t wsector;
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
-               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                  choose_data_offset(r10_bio, rdev) +
-                                  (sector - r10_bio->sector));
+               wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+               wbio->bi_iter.bi_sector = wsector +
+                                  choose_data_offset(r10_bio, rdev);
                wbio->bi_bdev = rdev->bdev;
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                if (submit_bio_wait(wbio) < 0)
                        /* Failure! */
-                       ok = rdev_set_badblocks(rdev, sector,
+                       ok = rdev_set_badblocks(rdev, wsector,
                                                sectors, 0)
                                && ok;
 
index 51f76ddbe265445d1a6c5083cba2fe8715bf5881..1b1ab4a1d132b39f0145b8bc3305484fad5a7091 100644 (file)
@@ -96,7 +96,6 @@ struct r5l_log {
        spinlock_t no_space_stripes_lock;
 
        bool need_cache_flush;
-       bool in_teardown;
 };
 
 /*
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
 
        mddev = log->rdev->mddev;
        /*
-        * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
-        * wait for this thread to finish. This thread waits for
-        * MD_CHANGE_PENDING clear, which is supposed to be done in
-        * md_check_recovery(). md_check_recovery() tries to get
-        * reconfig_mutex. Since r5l_quiesce already holds the mutex,
-        * md_check_recovery() fails, so the PENDING never get cleared. The
-        * in_teardown check workaround this issue.
+        * Discard could zero data, so before discarding we must make sure the
+        * superblock is updated to the new log tail. Updating the superblock
+        * (either by calling md_update_sb() directly or by depending on the md
+        * thread) must hold the reconfig mutex. On the other hand,
+        * raid5_quiesce() is called with reconfig_mutex held. The first step of
+        * raid5_quiesce() is waiting for all IO to finish, hence waiting for
+        * the reclaim thread, while the reclaim thread is calling this function
+        * and waiting for the reconfig mutex. So there is a deadlock. We work
+        * around this issue with a trylock.
+        * FIXME: we could miss a discard if we can't take the reconfig mutex.
         */
-       if (!log->in_teardown) {
-               set_mask_bits(&mddev->flags, 0,
-                             BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-               md_wakeup_thread(mddev->thread);
-               wait_event(mddev->sb_wait,
-                       !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
-                       log->in_teardown);
-               /*
-                * r5l_quiesce could run after in_teardown check and hold
-                * mutex first. Superblock might get updated twice.
-                */
-               if (log->in_teardown)
-                       md_update_sb(mddev, 1);
-       } else {
-               WARN_ON(!mddev_is_locked(mddev));
-               md_update_sb(mddev, 1);
-       }
+       set_mask_bits(&mddev->flags, 0,
+               BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+       if (!mddev_trylock(mddev))
+               return;
+       md_update_sb(mddev, 1);
+       mddev_unlock(mddev);
 
        /* discard IO error really doesn't matter, ignore it */
        if (log->last_checkpoint < end) {
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
        if (!log || state == 2)
                return;
        if (state == 0) {
-               log->in_teardown = 0;
                /*
                 * This is a special case for hotadd. In suspend, the array has
                 * no journal. In resume, journal is initialized as well as the
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
                log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                        log->rdev->mddev, "reclaim");
        } else if (state == 1) {
-               /*
-                * at this point all stripes are finished, so io_unit is at
-                * least in STRIPE_END state
-                */
-               log->in_teardown = 1;
                /* make sure r5l_write_super_and_discard_space exits */
                mddev = log->rdev->mddev;
                wake_up(&mddev->sb_wait);
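The rewritten comment describes a two-party cycle: raid5_quiesce() takes
reconfig_mutex and then waits for the reclaim thread, while the reclaim thread
needs reconfig_mutex to update the superblock. The trylock breaks the cycle by
letting the reclaim side back off. Schematically (function names illustrative):

    static void quiesce_side(void)          /* holds the mutex */
    {
            mutex_lock(&reconfig_mutex);
            wait_for_reclaim_to_finish();   /* waits on the other thread */
            mutex_unlock(&reconfig_mutex);
    }

    static void reclaim_side(void)          /* wants the mutex */
    {
            if (!mutex_trylock(&reconfig_mutex))
                    return;                 /* holder may be waiting on us */
            update_superblock();
            mutex_unlock(&reconfig_mutex);
    }

The cost, as the FIXME notes, is that a contended attempt skips the superblock
update and therefore the discard.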
index 8912407a4dd0edb251a36e3befe3ff8182598bdf..ee7fc37017007a2d6cf4b440f95063535d928522 100644 (file)
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 {
        struct stripe_head *sh;
        int hash = stripe_hash_locks_hash(sector);
+       int inc_empty_inactive_list_flag;
 
        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
                                        atomic_inc(&conf->active_stripes);
                                BUG_ON(list_empty(&sh->lru) &&
                                       !test_bit(STRIPE_EXPANDING, &sh->state));
+                               inc_empty_inactive_list_flag = 0;
+                               if (!list_empty(conf->inactive_list + hash))
+                                       inc_empty_inactive_list_flag = 1;
                                list_del_init(&sh->lru);
+                               if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                                       atomic_inc(&conf->empty_inactive_list_nr);
                                if (sh->group) {
                                        sh->group->stripes_cnt--;
                                        sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        sector_t head_sector, tmp_sec;
        int hash;
        int dd_idx;
+       int inc_empty_inactive_list_flag;
 
        /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
        tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                                atomic_inc(&conf->active_stripes);
                        BUG_ON(list_empty(&head->lru) &&
                               !test_bit(STRIPE_EXPANDING, &head->state));
+                       inc_empty_inactive_list_flag = 0;
+                       if (!list_empty(conf->inactive_list + hash))
+                               inc_empty_inactive_list_flag = 1;
                        list_del_init(&head->lru);
+                       if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                               atomic_inc(&conf->empty_inactive_list_nr);
                        if (head->group) {
                                head->group->stripes_cnt--;
                                head->group = NULL;
@@ -993,7 +1005,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
                        bio_set_op_attrs(bi, op, op_flags);
                        bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
                        bio_set_op_attrs(rbi, op, op_flags);
                        BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+       int disks)
 {
        struct stripe_head *sh;
+       int i;
 
        sh = kmem_cache_zalloc(sc, gfp);
        if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
                INIT_LIST_HEAD(&sh->batch_list);
                INIT_LIST_HEAD(&sh->lru);
                atomic_set(&sh->count, 1);
+               for (i = 0; i < disks; i++) {
+                       struct r5dev *dev = &sh->dev[i];
+
+                       bio_init(&dev->req);
+                       dev->req.bi_io_vec = &dev->vec;
+                       dev->req.bi_max_vecs = 1;
+
+                       bio_init(&dev->rreq);
+                       dev->rreq.bi_io_vec = &dev->rvec;
+                       dev->rreq.bi_max_vecs = 1;
+               }
        }
        return sh;
 }
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
        struct stripe_head *sh;
 
-       sh = alloc_stripe(conf->slab_cache, gfp);
+       sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
        if (!sh)
                return 0;
 
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        mutex_lock(&conf->cache_size_mutex);
 
        for (i = conf->max_nr_stripes; i; i--) {
-               nsh = alloc_stripe(sc, GFP_KERNEL);
+               nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
                if (!nsh)
                        break;
 
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi)
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
+       bio_reset(bi);
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        raid5_release_stripe(sh);
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
        if (sh->batch_head && bi->bi_error && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
+       bio_reset(bi);
        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
        struct r5dev *dev = &sh->dev[i];
 
-       bio_init(&dev->req);
-       dev->req.bi_io_vec = &dev->vec;
-       dev->req.bi_max_vecs = 1;
-       dev->req.bi_private = sh;
-
-       bio_init(&dev->rreq);
-       dev->rreq.bi_io_vec = &dev->rvec;
-       dev->rreq.bi_max_vecs = 1;
-       dev->rreq.bi_private = sh;
-
        dev->flags = 0;
        dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
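With raid5_build_block() no longer touching the embedded bios, the lifecycle
becomes: bio_init() once in alloc_stripe(), bio_reset() in each completion
handler before reuse. In this kernel bio_reset() clears a bio's state while
preserving the trailing fields (bi_max_vecs and bi_io_vec), which is what lets
the one-time setup survive. A sketch of the ownership model outside the raid5
specifics:

    struct example_dev {
            struct bio      req;    /* embedded, lives as long as the owner */
            struct bio_vec  vec;
    };

    static void example_alloc(struct example_dev *dev)
    {
            bio_init(&dev->req);            /* once, at allocation time */
            dev->req.bi_io_vec   = &dev->vec;
            dev->req.bi_max_vecs = 1;
    }

    static void example_end_io(struct bio *bio)
    {
            /* ... consume the completion status ... */
            bio_reset(bio);                 /* clean slate for the next I/O */
    }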
@@ -4628,7 +4645,9 @@ finish:
        }
 
        if (!bio_list_empty(&s.return_bi)) {
-               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+                               (s.failed <= conf->max_degraded ||
+                                       conf->mddev->external == 0)) {
                        spin_lock_irq(&conf->device_lock);
                        bio_list_merge(&conf->return_bi, &s.return_bi);
                        spin_unlock_irq(&conf->device_lock);
@@ -6620,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        }
 
        conf->min_nr_stripes = NR_STRIPES;
+       if (mddev->reshape_position != MaxSector) {
+               int stripes = max_t(int,
+                       ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
+                       ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+               conf->min_nr_stripes = max(NR_STRIPES, stripes);
+               if (conf->min_nr_stripes != NR_STRIPES)
+                       printk(KERN_INFO
+                               "md/raid:%s: force stripe size %d for reshape\n",
+                               mdname(mddev), conf->min_nr_stripes);
+       }
        memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
                 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
        atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
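As a worked example of the reshape sizing, assume a 512 KiB chunk
(chunk_sectors = 1024) and the default STRIPE_SIZE of one 4 KiB page:

    (1024 << 9) / 4096 = 128 stripes per chunk
    128 * 4            = 512

so a pending reshape raises min_nr_stripes from the default NR_STRIPES (256) to
512, computed from whichever of the old and new chunk sizes is larger.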
@@ -6826,11 +6855,14 @@ static int raid5_run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
-               printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
-                      mdname(mddev));
-               mddev->ro = 1;
-               set_disk_ro(mddev->gendisk, 1);
+       if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+               if (!journal_dev) {
+                       pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+                              mdname(mddev));
+                       mddev->ro = 1;
+                       set_disk_ro(mddev->gendisk, 1);
+               } else if (mddev->recovery_cp == MaxSector)
+                       set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
        }
 
        conf->min_offset_diff = min_offset_diff;
index 70018247bdda9ec9bc90a22e48713c27d47ff24b..5719b991e340d3df77cdbbedee48e36c67463273 100644 (file)
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
                                u8 tag = edid[i] >> 5;
                                u8 len = edid[i] & 0x1f;
 
-                               if (tag == 3 && len >= 5 && i + len <= end)
+                               if (tag == 3 && len >= 5 && i + len <= end &&
+                                   edid[i + 1] == 0x03 &&
+                                   edid[i + 2] == 0x0c &&
+                                   edid[i + 3] == 0x00)
                                        return i + 4;
                                i += len + 1;
                        } while (i < end);
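The three added byte comparisons pin the tag-3 block down to the HDMI
Vendor-Specific Data Block before bytes 4-5 are trusted as the Source Physical
Address: a CEA-861 vendor block starts with a 24-bit IEEE OUI stored
least-significant byte first, and 03 0c 00 is the HDMI Licensing OUI 0x000C03.
The block being matched looks like:

    [tag=3 | len>=5]  03 0c 00  SPA_hi SPA_lo ...
                      ^^^^^^^^  ^^^^^^^^^^^^^
                      OUI 0x000C03 (LSB first), then the physical address
                      returned via the i + 4 offset

Without the OUI check, any vendor-specific block of length >= 5 would have been
misread as an HDMI VSDB.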
index efec2d1a7afddf877432a8d15316dcec40060fb6..4d080da7afaf2ebf46df5c66ec0a8dfc6d468722 100644 (file)
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
        q->mem_ops = &vb2_dma_sg_memops;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
 
        err = vb2_queue_init(q);
        if (err < 0)
index db987e5b93eb19c1529569f48c173cfc0533e560..59a4b5f7724e502002dfccc278df272fbeecf13b 100644 (file)
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
        q->buf_struct_size = sizeof(struct saa7134_buf);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
        ret = vb2_queue_init(q);
        if (ret) {
                vb2_dvb_dealloc_frontends(&dev->frontends);
index ca417a454d6787aecc77a0a338137dce78d7acb0..791a5161809be07c4d8c90719ba975db8c9786cf 100644 (file)
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
        q->buf_struct_size = sizeof(struct saa7134_buf);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &dev->lock;
+       q->dev = &dev->pci->dev;
        err = vb2_queue_init(q);
        if (err)
                return err;
index f25344bc79126e2fb8df0005646c6b68bb3f080a..552b635cfce7f02b4f3e65d1d641e9e39903ec43 100644 (file)
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
 config VIDEO_MEDIATEK_VCODEC
        tristate "Mediatek Video Codec driver"
        depends on MTK_IOMMU || COMPILE_TEST
-       depends on VIDEO_DEV && VIDEO_V4L2
+       depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
        depends on ARCH_MEDIATEK || COMPILE_TEST
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index 94f0a425be42e93fe72979f242d8e68c6a7f5c62..3a8e6958adae1c1dd21397757d52328e2e5aef77 100644 (file)
@@ -23,7 +23,6 @@
 #include <media/v4l2-ioctl.h>
 #include <media/videobuf2-core.h>
 
-#include "mtk_vcodec_util.h"
 
 #define MTK_VCODEC_DRV_NAME    "mtk_vcodec_drv"
 #define MTK_VCODEC_ENC_NAME    "mtk-vcodec-enc"
index 3ed3f2d31df507018b7cbdafb2ab924309f0f5c6..2c5719ac23b28445b2c605f1a3e5b19debd645b2 100644 (file)
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
        struct mtk_q_data *q_data;
        int ret, i;
        struct mtk_video_fmt *fmt;
-       unsigned int pitch_w_div16;
        struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
 
        vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
        q_data->coded_width = f->fmt.pix_mp.width;
        q_data->coded_height = f->fmt.pix_mp.height;
 
-       pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
-       if (pitch_w_div16 % 8 != 0) {
-               /* Adjust returned width/height, so application could correctly
-                * allocate hw required memory
-                */
-               q_data->visible_height += 32;
-               vidioc_try_fmt(f, q_data->fmt);
-       }
-
        q_data->field = f->fmt.pix_mp.field;
        ctx->colorspace = f->fmt.pix_mp.colorspace;
        ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
 {
        struct mtk_vcodec_ctx *ctx = priv;
        int ret;
-       struct vb2_buffer *dst_buf;
+       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
        struct mtk_vcodec_mem bs_buf;
        struct venc_done_result enc_result;
 
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
                mtk_v4l2_err("venc_if_encode failed=%d", ret);
                return -EINVAL;
        }
+       src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+       if (src_buf) {
+               src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+               dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+               dst_buf->timestamp = src_buf->timestamp;
+               dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+       } else {
+               mtk_v4l2_err("No timestamp for the header buffer.");
+       }
 
        ctx->state = MTK_STATE_HEADER;
        dst_buf->planes[0].bytesused = enc_result.bs_size;
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
        struct mtk_vcodec_mem bs_buf;
        struct venc_done_result enc_result;
        int ret, i;
-       struct vb2_v4l2_buffer *vb2_v4l2;
+       struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
 
        /* check dst_buf: dst_buf may be removed in device_run
         * to store the encode header, so we need to check dst_buf and
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
        ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
                             &frm_buf, &bs_buf, &enc_result);
 
-       vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+       src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+       dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+       dst_buf->timestamp = src_buf->timestamp;
+       dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
        if (enc_result.is_key_frm)
-               vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+               dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
 
        if (ret) {
                v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
                        0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
        v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
                        V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
-                       0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+                       0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
        v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
                        V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
                        0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
 
 void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
 {
-       venc_if_deinit(ctx);
+       int ret = venc_if_deinit(ctx);
+
+       if (ret)
+               mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+       ctx->state = MTK_STATE_FREE;
 }
index c7806ecda2ddc8a122b96bf0ed72e8d1554ae635..5cd2151431bf55884d269c7874cf0c6936851993 100644 (file)
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
        mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
        mutex_lock(&dev->dev_mutex);
 
+       /*
+        * Call v4l2_m2m_ctx_release to make sure the worker thread is not
+        * running after venc_if_deinit.
+        */
+       v4l2_m2m_ctx_release(ctx->m2m_ctx);
        mtk_vcodec_enc_release(ctx);
        v4l2_fh_del(&ctx->fh);
        v4l2_fh_exit(&ctx->fh);
        v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
-       v4l2_m2m_ctx_release(ctx->m2m_ctx);
 
        list_del_init(&ctx->list);
        dev->num_instances--;
index 33e890f5aa9c2158847d1ef9e076fefc0d5ebe18..12131855b46a7382be29659313c6d119caeb29d9 100644 (file)
@@ -16,7 +16,6 @@
 #define _MTK_VCODEC_INTR_H_
 
 #define MTK_INST_IRQ_RECEIVED          0x1
-#define MTK_INST_WORK_THREAD_ABORT_DONE        0x2
 
 struct mtk_vcodec_ctx;
 
index 9a600525b3c172e100250315c772dd3df620d4ff..63d4be4ff3276e9b52d5e17c7ceac41ce0874327 100644 (file)
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
 
 /*
  * struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ *                               AP-W/R : AP is writer/reader on this item
+ *                               VPU-W/R: VPU is writer/reader on this item
  * @input_fourcc: input fourcc
  * @bitrate: target bitrate (in bps)
  * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
 
 /*
  * struct venc_h264_vpu_buf - Structure for buffer information
- * @align: buffer alignment (in bytes)
+ *                            AP-W/R : AP is writer/reader on this item
+ *                            VPU-W/R: VPU is writer/reader on this item
  * @iova: IO virtual address
  * @vpua: VPU side memory addr which is used by RC_CODE
  * @size: buffer size (in bytes)
  */
 struct venc_h264_vpu_buf {
-       u32 align;
        u32 iova;
        u32 vpua;
        u32 size;
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
 
 /*
  * struct venc_h264_vsi - Structure for VPU driver control and info share
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is writer/reader on this item
  * This structure is allocated in VPU side and shared to AP side.
  * @config: h264 encoder configuration
  * @work_bufs: working buffer information in VPU side
@@ -150,12 +154,6 @@ struct venc_h264_inst {
        struct mtk_vcodec_ctx *ctx;
 };
 
-static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
-                                 u32 val)
-{
-       writel(val, inst->hw_base + addr);
-}
-
 static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
 {
        return readl(inst->hw_base + addr);
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
                return 40;
        case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
                return 41;
+       case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+               return 42;
        default:
                mtk_vcodec_debug(inst, "unsupported level %d", level);
                return 31;
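
The switch maps V4L2 level enums onto H.264 level_idc values, which follow the 10 * major + minor convention, so the newly reachable level 4.2 becomes 42. A standalone restatement of that convention (an illustration, not the driver function itself):

#include <stdio.h>

/* level_idc convention assumed from the cases above: 4.0 -> 40, 4.2 -> 42 */
static unsigned int h264_level_idc(unsigned int major, unsigned int minor)
{
	return 10 * major + minor;
}

int main(void)
{
	printf("4.0 -> %u, 4.1 -> %u, 4.2 -> %u\n",
	       h264_level_idc(4, 0), h264_level_idc(4, 1),
	       h264_level_idc(4, 2));	/* prints 40, 41, 42 */
	return 0;
}
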
index 60bbcd2a051058b7a6d5698240efcd6c0881e778..6d9758479f9ab8dd12cd3b6e79bcc4fd535e5ee1 100644 (file)
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
 
 /*
  * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ *                              AP-W/R : AP is writer/reader on this item
+ *                              VPU-W/R: VPU is writer/reader on this item
  * @input_fourcc: input fourcc
  * @bitrate: target bitrate (in bps)
  * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
 };
 
 /*
- * struct venc_vp8_vpu_buf -Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ *                           AP-W/R : AP is writer/reader on this item
+ *                           VPU-W/R: VPU is writer/reader on this item
  * @iova: IO virtual address
  * @vpua: VPU side memory addr which is used by RC_CODE
  * @size: buffer size (in bytes)
  */
 struct venc_vp8_vpu_buf {
-       u32 align;
        u32 iova;
        u32 vpua;
        u32 size;
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
 
 /*
  * struct venc_vp8_vsi - Structure for VPU driver control and info share
+ *                       AP-W/R : AP is writer/reader on this item
+ *                       VPU-W/R: VPU is writer/reader on this item
  * This structure is allocated in VPU side and shared to AP side.
  * @config: vp8 encoder configuration
  * @work_bufs: working buffer information in VPU side
@@ -138,12 +142,6 @@ struct venc_vp8_inst {
        struct mtk_vcodec_ctx *ctx;
 };
 
-static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
-                                    u32 val)
-{
-       writel(val, inst->hw_base + addr);
-}
-
 static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
 {
        return readl(inst->hw_base + addr);
index 6a7bcc3028b180b6fd5cd77c97ef3da159e6599a..bc50c69ee0c5800d3c6ecce62a5cdc6546076d55 100644 (file)
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
  */
 int rcar_fcp_enable(struct rcar_fcp_device *fcp)
 {
+       int error;
+
        if (!fcp)
                return 0;
 
-       return pm_runtime_get_sync(fcp->dev);
+       error = pm_runtime_get_sync(fcp->dev);
+       if (error < 0)
+               return error;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(rcar_fcp_enable);
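
pm_runtime_get_sync() returns 1 rather than 0 when the device is already resumed, so forwarding its result verbatim made rcar_fcp_enable() hand a positive value to callers that treat any non-zero return as failure. A sketch of the general squash-positive pattern; the function name is illustrative:

#include <linux/pm_runtime.h>

/* Sketch only: squash pm_runtime_get_sync()'s "already active" result (1)
 * so callers can keep treating non-zero as failure.
 */
static int example_runtime_enable(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);

	if (error < 0)
		return error;	/* genuine resume failure */

	return 0;		/* 0 and 1 both mean the device is powered */
}
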
 
index 869c83fb3c5d90a751d2e1b1d7c03c6e35fa2517..f00f3e7422657723ad8f69bd90f76554f8938bb6 100644 (file)
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
        int ret;
        struct device_node *child;
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
                else
                        ret = gpmc_probe_generic_child(pdev, child);
 
-               if (ret)
-                       return ret;
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
+                               child->name, ret);
+               }
        }
-
-       return 0;
 }
 #else
 static int gpmc_probe_dt(struct platform_device *pdev)
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
-       return 0;
 }
 #endif /* CONFIG_OF */
 
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev)
                goto setup_irq_failed;
        }
 
-       rc = gpmc_probe_dt_children(pdev);
-       if (rc < 0) {
-               dev_err(gpmc->dev, "failed to probe DT children\n");
-               goto dt_children_failed;
-       }
+       gpmc_probe_dt_children(pdev);
 
        return 0;
 
-dt_children_failed:
-       gpmc_free_irq(gpmc);
 setup_irq_failed:
        gpmc_gpio_exit(gpmc);
 gpio_init_failed:
index a216b46677429402168c587c638cc1d710632960..d002528289667732e9032f68aa0dba6624abfd9c 100644 (file)
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
          This driver can also be built as a module.  If so, the module
          will be called tsl2550.
 
-config SENSORS_BH1780
-       tristate "ROHM BH1780GLI ambient light sensor"
-       depends on I2C && SYSFS
-       help
-         If you say yes here you get support for the ROHM BH1780GLI
-         ambient light sensor.
-
-         This driver can also be built as a module.  If so, the module
-         will be called bh1780gli.
-
 config SENSORS_BH1770
          tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
          depends on I2C
index 4387ccb79e642c34f8d3b6c2a39ba4daae15e325..fb32516ddfe2e4f8112beee9e831bcc4eb729667 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE)               += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)          += phantom.o
 obj-$(CONFIG_QCOM_COINCELL)    += qcom-coincell.o
-obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
@@ -69,5 +68,6 @@ OBJCOPYFLAGS :=
 OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
                        --set-section-flags .text=alloc,readonly \
                        --rename-section .text=.rodata
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
        $(call if_changed,objcopy)
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
deleted file mode 100644 (file)
index 7f90ce5..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * bh1780gli.c
- * ROHM Ambient Light Sensor Driver
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#define BH1780_REG_CONTROL     0x80
-#define BH1780_REG_PARTID      0x8A
-#define BH1780_REG_MANFID      0x8B
-#define BH1780_REG_DLOW        0x8C
-#define BH1780_REG_DHIGH       0x8D
-
-#define BH1780_REVMASK         (0xf)
-#define BH1780_POWMASK         (0x3)
-#define BH1780_POFF            (0x0)
-#define BH1780_PON             (0x3)
-
-/* power on settling time in ms */
-#define BH1780_PON_DELAY       2
-
-struct bh1780_data {
-       struct i2c_client *client;
-       int power_state;
-       /* lock for sysfs operations */
-       struct mutex lock;
-};
-
-static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
-{
-       int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
-{
-       int ret = i2c_smbus_read_byte_data(ddata->client, reg);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static ssize_t bh1780_show_lux(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int lsb, msb;
-
-       lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
-       if (lsb < 0)
-               return lsb;
-
-       msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
-       if (msb < 0)
-               return msb;
-
-       return sprintf(buf, "%d\n", (msb << 8) | lsb);
-}
-
-static ssize_t bh1780_show_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int state;
-
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       return sprintf(buf, "%d\n", state & BH1780_POWMASK);
-}
-
-static ssize_t bh1780_store_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       unsigned long val;
-       int error;
-
-       error = kstrtoul(buf, 0, &val);
-       if (error)
-               return error;
-
-       if (val < BH1780_POFF || val > BH1780_PON)
-               return -EINVAL;
-
-       mutex_lock(&ddata->lock);
-
-       error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
-       if (error < 0) {
-               mutex_unlock(&ddata->lock);
-               return error;
-       }
-
-       msleep(BH1780_PON_DELAY);
-       ddata->power_state = val;
-       mutex_unlock(&ddata->lock);
-
-       return count;
-}
-
-static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
-               bh1780_show_power_state, bh1780_store_power_state);
-
-static struct attribute *bh1780_attributes[] = {
-       &dev_attr_power_state.attr,
-       &dev_attr_lux.attr,
-       NULL
-};
-
-static const struct attribute_group bh1780_attr_group = {
-       .attrs = bh1780_attributes,
-};
-
-static int bh1780_probe(struct i2c_client *client,
-                                               const struct i2c_device_id *id)
-{
-       int ret;
-       struct bh1780_data *ddata;
-       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
-               return -EIO;
-
-       ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
-                            GFP_KERNEL);
-       if (ddata == NULL)
-               return -ENOMEM;
-
-       ddata->client = client;
-       i2c_set_clientdata(client, ddata);
-
-       ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
-       if (ret < 0)
-               return ret;
-
-       dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
-                       (ret & BH1780_REVMASK));
-
-       mutex_init(&ddata->lock);
-
-       return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
-}
-
-static int bh1780_remove(struct i2c_client *client)
-{
-       sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int bh1780_suspend(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       ddata->power_state = state & BH1780_POWMASK;
-
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static int bh1780_resume(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = ddata->power_state;
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
-
-static const struct i2c_device_id bh1780_id[] = {
-       { "bh1780", 0 },
-       { },
-};
-
-MODULE_DEVICE_TABLE(i2c, bh1780_id);
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_bh1780_match[] = {
-       { .compatible = "rohm,bh1780gli", },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
-
-static struct i2c_driver bh1780_driver = {
-       .probe          = bh1780_probe,
-       .remove         = bh1780_remove,
-       .id_table       = bh1780_id,
-       .driver = {
-               .name = "bh1780",
-               .pm     = &bh1780_pm,
-               .of_match_table = of_match_ptr(of_bh1780_match),
-       },
-};
-
-module_i2c_driver(bh1780_driver);
-
-MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
index bdee9a01ef35ad6fa34f1238ea7268454e0e56c0..c466ee2b0c973a7c77cb16566770dec3b426db33 100644 (file)
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
         */
        mutex_lock(&afu->contexts_lock);
        idr_preload(GFP_KERNEL);
-       i = idr_alloc(&ctx->afu->contexts_idr, ctx,
-                     ctx->afu->adapter->native->sl_ops->min_pe,
+       i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
                      ctx->afu->num_procs, GFP_NOWAIT);
        idr_preload_end();
        mutex_unlock(&afu->contexts_lock);
index de090533f18cb8eb5b2c8f0a0b3bfbf6f11e5e00..344a0ff8f8c7df97e5328f89e46cd3f55b791d68 100644 (file)
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops {
        u64 (*timebase_read)(struct cxl *adapter);
        int capi_mode;
        bool needs_reset_before_disable;
-       int min_pe;
 };
 
 struct cxl_native {
@@ -603,6 +602,7 @@ struct cxl {
        struct bin_attribute cxl_attr;
        int adapter_num;
        int user_irqs;
+       int min_pe;
        u64 ps_size;
        u16 psl_rev;
        u16 base_image;
index 3bcdaee11ba159aa13580bdd48151e0ef0755dbd..e606fdc4bc9cc3ec0ff7d3f29d4e691c30a26bd4 100644 (file)
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
        return fail_psl_irq(afu, &irq_info);
 }
 
-void native_irq_wait(struct cxl_context *ctx)
+static void native_irq_wait(struct cxl_context *ctx)
 {
        u64 dsisr;
        int timeout = 1000;
index d152e2de8c9375e2760b03cd93857d542f9a36d4..6f0c4ac4b6498991913647b0d70b2d4805870f0b 100644 (file)
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
 
 static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
 {
-       u64 psl_dsnctl;
+       u64 psl_dsnctl, psl_fircntl;
        u64 chipid;
        u64 capp_unit_id;
        int rc;
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_
        cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
        /* snoop write mask */
        cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
-       /* set fir_accum */
-       cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+       /* set fir_cntl to recommended value for production env */
+       psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+       psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+       psl_fircntl |= 0x1ULL; /* ce_thresh */
+       cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
        /* for debugging with trace arrays */
        cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
 
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = {
        .write_timebase_ctrl = write_timebase_ctrl_xsl,
        .timebase_read = timebase_read_xsl,
        .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
-       .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
 };
 
 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
 {
        if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+               /* Mellanox CX-4 */
                dev_info(&adapter->dev, "Device uses an XSL\n");
                adapter->native->sl_ops = &xsl_ops;
+               adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
        } else {
                dev_info(&adapter->dev, "Device uses a PSL\n");
                adapter->native->sl_ops = &psl_ops;
index dee8def1c1936c36fc82d894e269c1d8825d5c29..3519acebfdab6be8387151d969f55859952c2f09 100644 (file)
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
        /* Setup the PHB using arch provided callback */
        phb->ops = &cxl_pcie_pci_ops;
        phb->cfg_addr = NULL;
-       phb->cfg_data = 0;
+       phb->cfg_data = NULL;
        phb->private_data = afu;
        phb->controller_ops = cxl_pci_controller_ops;
 
@@ -230,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
        if (phb->bus == NULL)
                return -ENXIO;
 
+       /* Set release hook on root bus */
+       pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
+                                   pcibios_free_controller_deferred,
+                                   (void *) phb);
+
        /* Claim resources. This might need some rework as well depending
         * on whether we are doing probe-only or not, like assigning unassigned
         * resources etc...
@@ -256,7 +261,10 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
        afu->phb = NULL;
 
        pci_remove_root_bus(phb->bus);
-       pcibios_free_controller(phb);
+       /*
+        * We don't free phb here - that's handled by
+        * pcibios_free_controller_deferred()
+        */
 }
 
 static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
index 166b1db3969fbc1f74fed6882bae0995a5768922..3564477b8c2dad4cde23eda8fc8bb3b938d6bc86 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include "lkdtm.h"
 
-void lkdtm_rodata_do_nothing(void)
+void notrace lkdtm_rodata_do_nothing(void)
 {
        /* Does nothing. We just want an architecture agnostic "return". */
 }
index 5a3fd76eec27b226ec4a9a9dd9655d0a2ae95759..1dd611423d8be4a18de022c48f7f30bca84d8970 100644 (file)
@@ -9,7 +9,15 @@
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 
-static size_t cache_size = 1024;
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
 static struct kmem_cache *bad_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
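
The comment above describes the trick: hardened usercopy skips its runtime checks when the compiler can prove a copy size constant, so the tests add a volatile zero to force the runtime path. The mechanism is plain C and can be observed in userspace with __builtin_constant_p(), the GCC/Clang primitive such compile-time tests key on:

#include <stddef.h>
#include <stdio.h>

static volatile size_t unconst;	/* volatile: unknowable at compile time */

int main(void)
{
	char buf[16];

	(void)buf;	/* only its size is used below */

	/* A bare sizeof is a compile-time constant... */
	printf("sizeof(buf):           constant_p = %d\n",
	       __builtin_constant_p(sizeof(buf)));

	/* ...adding a volatile zero defeats constant folding. */
	printf("unconst + sizeof(buf): constant_p = %d\n",
	       __builtin_constant_p(unconst + sizeof(buf)));
	return 0;
}
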
@@ -49,7 +57,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
-               bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+               bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
 {
        unsigned long user_addr;
        unsigned char *one, *two;
-       const size_t size = 1024;
+       size_t size = unconst + 1024;
 
        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
 
        pr_info("attempting good copy_to_user from kernel rodata\n");
        if (copy_to_user((void __user *)user_addr, test_text,
-                        sizeof(test_text))) {
+                        unconst + sizeof(test_text))) {
                pr_warn("copy_to_user failed unexpectedly?!\n");
                goto free_user;
        }
 
        pr_info("attempting bad copy_to_user from kernel text\n");
-       if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+       if (copy_to_user((void __user *)user_addr, vm_mmap,
+                        unconst + PAGE_SIZE)) {
                pr_warn("copy_to_user failed, but lacked Oops\n");
                goto free_user;
        }
index e2fb44cc5c37668765e31f6e2b279cb3ee249229..dc3a854e02d39d1b73f95ff1ed8e203088955cb6 100644 (file)
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 {
        u32 reg;
-       /* Read ME FW Status check for SPS Firmware */
-       pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+       unsigned int devfn;
+
+       /*
+        * Read the ME FW Status register to check for SPS firmware.
+        * The SPS FW is only signaled in PCI function 0.
+        */
+       devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+       pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        /* if bits [19:16] = 15, running SPS Firmware */
        return (reg & 0xf0000) == 0xf0000;
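
The fix reads PCI_CFG_HFS_1 through function 0 of the same device, since SPS firmware only announces itself there. The devfn packing behind PCI_DEVFN()/PCI_SLOT() is plain bit arithmetic, slot in bits 7:3 and function in bits 2:0; a standalone check with macro bodies mirroring the uapi definitions and a made-up devfn:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(22, 3);	/* hypothetical: device 22, function 3 */
	unsigned int fn0 = PCI_DEVFN(PCI_SLOT(devfn), 0);

	printf("devfn 0x%02x -> function 0 sibling 0x%02x\n", devfn, fn0);
	return 0;	/* prints 0xb3 -> 0xb0 */
}
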
index 64e64da6da4439c470832ab32a0a0158d6c2d62a..71cea9b296b2f72c1763d335024ff24379e137b7 100644 (file)
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
index 48a5dd740f3ba38b854ac8bfbbdfa93de4820f6a..2206d4477dbbdb5190906e277124580b600d71e0 100644 (file)
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
                        break;
 
                if (req_op(next) == REQ_OP_DISCARD ||
+                   req_op(next) == REQ_OP_SECURE_ERASE ||
                    req_op(next) == REQ_OP_FLUSH)
                        break;
 
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        struct mmc_host *host = card->host;
        unsigned long flags;
+       bool req_is_special = mmc_req_is_special(req);
 
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        }
 
 out:
-       if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-           mmc_req_is_special(req))
+       if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
                /*
                 * Release host when there are no more requests
                 * and after a special request (discard, flush) is done.
index bf14642a576a515a50b51de61daf8656b8cfc443..708057261b38982fffa9f42204b841260ff67432 100644 (file)
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
        /*
         * We only like normal block requests and discards.
         */
-       if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
+       if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+           req_op(req) != REQ_OP_SECURE_ERASE) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d)
                spin_unlock_irq(q->queue_lock);
 
                if (req || mq->mqrq_prev->req) {
+                       bool req_is_special = mmc_req_is_special(req);
+
                        set_current_state(TASK_RUNNING);
                        mq->issue_fn(mq, req);
                        cond_resched();
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d)
                         * has been finished. Do not assign it to previous
                         * request.
                         */
-                       if (mmc_req_is_special(req))
+                       if (req_is_special)
                                mq->mqrq_cur->req = NULL;
 
                        mq->mqrq_prev->brq.mrq.data = NULL;
index d62531124d542c0ff82893ed04226e2a0c04016e..fee5e127146519efdd2b6b3ed35fc5ea1858667a 100644 (file)
@@ -4,7 +4,9 @@
 static inline bool mmc_req_is_special(struct request *req)
 {
        return req &&
-               (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+               (req_op(req) == REQ_OP_FLUSH ||
+                req_op(req) == REQ_OP_DISCARD ||
+                req_op(req) == REQ_OP_SECURE_ERASE);
 }
 
 struct request;
index 32380d5d4f6b15440497b952b035cfe3b76b74ea..767af2026f8b4660511d57d3cfce608c3f9d9bac 100644 (file)
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 
-               dev_info(&slot->mmc->class_dev,
-                        "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
-                        slot->id, host->bus_hz, clock,
-                        div ? ((host->bus_hz / div) >> 1) :
-                        host->bus_hz, div);
+               if (clock != slot->__clk_old || force_clkinit)
+                       dev_info(&slot->mmc->class_dev,
+                                "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+                                slot->id, host->bus_hz, clock,
+                                div ? ((host->bus_hz / div) >> 1) :
+                                host->bus_hz, div);
 
                /* disable clock */
                mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+               /* keep the last clock value that was requested from core */
+               slot->__clk_old = clock;
        }
 
        host->current_speed = clock;
index 9e740bc232a8bed09ef7ee668a8a3abdef6e93d1..e8cd2dec3263d308ed0ebc9f52271c4b72bc5f05 100644 (file)
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
  * @queue_node: List node for placing this node in the @queue list of
  *     &struct dw_mci.
  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ *     Keeping track of this helps us to avoid spamming the console.
  * @flags: Random state bits associated with the slot.
  * @id: Number of this slot.
  * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
        struct list_head        queue_node;
 
        unsigned int            clock;
+       unsigned int            __clk_old;
 
        unsigned long           flags;
 #define DW_MMC_CARD_PRESENT    0
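
Caching the last requested rate in @__clk_old lets dw_mci_setup_bus() print its bus-speed line only when the request actually changes, or when re-initialization forces it. A minimal standalone sketch of that log-on-change idiom, with made-up rates:

#include <stdbool.h>
#include <stdio.h>

static unsigned int clk_old;	/* last clock value requested from core */

static void setup_bus(unsigned int clock, bool force_clkinit)
{
	if (clock != clk_old || force_clkinit)
		printf("Bus speed = %u Hz\n", clock);
	/* ... program the divider as before ... */
	clk_old = clock;	/* remember the request; stay quiet next time */
}

int main(void)
{
	setup_bus(400000, true);	/* prints: forced initial setup */
	setup_bus(400000, false);	/* silent: nothing changed */
	setup_bus(52000000, false);	/* prints: rate changed */
	return 0;
}
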
index f23d65eb070d85eaaef5929c6ee4c14a5d0c5a36..be3c49fa7382f31bde9b671834e74dc9f41c9874 100644 (file)
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 
                /* Only reconfigure if we have a different burst size */
                if (*bp != burst) {
-                       struct dma_slave_config cfg;
-
-                       cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.src_maxburst = burst;
-                       cfg.dst_maxburst = burst;
+                       struct dma_slave_config cfg = {
+                               .src_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .dst_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .src_maxburst = burst,
+                               .dst_maxburst = burst,
+                       };
 
                        if (dmaengine_slave_config(c, &cfg))
                                goto use_pio;
index 24ebc9a8de89a072201fba04276fd27e1b68d3e8..5f2f24a7360d87988981372e5ba7eea438d22a78 100644 (file)
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
                                        struct mmc_request *req)
 {
-       struct dma_slave_config cfg;
        struct dma_async_tx_descriptor *tx;
        int ret = 0, i;
        struct mmc_data *data = req->data;
        struct dma_chan *chan;
+       struct dma_slave_config cfg = {
+               .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .src_maxburst = data->blksz / 4,
+               .dst_maxburst = data->blksz / 4,
+       };
 
        /* Sanity check: all the SG entries must be aligned by block size. */
        for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
 
        chan = omap_hsmmc_get_dma_chan(host, data);
 
-       cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.src_maxburst = data->blksz / 4;
-       cfg.dst_maxburst = data->blksz / 4;
-
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;
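
Both omap hunks swap field-by-field assignment on an uninitialized stack dma_slave_config for a designated initializer. The distinction matters: C zero-initializes every member not named in the initializer, whereas the old style left unassigned members holding stack garbage. A standalone illustration using a stand-in struct, not the real dma_slave_config:

#include <stdio.h>

struct cfg {			/* stand-in for dma_slave_config */
	unsigned int src_maxburst;
	unsigned int dst_maxburst;
	unsigned int device_fc;	/* a member neither style assigns by hand */
};

int main(void)
{
	struct cfg c = {
		.src_maxburst = 16,
		.dst_maxburst = 16,
		/* .device_fc not named: guaranteed zero */
	};

	printf("device_fc = %u\n", c.device_fc);	/* always 0 */
	return 0;
}
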
index c95ba83366a0f2d95b1b45a70824327069f6b79d..ed92ce729dde1d60b91ce5395e1802816cca448b 100644 (file)
@@ -28,6 +28,7 @@
 
 struct st_mmc_platform_data {
        struct  reset_control *rstc;
+       struct  clk *icnclk;
        void __iomem *top_ioaddr;
 };
 
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        struct sdhci_host *host;
        struct st_mmc_platform_data *pdata;
        struct sdhci_pltfm_host *pltfm_host;
-       struct clk *clk;
+       struct clk *clk, *icnclk;
        int ret = 0;
        u16 host_version;
        struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
                return PTR_ERR(clk);
        }
 
+       /* ICN clock isn't compulsory, but use it if it's provided. */
+       icnclk = devm_clk_get(&pdev->dev, "icn");
+       if (IS_ERR(icnclk))
+               icnclk = NULL;
+
        rstc = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(rstc))
                rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        clk_prepare_enable(clk);
+       clk_prepare_enable(icnclk);
 
        /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        pltfm_host->clk = clk;
+       pdata->icnclk = icnclk;
 
        /* Configure the Arasan HC inside the flashSS */
        st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        return 0;
 
 err_out:
+       clk_disable_unprepare(icnclk);
        clk_disable_unprepare(clk);
 err_of:
        sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
 
        ret = sdhci_pltfm_unregister(pdev);
 
+       clk_disable_unprepare(pdata->icnclk);
+
        if (rstc)
                reset_control_assert(rstc);
 
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
        if (pdata->rstc)
                reset_control_assert(pdata->rstc);
 
+       clk_disable_unprepare(pdata->icnclk);
        clk_disable_unprepare(pltfm_host->clk);
 out:
        return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
        struct device_node *np = dev->of_node;
 
        clk_prepare_enable(pltfm_host->clk);
+       clk_prepare_enable(pdata->icnclk);
 
        if (pdata->rstc)
                reset_control_deassert(pdata->rstc);
index 25a4fbd4d24ae5c0e6773b9aebd763ec4efde552..d54f666417e183c4f148826db042931b5d81f47c 100644 (file)
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
                   u8 *data, u32 bytes)
 {
        dma_addr_t addr;
-       u32 *p, len, i;
+       u8 *p;
+       u32 len, i, val;
        int ret = 0;
 
        addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 
        /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
        len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-       p = (u32 *)(data + bytes);
+       p = data + bytes;
 
        /* write the parity bytes generated by the ECC back to the OOB region */
-       for (i = 0; i < len; i++)
-               p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+       for (i = 0; i < len; i++) {
+               if ((i % 4) == 0)
+                       val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+               p[i] = (val >> ((i % 4) * 8)) & 0xff;
+       }
 timeout:
 
        dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
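
The old loop stored whole 32-bit words into the OOB area, overrunning it whenever the parity length is not a multiple of four. The new loop issues one ECC_ENCPAR register read per four output bytes and unpacks little-endian, one byte at a time. A standalone model of that loop with invented register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t encpar[] = { 0x44332211, 0x00008877 };	/* fake ECC_ENCPAR regs */
	uint8_t p[6];	/* parity length 6: not a multiple of 4 */
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < sizeof(p); i++) {
		if ((i % 4) == 0)
			val = encpar[i / 4];	/* one register read per 4 bytes */
		p[i] = (val >> ((i % 4) * 8)) & 0xff;	/* little-endian unpack */
	}

	for (i = 0; i < sizeof(p); i++)
		printf("%02x ", p[i]);	/* 11 22 33 44 77 88 */
	printf("\n");
	return 0;
}
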
index ddaa2acb9dd7f716a2f5da80857af78f4f3a4cfa..5223a2182ee44dfbfbda6f578266390f52d4f8b6 100644 (file)
@@ -93,6 +93,9 @@
 #define                NFI_FSM_MASK            (0xf << 16)
 #define NFI_ADDRCNTR           (0x70)
 #define                CNTR_MASK               GENMASK(16, 12)
+#define                ADDRCNTR_SEC_SHIFT      (12)
+#define                ADDRCNTR_SEC(val) \
+               (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
 #define NFI_STRADDR            (0x80)
 #define NFI_BYTELEN            (0x84)
 #define NFI_CSEL               (0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        }
 
        ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-                                       (reg & CNTR_MASK) >= chip->ecc.steps,
+                                       ADDRCNTR_SEC(reg) >= chip->ecc.steps,
                                        10, MTK_TIMEOUT);
        if (ret)
                dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
                dev_warn(nfc->dev, "read ahb/dma done timeout\n");
 
        rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-                                      (reg & CNTR_MASK) >= sectors, 10,
+                                      ADDRCNTR_SEC(reg) >= sectors, 10,
                                       MTK_TIMEOUT);
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
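
ADDRCNTR_SEC() fixes a subtle comparison: CNTR_MASK is GENMASK(16, 12), so the old test of (reg & CNTR_MASK) >= steps compared a field still shifted left by 12 and went true as soon as a single sector completed. Shifting the field down first yields the real count. A standalone check, with GENMASK expanded by hand for 32-bit values:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define CNTR_MASK		GENMASK(16, 12)
#define ADDRCNTR_SEC_SHIFT	12
#define ADDRCNTR_SEC(val)	(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)

int main(void)
{
	uint32_t reg = (1u << ADDRCNTR_SEC_SHIFT) | 0x0abc;	/* 1 sector done */

	printf("masked only: %u\n", reg & CNTR_MASK);	/* 4096: passes ">= steps" too early */
	printf("shifted:     %u\n", ADDRCNTR_SEC(reg));	/* 1: the real sector count */
	return 0;
}
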
index 5173fadc9a4e637f01817ed040a42b72af1d7a68..57cbe2b83849940aa825128c97b091bc92b3e99c 100644 (file)
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
 
-       if (section > nand_chip->ecc.steps)
+       if (section >= nand_chip->ecc.steps)
                return -ERANGE;
 
        if (!section) {
index 1f276fa30ba68233339682a0aec35b26a0f80e0f..9599ed6f121382355b660147fb63554d7a405970 100644 (file)
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
                            "0 for slow, 1 for fast");
 module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
                            "0 for stable (default), 1 for bandwidth, "
                            "2 for count");
 module_param(min_links, int, 0);
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                            slave_dev->name);
        }
 
-       /* already enslaved */
-       if (slave_dev->flags & IFF_SLAVE) {
-               netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+       /* already in use? */
+       if (netdev_is_rx_handler_busy(slave_dev)) {
+               netdev_err(bond_dev,
+                          "Error: Device is in use and cannot be enslaved\n");
                return -EBUSY;
        }
 
index 41c0fc9f3b1465d9dbbde6b5c7798b1e40cf43ef..16f7cadda5c32b430c0d25aea9746a93a063bd39 100644 (file)
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
        struct flexcan_priv *priv = netdev_priv(dev);
        int err;
 
-       err = flexcan_chip_disable(priv);
-       if (err)
-               return err;
-
        if (netif_running(dev)) {
+               err = flexcan_chip_disable(priv);
+               if (err)
+                       return err;
                netif_stop_queue(dev);
                netif_device_detach(dev);
        }
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
+       int err;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
        if (netif_running(dev)) {
                netif_device_attach(dev);
                netif_start_queue(dev);
+               err = flexcan_chip_enable(priv);
+               if (err)
+                       return err;
        }
-       return flexcan_chip_enable(priv);
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
index 2d1d22eec750de937a9a2f615064d71f92e136e4..368bb0710d8f2157f4d94bd5d332adf9290cc53f 100644 (file)
 #define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6      BIT(15)
 
 #define IFI_CANFD_TDELAY                       0x1c
+#define IFI_CANFD_TDELAY_DEFAULT               0xb
+#define IFI_CANFD_TDELAY_MASK                  0x3fff
+#define IFI_CANFD_TDELAY_ABS                   BIT(14)
+#define IFI_CANFD_TDELAY_EN                    BIT(15)
 
 #define IFI_CANFD_ERROR                                0x20
 #define IFI_CANFD_ERROR_TX_OFFSET              0
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
        const struct can_bittiming *bt = &priv->can.bittiming;
        const struct can_bittiming *dbt = &priv->can.data_bittiming;
-       u16 brp, sjw, tseg1, tseg2;
+       u16 brp, sjw, tseg1, tseg2, tdc;
 
        /* Configure bit timing */
        brp = bt->brp - 2;
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
               (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
               (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
               priv->base + IFI_CANFD_FTIME);
+
+       /* Configure transmitter delay */
+       tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
+       writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
+              priv->base + IFI_CANFD_TDELAY);
 }
 
 static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
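
The new TDELAY write enables transmitter delay compensation for the data phase: the compensation offset is brp * (phase_seg1 + 1) prescaler clocks into the bit, clamped to the 14-bit field and combined with the enable and absolute-mode bits. A worked example with hypothetical data-phase bittiming values:

#include <stdio.h>

#define IFI_CANFD_TDELAY_MASK	0x3fff
#define IFI_CANFD_TDELAY_ABS	(1u << 14)
#define IFI_CANFD_TDELAY_EN	(1u << 15)

int main(void)
{
	/* Hypothetical data-phase bittiming: prescaler 2, phase_seg1 = 7 */
	unsigned int brp = 2, phase_seg1 = 7;
	unsigned int tdc = (brp * (phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
	unsigned int reg = IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc;

	printf("tdc = %u clocks, TDELAY = 0x%04x\n", tdc, reg);	/* 16, 0xc010 */
	return 0;
}
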
index 8f12bddd5dc90077add651246fc4e8467cd12960..a0b453ea34c90423ae476c7fc5f98aa8b3bff6ce 100644 (file)
  * BCM5325 and BCM5365 share most definitions below
  */
 #define B53_ARLTBL_MAC_VID_ENTRY(n)    (0x10 * (n))
-#define   ARLTBL_MAC_MASK              0xffffffffffff
+#define   ARLTBL_MAC_MASK              0xffffffffffffULL
 #define   ARLTBL_VID_S                 48
 #define   ARLTBL_VID_MASK_25           0xff
 #define   ARLTBL_VID_MASK              0xfff
index 463bed8cbe4c09358a01ba1bbdb286f64721db42..dd446e4666990ecaae0d9be7215821d0af7e59d7 100644 (file)
@@ -205,8 +205,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,        \
 static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
                                                u32 mask)               \
 {                                                                      \
-       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
        priv->irq##which##_mask &= ~(mask);                             \
+       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
 }                                                                      \
 static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
                                                u32 mask)               \
index d36aedde8cb9b9aa24be3832f727236e3e7cbd72..71067906759419221087e847a2ab5b3e9ac1c6f3 100644 (file)
@@ -2656,15 +2656,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
                        return ret;
        }
 
+       /* Rate Control: disable ingress rate limiting. */
        if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
            mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-           mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
            mv88e6xxx_6320_family(chip)) {
-               /* Rate Control: disable ingress rate limiting. */
                ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
                                           PORT_RATE_CONTROL, 0x0001);
                if (ret)
                        return ret;
+       } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
+               ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+                                          PORT_RATE_CONTROL, 0x0000);
+               if (ret)
+                       return ret;
        }
 
        /* Port Control 1: disable trunking, disable sending
@@ -3187,6 +3191,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
        return err;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
 static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
                                    int reg)
 {
@@ -3212,6 +3217,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
 
        return ret;
 }
+#endif
 
 static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
 {
index 37a0f463b8de5c9d0479b220ac2303fa895cc6ea..18bb9556dd006861c0cc6e9a8997bac14ade5223 100644 (file)
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
                        netdev_err(ndev, "Could not connect to PHY\n");
                        return  -ENODEV;
                }
+#else
+               return -ENODEV;
 #endif
        }
 
index 4bff0f3040df7a85bae40278e13b655009ba1a49..b0da9693f28a130a65e59a007548c773fa422e7f 100644 (file)
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
        priv->dev = dev;
 
        priv->regs = devm_ioremap_resource(dev, &res_regs);
-       if (IS_ERR(priv->regs))
-               return PTR_ERR(priv->regs);
+       if (IS_ERR(priv->regs)) {
+               err = PTR_ERR(priv->regs);
+               goto out_put_node;
+       }
 
        dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 
index 6453148d066a7d6dc698a84ba970d90859d52d85..4eb17daefc4fc2243084c6a84181626d90dd4807 100644 (file)
@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
index 0959e6824cb635f0262524db9729988986193fb4..1fc2d852249fb2b5f1a72233024093b726431659 100644 (file)
@@ -38,6 +38,7 @@
 #define ALX_DEV_ID_AR8161                              0x1091
 #define ALX_DEV_ID_E2200                               0xe091
 #define ALX_DEV_ID_E2400                               0xe0a1
+#define ALX_DEV_ID_E2500                               0xe0b1
 #define ALX_DEV_ID_AR8162                              0x1090
 #define ALX_DEV_ID_AR8171                              0x10A1
 #define ALX_DEV_ID_AR8172                              0x10A0
index 9a9745c4047c3757c4b1733dae9d5908db44baa1..625235db644f73bdbbc7a79088e6a15c60d27019 100644 (file)
@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core)
 
        if (!bgmac_is_bcm4707_family(core)) {
                mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
-               if (!IS_ERR(mii_bus)) {
+               if (IS_ERR(mii_bus)) {
                        err = PTR_ERR(mii_bus);
                        goto err;
                }
index 8fc3f3c137f87d2ac4fb0fc867acd50def69a03f..505ceaf451e25c85e4fb6e1baea3a5773658dd41 100644 (file)
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
        struct bnx2 *bp = netdev_priv(dev);
        int rc;
 
-       rc = bnx2_request_firmware(bp);
-       if (rc < 0)
-               goto out;
-
        netif_carrier_off(dev);
 
        bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
-       bnx2_release_firmware(bp);
        goto out;
 }
 
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       rc = bnx2_request_firmware(bp);
+       if (rc < 0)
+               goto error;
+
+       bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
        memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 error:
+       bnx2_release_firmware(bp);
        pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 97e8925116662b105c7c7a57274fa2f5e906865d..fa3386bb14f7317c6a3c64fdf075c379c5cc2643 100644 (file)
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));
 
+       if (pci_channel_offline(bp->pdev)) {
+               BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+               return;
+       }
+
        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
        /* Release IRQs */
        bnx2x_free_irq(bp);
 
-       /* Reset the chip */
-       rc = bnx2x_reset_hw(bp, reset_code);
-       if (rc)
-               BNX2X_ERR("HW_RESET failed\n");
+       /* Reset the chip, unless the PCI function is offline. If we reach
+        * this point following PCI error handling, it means the device is
+        * really in a bad state and we're about to remove it, so resetting
+        * the chip is not a good idea.
+        */
+       if (!pci_channel_offline(bp->pdev)) {
+               rc = bnx2x_reset_hw(bp, reset_code);
+               if (rc)
+                       BNX2X_ERR("HW_RESET failed\n");
+       }
 
        /* Report UNLOAD_DONE to MCP */
        bnx2x_send_unload_done(bp, keep_link);
index 2cf79100c9cb2241b8172cd4a8543cd28b73a7c1..228c964e709a2d73dc1491c998cddabda59d6180 100644 (file)
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
-                       __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
-                                        push_len - 16);
+                       __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+                                        (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
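
push_len counts 8-byte units, per (length + sizeof(*tx_push) + 7) / 8 earlier in the function, while __iowrite32_copy() takes its count in 4-byte units, so the tail beyond the first 16 qwords is doubled with << 1. A quick standalone check of that unit conversion; the byte total is made up:

#include <stdio.h>

int main(void)
{
	unsigned int total_bytes = 160;			/* hypothetical push size */
	unsigned int push_len = (total_bytes + 7) / 8;	/* in 8-byte units: 20 */

	if (push_len > 16) {
		unsigned int tail_dwords = (push_len - 16) << 1;

		/* 16 qwords (128 bytes) first, then the 4-byte-unit tail */
		printf("copy 16 qwords, then %u dwords\n", tail_dwords);	/* 8 */
	}
	return 0;
}
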
index ff300f7cf5295fc0d4f1e1b0692d74e6a6091912..a2551bcd1027007916f91a364833ee8368315185 100644 (file)
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                                info->data = TG3_RSS_MAX_NUM_QS;
                }
 
-               /* The first interrupt vector only
-                * handles link interrupts.
-                */
-               info->data -= 1;
                return 0;
 
        default:
@@ -14014,7 +14010,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        }
 
        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+           (!ec->rx_coalesce_usecs) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+           (!ec->tx_coalesce_usecs) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14025,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;
 
-       /* No rx interrupts will be generated if both are zero */
-       if ((ec->rx_coalesce_usecs == 0) &&
-           (ec->rx_max_coalesced_frames == 0))
-               return -EINVAL;
-
-       /* No tx interrupts will be generated if both are zero */
-       if ((ec->tx_coalesce_usecs == 0) &&
-           (ec->tx_max_coalesced_frames == 0))
-               return -EINVAL;
-
        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
index 0e4fdc3dd729752fef73f8a268ed795fa1d0a06d..31f61a744d6655b1b999ea34d4aab9bee7ecd3bc 100644 (file)
 #define BNAD_NUM_TXF_COUNTERS 12
 #define BNAD_NUM_RXF_COUNTERS 10
 #define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
 #define BNAD_NUM_TXQ_COUNTERS 5
 
-#define BNAD_ETHTOOL_STATS_NUM                                         \
-       (sizeof(struct rtnl_link_stats64) / sizeof(u64) +       \
-       sizeof(struct bnad_drv_stats) / sizeof(u64) +           \
-       offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "tx_dropped",
        "multicast",
        "collisions",
-
        "rx_length_errors",
-       "rx_over_errors",
        "rx_crc_errors",
        "rx_frame_errors",
-       "rx_fifo_errors",
-       "rx_missed_errors",
-
-       "tx_aborted_errors",
-       "tx_carrier_errors",
        "tx_fifo_errors",
-       "tx_heartbeat_errors",
-       "tx_window_errors",
-
-       "rx_compressed",
-       "tx_compressed",
 
        "netif_queue_stop",
        "netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "fc_tx_fid_parity_errors",
 };
 
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
 static int
 bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 {
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_allocbuf_failed", q_num);
                                string += ETH_GSTRING_LEN;
+                               sprintf(string, "rxq%d_mapbuf_failed", q_num);
+                               string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_producer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
                                        sprintf(string, "rxq%d_allocbuf_failed",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
+                                       sprintf(string, "rxq%d_mapbuf_failed",
+                                               q_num);
+                                       string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_producer_index",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
                       u64 *buf)
 {
        struct bnad *bnad = netdev_priv(netdev);
-       int i, j, bi;
+       int i, j, bi = 0;
        unsigned long flags;
-       struct rtnl_link_stats64 *net_stats64;
+       struct rtnl_link_stats64 net_stats64;
        u64 *stats64;
        u32 bmap;
 
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
         * under the same lock
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bi = 0;
-       memset(buf, 0, stats->n_stats * sizeof(u64));
-
-       net_stats64 = (struct rtnl_link_stats64 *)buf;
-       bnad_netdev_qstats_fill(bnad, net_stats64);
-       bnad_netdev_hwstats_fill(bnad, net_stats64);
 
-       bi = sizeof(*net_stats64) / sizeof(u64);
+       memset(&net_stats64, 0, sizeof(net_stats64));
+       bnad_netdev_qstats_fill(bnad, &net_stats64);
+       bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+       buf[bi++] = net_stats64.rx_packets;
+       buf[bi++] = net_stats64.tx_packets;
+       buf[bi++] = net_stats64.rx_bytes;
+       buf[bi++] = net_stats64.tx_bytes;
+       buf[bi++] = net_stats64.rx_errors;
+       buf[bi++] = net_stats64.tx_errors;
+       buf[bi++] = net_stats64.rx_dropped;
+       buf[bi++] = net_stats64.tx_dropped;
+       buf[bi++] = net_stats64.multicast;
+       buf[bi++] = net_stats64.collisions;
+       buf[bi++] = net_stats64.rx_length_errors;
+       buf[bi++] = net_stats64.rx_crc_errors;
+       buf[bi++] = net_stats64.rx_frame_errors;
+       buf[bi++] = net_stats64.tx_fifo_errors;
 
        /* Get netif_queue_stopped from stack */
        bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
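
The bnad conversion replaces a hand-derived stats count (computed from struct sizes) with ARRAY_SIZE() over the string table, so the count can never drift from the strings, and it copies the rtnl stats field by field instead of casting the output buffer to struct rtnl_link_stats64. A minimal sketch of the ARRAY_SIZE idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *stat_strings[] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
};

/* The count tracks the table automatically; adding a string above
 * updates STATS_NUM without touching any other definition.
 */
#define STATS_NUM ARRAY_SIZE(stat_strings)

int main(void)
{
        printf("%zu stats exported\n", STATS_NUM);
        return 0;
}
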
index 89c0cfa9719f345cd219a096223285a89fbf88fb..d954a97b0b0b011de07ded9e69f46b893517dde1 100644 (file)
@@ -1323,6 +1323,24 @@ dma_error:
        return 0;
 }
 
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+       /* no change for packets without checksum offloading */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       /* make sure we can modify the header */
+       if (unlikely(skb_cow_head(skb, 0)))
+               return -1;
+
+       /* initialize checksum field
+        * This is required - at least for Zynq, which otherwise calculates
+        * wrong UDP header checksums for UDP packets with UDP data len <=2
+        */
+       *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+       return 0;
+}
+
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u16 queue_index = skb_get_queue_mapping(skb);
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
+       if (macb_clear_csum(skb)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        /* Map socket buffer for DMA transfer */
        if (!macb_tx_map(bp, queue, skb)) {
                dev_kfree_skb_any(skb);
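
macb_clear_csum() finds the transport checksum through the skb's csum_start/csum_offset pair: csum_start locates the transport header and csum_offset the checksum field within it. A standalone sketch of that addressing, assuming for simplicity that csum_start is measured from the start of the frame:

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Zero the 16-bit checksum field inside a packet, given the offsets a
 * CHECKSUM_PARTIAL skb carries: csum_start points at the transport
 * header, csum_offset at the checksum field within it (6 for UDP).
 */
static void clear_csum(uint8_t *pkt, size_t csum_start, size_t csum_offset)
{
        uint16_t zero = 0;

        memcpy(pkt + csum_start + csum_offset, &zero, sizeof(zero));
}

int main(void)
{
        uint8_t pkt[64];

        memset(pkt, 0xff, sizeof(pkt));
        clear_csum(pkt, 34, 6);        /* IPv4: 14B eth + 20B IP, UDP csum */
        assert(pkt[40] == 0 && pkt[41] == 0);
        return 0;
}
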
index 36893d8958d4d6ce3d73575149f44a68c99e0569..b6fcf10621b63c39975c3ce53528d9497ce628a2 100644 (file)
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII    0x00000004
 #define MACB_CAPS_NO_GIGABIT_HALF              0x00000008
 #define MACB_CAPS_USRIO_DISABLED               0x00000010
+#define MACB_CAPS_JUMBO                                0x00000020
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
 #define MACB_CAPS_MACB_IS_GEM                  0x80000000
-#define MACB_CAPS_JUMBO                                0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
index 83025bb4737c47c44b2056190b68c0acc5461ed9..e29815d9e6f45723feca790b236bd5365f1b1ca2 100644 (file)
@@ -279,6 +279,7 @@ struct nicvf {
        u8                      sqs_id;
        bool                    sqs_mode;
        bool                    hw_tso;
+       bool                    t88;
 
        /* Receive buffer alloc */
        u32                     rb_page_offset;
index 16ed20357c5c30d2cbb786cb414a6d4544fecf83..85cc782b9060f7f1ff73dd866bdf7713812a660f 100644 (file)
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
        int lmac;
        u64 lmac_cfg;
 
-       /* Max value that can be set is 60 */
-       if (size > 60)
-               size = 60;
+       /* There is an issue in HW wherein, while sending GSO sized
+        * pkts as part of TSO, if the pkt len falls below this size
+        * the NIC will zero-pad the packet and also update the IP total
+        * length. Hence set this value to less than the min pkt size of
+        * MAC+IP+TCP headers; BGX will do the padding to transmit a
+        * 64 byte pkt.
+        */
+       if (size > 52)
+               size = 52;
 
        for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
                lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
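
The new 52-byte cap follows from header arithmetic: minimal MAC+IP+TCP headers take 14 + 20 + 20 = 54 bytes, so a threshold below 54 keeps the NIC's zero-padding away from any packet that already carries those headers, leaving BGX to pad the frame to 64 bytes. Checked in two lines:

#include <assert.h>

int main(void)
{
        int eth = 14, ip = 20, tcp = 20;

        /* the pad threshold must sit below the smallest MAC+IP+TCP packet */
        assert(52 < eth + ip + tcp);   /* 52 < 54 */
        return 0;
}
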
index afb10e326b4fc46043b4d3f4cddb7d17f1cc341c..fab35a5938987c757f5faa2cbf808d7f28c05922 100644 (file)
 #define   NIC_QSET_SQ_0_7_DOOR                 (0x010838)
 #define   NIC_QSET_SQ_0_7_STATUS               (0x010840)
 #define   NIC_QSET_SQ_0_7_DEBUG                        (0x010848)
-#define   NIC_QSET_SQ_0_7_CNM_CHG              (0x010860)
 #define   NIC_QSET_SQ_0_7_STAT_0_1             (0x010900)
 
 #define   NIC_QSET_RBDR_0_1_CFG                        (0x010C00)
index d2d8ef270142dbd3ec1fed3d019b106e6906cce4..ad4fddb5542160b4512643cde5228f7a9d28b2e0 100644 (file)
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
-               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+               /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+                * produces bus errors when read
+                */
+               p[i++] = 0;
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
                reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
                p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
index a19e73f11d73c84a829d95980688249c511b21c4..3240349615bdac637238188dc649370e9c86fb78 100644 (file)
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
        struct nicvf *nic = netdev_priv(netdev);
        struct snd_queue *sq;
        struct sq_hdr_subdesc *hdr;
+       struct sq_hdr_subdesc *tso_sqe;
 
        sq = &nic->qs->sq[cqe_tx->sq_idx];
 
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 
        nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-       /* For TSO offloaded packets only one SQE will have a valid SKB */
        if (skb) {
+               /* Check for dummy descriptor used for HW TSO offload on 88xx */
+               if (hdr->dont_send) {
+                       /* Get actual TSO descriptors and free them */
+                       tso_sqe =
+                        (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+                       nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+               }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
                dev_consume_skb_any(skb);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        } else {
-               /* In case of HW TSO, HW sends a CQE for each segment of a TSO
-                * packet instead of a single CQE for the whole TSO packet
-                * transmitted. Each of this CQE points to the same SQE, so
-                * avoid freeing same SQE multiple times.
+               /* In case of SW TSO on 88xx, only the last segment will
+                * have an SKB attached, so just free SQEs here.
                 */
                if (!nic->hw_tso)
                        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *netdev;
        struct nicvf *nic;
        int    err, qcount;
+       u16    sdevid;
 
        err = pci_enable_device(pdev);
        if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pass1_silicon(nic->pdev))
                nic->hw_tso = true;
 
+       pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+       if (sdevid == 0xA134)
+               nic->t88 = true;
+
        /* Check if this VF is in QS only mode */
        if (nic->sqs_mode)
                return 0;
index 0ff8e60deccb78a941f73163212a25b23301f871..dda3ea3f3bb66c90c78f1273ed35e7f78635bcb8 100644 (file)
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
        return num_edescs + sh->gso_segs;
 }
 
+#define POST_CQE_DESC_COUNT 2
+
 /* Get the number of SQ descriptors needed to xmit this skb */
 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
                return subdesc_cnt;
        }
 
+       /* Dummy descriptors to get TSO pkt completion notification */
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+               subdesc_cnt += POST_CQE_DESC_COUNT;
+
        if (skb_shinfo(skb)->nr_frags)
                subdesc_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
        struct sq_hdr_subdesc *hdr;
 
        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
-       sq->skbuff[qentry] = (u64)skb;
-
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
-       /* Enable notification via CQE after processing SQE */
-       hdr->post_cqe = 1;
-       /* No of subdescriptors following this */
-       hdr->subdesc_cnt = subdesc_cnt;
+
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+               /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+                * segment transmitted on 88xx.
+                */
+               hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+       } else {
+               sq->skbuff[qentry] = (u64)skb;
+               /* Enable notification via CQE after processing SQE */
+               hdr->post_cqe = 1;
+               /* No of subdescriptors following this */
+               hdr->subdesc_cnt = subdesc_cnt;
+       }
        hdr->tot_len = len;
 
        /* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
        gather->addr = data;
 }
 
+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+                                           int tso_sqe, struct sk_buff *skb)
+{
+       struct sq_imm_subdesc *imm;
+       struct sq_hdr_subdesc *hdr;
+
+       sq->skbuff[qentry] = (u64)skb;
+
+       hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+       hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+       /* Enable notification via CQE after processing SQE */
+       hdr->post_cqe = 1;
+       /* There is no packet to transmit here */
+       hdr->dont_send = 1;
+       hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+       hdr->tot_len = 1;
+       /* Actual TSO header SQE index, needed for cleanup */
+       hdr->rsvd2 = tso_sqe;
+
+       qentry = nicvf_get_nxt_sqentry(sq, qentry);
+       imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(imm, 0, SND_QUEUE_DESC_SIZE);
+       imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+       imm->len = 1;
+}
+
 /* Segment a TSO packet into 'gso_size' segments and append
  * them to SQ for transfer
  */
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 {
        int i, size;
-       int subdesc_cnt;
+       int subdesc_cnt, tso_sqe = 0;
        int sq_num, qentry;
        struct queue_set *qs;
        struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 skb, skb->len);
+       tso_sqe = qentry;
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        }
 
 doorbell:
+       if (nic->t88 && skb_shinfo(skb)->gso_size) {
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+               nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+       }
+
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();
 
index 2e2aa9fec9bbbeaec23fbe8196bd35c3875ff1b7..edd23386b47d84d17aedaa53b2d8ff8f69829e2d 100644 (file)
@@ -419,8 +419,8 @@ struct link_config {
        unsigned short supported;        /* link capabilities */
        unsigned short advertising;      /* advertised capabilities */
        unsigned short lp_advertising;   /* peer advertised capabilities */
-       unsigned short requested_speed;  /* speed user has requested */
-       unsigned short speed;            /* actual link speed */
+       unsigned int   requested_speed;  /* speed user has requested */
+       unsigned int   speed;            /* actual link speed */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
index c45de49dc9630d3c211cad693f888e6d50d9079d..3ceafb55d6da704a3c235012877f6a12bb54dfdd 100644 (file)
@@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
        .resume         = eeh_resume,
 };
 
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
 static inline bool is_x_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
-              (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+       unsigned int speeds, high_speeds;
+
+       speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+       high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+       return high_speeds != 0;
 }
 
 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4335,6 +4342,11 @@ static void cfg_queues(struct adapter *adap)
 #endif
        int ciq_size;
 
+       /* Reduce memory usage in kdump environment, disable all offload.
+        */
+       if (is_kdump_kernel())
+               adap->params.offload = 0;
+
        for_each_port(adap, i)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 #ifdef CONFIG_CHELSIO_T4_DCB
@@ -4365,11 +4377,6 @@ static void cfg_queues(struct adapter *adap)
        if (q10g > netif_get_num_default_rss_queues())
                q10g = netif_get_num_default_rss_queues();
 
-       /* Reduce memory usage in kdump environment, disable all offload.
-        */
-       if (is_kdump_kernel())
-               adap->params.offload = 0;
-
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
 
@@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
+       if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+               bufp += sprintf(bufp, "25G/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
                bufp += sprintf(bufp, "40G/");
+       if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+               bufp += sprintf(bufp, "100G/");
        if (bufp != buf)
                --bufp;
        sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
index dc92c80a75f481c5825fe34e92b41b3f1b4fb07b..660204bff726346956f990761d065dc8832af1f4 100644 (file)
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+                    FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
                     FW_PORT_CAP_ANEG)
 
 /**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
                speed = 1000;
        else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                speed = 10000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+               speed = 25000;
        else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                speed = 40000;
+       else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+               speed = 100000;
 
        lc = &pi->link_cfg;
 
index a89b30720e386e0e601f55bfd22c506798c35dba..30507d44422c9c7b47d8fd73bc691ab3aa1a59f4 100644 (file)
@@ -2265,6 +2265,12 @@ enum fw_port_cap {
        FW_PORT_CAP_802_3_ASM_DIR       = 0x8000,
 };
 
+#define FW_PORT_CAP_SPEED_S     0
+#define FW_PORT_CAP_SPEED_M     0x3f
+#define FW_PORT_CAP_SPEED_V(x)  ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+       (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
 enum fw_port_mdi {
        FW_PORT_CAP_MDI_UNCHANGED,
        FW_PORT_CAP_MDI_AUTO,
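
The new _S/_M/_V/_G macros are the usual shift-and-mask field accessors: _G extracts the speed field from a capability word, _V shifts a value back into position, and is_x_10g_port() masks off the sub-10G bits from the result. A self-contained sketch of the pattern with made-up field values:

#include <assert.h>

#define SPEED_S 0
#define SPEED_M 0x3f
#define SPEED_V(x) ((x) << SPEED_S)
#define SPEED_G(x) (((x) >> SPEED_S) & SPEED_M)

#define CAP_100M 0x01
#define CAP_1G   0x02
#define CAP_10G  0x04

int main(void)
{
        unsigned int supported = CAP_100M | CAP_1G | CAP_10G;
        unsigned int speeds = SPEED_V(SPEED_G(supported));
        unsigned int high = speeds & ~(CAP_100M | CAP_1G);

        assert(high == CAP_10G);  /* only >1G speeds remain */
        return 0;
}
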
index 8ee541431e8b321baacb008840844a5a7f49c048..17a2bbcf93f04e8ae6e507edd83355d74b61e7d7 100644 (file)
@@ -108,8 +108,8 @@ struct link_config {
        unsigned int   supported;        /* link capabilities */
        unsigned int   advertising;      /* advertised capabilities */
        unsigned short lp_advertising;   /* peer advertised capabilities */
-       unsigned short requested_speed;  /* speed user has requested */
-       unsigned short speed;            /* actual link speed */
+       unsigned int   requested_speed;  /* speed user has requested */
+       unsigned int   speed;            /* actual link speed */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
        return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
 }
 
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
 static inline bool is_x_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
-               (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+       unsigned int speeds, high_speeds;
+
+       speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+       high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+       return high_speeds != 0;
 }
 
 static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
index 427bfa71388b3a000cdaf5689d10e65fb1e3b8f4..b5622b1689e9c000a44bf983af3b33402180084e 100644 (file)
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
-                    FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+                    FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+                    FW_PORT_CAP_ANEG)
 
 /**
  *     init_link_config - initialize a link's SW state
@@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                        speed = 1000;
                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                        speed = 10000;
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+                       speed = 25000;
                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                        speed = 40000;
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+                       speed = 100000;
 
                /*
                 * Scan all of our "ports" (Virtual Interfaces) looking for
index 1471e16ba7199b229c78884ee093799f24718423..f45385f5c6e58269d0d3f593b460fb5952a6c5e3 100644 (file)
@@ -1299,6 +1299,7 @@ static int
 dm9000_open(struct net_device *dev)
 {
        struct board_info *db = netdev_priv(dev);
+       unsigned int irq_flags = irq_get_trigger_type(dev->irq);
 
        if (netif_msg_ifup(db))
                dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
        /* If there is no IRQ type specified, tell the user that this is a
         * problem
         */
-       if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+       if (irq_flags == IRQF_TRIGGER_NONE)
                dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
 
+       irq_flags |= IRQF_SHARED;
+
        /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
        iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
        mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
        /* Initialize DM9000 board */
        dm9000_init_dm9000(dev);
 
-       if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
-                       dev->name, dev))
+       if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
                return -EAGAIN;
        /* Now that we have an interrupt handler hooked up we can unmask
         * our interrupts
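
The dm9000 change reads back whatever trigger type the platform already attached to the interrupt and passes it to request_irq() ORed with IRQF_SHARED, rather than hardcoding the flags. A rough userspace analog of that flag handling (constants are illustrative, not the kernel's):

#include <stdio.h>

#define TRIGGER_NONE  0x0
#define TRIGGER_EDGE  0x1
#define FLAG_SHARED   0x80

/* Keep whatever trigger type was configured elsewhere and OR in the
 * driver's own requirement, rather than overwriting the flags.
 */
static unsigned int irq_request_flags(unsigned int configured)
{
        if (configured == TRIGGER_NONE)
                fprintf(stderr, "warning: no IRQ trigger type set\n");
        return configured | FLAG_SHARED;
}

int main(void)
{
        printf("flags=%#x\n", irq_request_flags(TRIGGER_EDGE));
        return 0;
}
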
index d20935dc8399f7edc172d9ced666b7154080d912..4b4f5bc0e2799cdaea3f1be09fc29e07cbb6b559 100644 (file)
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 {
        unsigned int size = lstatus & BD_LENGTH_MASK;
        struct page *page = rxb->page;
+       bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
        /* Remove the FCS from the packet length */
-       if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+       if (last)
                size -= ETH_FCS_LEN;
 
-       if (likely(first))
+       if (likely(first)) {
                skb_put(skb, size);
-       else
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                               rxb->page_offset + RXBUF_ALIGNMENT,
-                               size, GFAR_RXB_TRUESIZE);
+       } else {
+               /* the last fragment's length contains the full frame length */
+               if (last)
+                       size -= skb->len;
+
+               /* in case the last fragment consisted only of the FCS */
+               if (size > 0)
+                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                                       rxb->page_offset + RXBUF_ALIGNMENT,
+                                       size, GFAR_RXB_TRUESIZE);
+       }
 
        /* try reuse page */
        if (unlikely(page_count(page) != 1))
index 373fd094f2f320b5beff6c0194a150c5cc044e0f..6e8a9c8467b9a62f8383b9b692ceabeb9ad1cf42 100644 (file)
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
 #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
                          + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define GFAR_RXB_TRUESIZE 2048
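
The new GFAR_RXB_SIZE rounds 1536 + 8 (room for a DSA tag) up to the controller's 64-byte granularity: roundup(1544, 64) = 1600, so tagged frames fit in one buffer instead of being fragmented. The arithmetic, checked:

#include <assert.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        assert(ROUNDUP(1536 + 8, 64) == 1600);
        return 0;
}
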
index 1235c7f2564bd8de42a9416538b536af2dc07d50..1e1eb92998fb3d66f497f88b890817e22c4a8d3d 100644 (file)
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
        {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
        {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
        {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
-       {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+       {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
        {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
        {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
        {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
index ff8b6a468b2487f6db72c3fd37446518371c2def..6ea872287307bd85b13436a42c1d8aacbad05f2d 100644 (file)
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
 static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
 {
        u32 port;
-       struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
 
        if (ppe_cb->ppe_common_cb) {
+               struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
                port = ppe_cb->index;
                dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
        }
index 4c9771d57d6e7fce3a77e093e467fe2d0342243e..7af09cbc53f079114a167d21d7433643930d15fa 100644 (file)
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
                dev->mcast_pending = 1;
                return;
        }
+
+       mutex_lock(&dev->link_lock);
        __emac_set_multicast_list(dev);
+       mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+       struct emac_instance *dev = netdev_priv(ndev);
+       struct sockaddr *addr = sa;
+       struct emac_regs __iomem *p = dev->emacp;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       mutex_lock(&dev->link_lock);
+
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+       emac_rx_disable(dev);
+       emac_tx_disable(dev);
+       out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+       out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+               (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+               ndev->dev_addr[5]);
+       emac_tx_enable(dev);
+       emac_rx_enable(dev);
+
+       mutex_unlock(&dev->link_lock);
+
+       return 0;
 }
 
 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
        .ndo_do_ioctl           = emac_ioctl,
        .ndo_tx_timeout         = emac_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = emac_set_mac_address,
        .ndo_start_xmit         = emac_start_xmit,
        .ndo_change_mtu         = eth_change_mtu,
 };
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
        .ndo_do_ioctl           = emac_ioctl,
        .ndo_tx_timeout         = emac_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = emac_set_mac_address,
        .ndo_start_xmit         = emac_start_xmit_sg,
        .ndo_change_mtu         = emac_change_mtu,
 };
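
emac_set_mac_address() packs the six address bytes into two device registers: the high register takes bytes 0-1 and the low register bytes 2-5, each shifted into place as in the out_be32() calls above. A standalone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC address into the two register values the driver
 * writes to IAHR (bytes 0-1) and IALR (bytes 2-5).
 */
static void pack_mac(const uint8_t a[6], uint32_t *hi, uint32_t *lo)
{
        *hi = (a[0] << 8) | a[1];
        *lo = ((uint32_t)a[2] << 24) | (a[3] << 16) | (a[4] << 8) | a[5];
}

int main(void)
{
        const uint8_t mac[6] = { 0x02, 0x00, 0xde, 0xad, 0xbe, 0xef };
        uint32_t hi, lo;

        pack_mac(mac, &hi, &lo);
        /* expect IAHR=0x0200 IALR=0xdeadbeef */
        printf("IAHR=%#06x IALR=%#010x\n", (unsigned)hi, (unsigned)lo);
        return 0;
}
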
index 7fd4d54599e4557dd37b457396145195a77da3fa..6b03c8553e59710b5cad2cb903f0e4e05d0cae5e 100644 (file)
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
                                  | FLAG2_DISABLE_ASPM_L0S
                                  | FLAG2_DISABLE_ASPM_L1
                                  | FLAG2_NO_DISABLE_RX
-                                 | FLAG2_DMA_BURST,
+                                 | FLAG2_DMA_BURST
+                                 | FLAG2_CHECK_SYSTIM_OVERFLOW,
        .pba                    = 32,
        .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
        .flags2                 = FLAG2_DISABLE_ASPM_L0S
                                  | FLAG2_DISABLE_ASPM_L1
-                                 | FLAG2_NO_DISABLE_RX,
+                                 | FLAG2_NO_DISABLE_RX
+                                 | FLAG2_CHECK_SYSTIM_OVERFLOW,
        .pba                    = 32,
        .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
index ef96cd11d6d2c34a726db4b1c0c0a5ec8a76bf7c..879cca47b021446565f30ea4e877e569ec17531c 100644 (file)
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_PCIM2PCI_ARBITER_WA         BIT(11)
 #define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW       BIT(14)
 
 #define E1000_RX_DESC_PS(R, i)     \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
index 3e11322d8d586a839bb19b15b2dc37b680ad8bf5..f3aaca743ea3ff3a2250547e47fb8d1c275f0cea 100644 (file)
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
                                  | FLAG_HAS_JUMBO_FRAMES
                                  | FLAG_APME_IN_WUC,
        .flags2                 = FLAG2_HAS_PHY_STATS
-                                 | FLAG2_HAS_EEE,
+                                 | FLAG2_HAS_EEE
+                                 | FLAG2_CHECK_SYSTIM_OVERFLOW,
        .pba                    = 26,
        .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
index 02f443958f3199aaf95c786ef1b59bd2948f4937..7017281ba2dc6355449c7970e6f4f3dabd3fb65d 100644 (file)
@@ -4302,6 +4302,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
        clear_bit(__E1000_RESETTING, &adapter->state);
 }
 
+/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583: possible bad bits read from SYSTIMH/L;
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+       u64 time_delta, rem, temp;
+       cycle_t systim_next;
+       u32 incvalue;
+       int i;
+
+       incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+       for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+               /* latch SYSTIMH on read of SYSTIML */
+               systim_next = (cycle_t)er32(SYSTIML);
+               systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+               time_delta = systim_next - systim;
+               temp = time_delta;
+               /* VMware users have seen incvalue of zero, don't div / 0 */
+               rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+               systim = systim_next;
+
+               if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+                       break;
+       }
+
+       return systim;
+}
+
 /**
  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
  * @cc: cyclecounter structure
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
                                                     cc);
        struct e1000_hw *hw = &adapter->hw;
        u32 systimel, systimeh;
-       cycle_t systim, systim_next;
+       cycle_t systim;
        /* SYSTIMH latching upon SYSTIML read does not work well.
         * This means that if SYSTIML overflows after we read it but before
         * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
        systim = (cycle_t)systimel;
        systim |= (cycle_t)systimeh << 32;
 
-       if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-               u64 time_delta, rem, temp;
-               u32 incvalue;
-               int i;
-
-               /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
-                * check to see that the time is incrementing at a reasonable
-                * rate and is a multiple of incvalue
-                */
-               incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
-               for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
-                       /* latch SYSTIMH on read of SYSTIML */
-                       systim_next = (cycle_t)er32(SYSTIML);
-                       systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
-                       time_delta = systim_next - systim;
-                       temp = time_delta;
-                       /* VMWare users have seen incvalue of zero, don't div / 0 */
-                       rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
-                       systim = systim_next;
+       if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+               systim = e1000e_sanitize_systim(hw, systim);
 
-                       if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
-                           (rem == 0))
-                               break;
-               }
-       }
        return systim;
 }
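
The extracted helper rereads SYSTIM until consecutive samples differ by a small delta that is also an exact multiple of the programmed increment, filtering the bad-bit reads the 82574/82583 errata describes; do_div() is used in the kernel because the 64-bit remainder is needed and a zero incvalue must not divide. A userspace sketch of the acceptance test (the epsilon here is illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define SYSTIM_EPSILON 12   /* illustrative bound, not the driver's */

/* A delta between two counter reads is plausible when it is small and
 * an exact multiple of the programmed increment; a zero increment
 * degenerates to "delta must be zero" to avoid dividing by zero.
 */
static bool systim_delta_ok(uint64_t delta, uint32_t incvalue)
{
        uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

        return delta < SYSTIM_EPSILON && rem == 0;
}

int main(void)
{
        assert(systim_delta_ok(8, 4));        /* 8 = 2 * 4, small: accept */
        assert(!systim_delta_ok(9, 4));       /* not a multiple: reject   */
        assert(!systim_delta_ok(1 << 20, 4)); /* too large: reject        */
        return 0;
}
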
 
index e1370c556a3c3e90aa69e942d927dc78add80bfa..618f18436618945b807c5e08f499dc1a4cd689cf 100644 (file)
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
 {
        struct i40e_client_instance *cdev;
+       int ret = 0;
 
        if (!vsi)
                return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
                                        "Cannot locate client instance open routine\n");
                                continue;
                        }
-                       cdev->client->ops->open(&cdev->lan_info, cdev->client);
+                       if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                      &cdev->state))) {
+                               ret = cdev->client->ops->open(&cdev->lan_info,
+                                                             cdev->client);
+                               if (!ret)
+                                       set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                               &cdev->state);
+                       }
                }
        }
        mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
  * i40e_client_add_instance - add a client instance struct to the instance list
  * @pf: pointer to the board struct
  * @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
  *
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success (or if the instance already exists), NULL on failure
  **/
 static
 struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
-                                                     struct i40e_client *client)
+                                                    struct i40e_client *client,
+                                                    bool *existing)
 {
        struct i40e_client_instance *cdev;
        struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
        mutex_lock(&i40e_client_instance_mutex);
        list_for_each_entry(cdev, &i40e_client_instances, list) {
                if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
-                       cdev = NULL;
+                       *existing = true;
                        goto out;
                }
        }
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 {
        struct i40e_client_instance *cdev;
        struct i40e_client *client;
+       bool existing = false;
        int ret = 0;
 
        if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
                        /* check if L2 VSI is up, if not we are not ready */
                        if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
                                continue;
+               } else {
+                       dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+                                client->name);
                }
 
                /* Add the client instance to the instance list */
-               cdev = i40e_client_add_instance(pf, client);
+               cdev = i40e_client_add_instance(pf, client, &existing);
                if (!cdev)
                        continue;
 
-               /* Also up the ref_cnt of no. of instances of this client */
-               atomic_inc(&client->ref_cnt);
-               dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
-                        client->name, pf->hw.pf_id,
-                        pf->hw.bus.device, pf->hw.bus.func);
+               if (!existing) {
+                       /* Also up the ref_cnt for no. of instances of this
+                        * client.
+                        */
+                       atomic_inc(&client->ref_cnt);
+                       dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+                                client->name, pf->hw.pf_id,
+                                pf->hw.bus.device, pf->hw.bus.func);
+               }
 
                /* Send an Open request to the client */
                atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
                 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
 
        /* Since in some cases register may have happened before a device gets
-        * added, we can schedule a subtask to go initiate the clients.
+        * added, we can schedule a subtask to initiate the clients if
+        * they can be launched at probe time.
         */
        pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
        i40e_service_event_schedule(pf);
index 81c99e1be708d445270323ece7c6c63cbc681111..d0b3a1bb82cac9ec9fbb05abc63d2a7f44edb45b 100644 (file)
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
  **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
+       int i, tc_unused = 0;
        u8 num_tc = 0;
-       int i;
+       u8 ret = 0;
 
        /* Scan the ETS Config Priority Table to find
         * traffic class enabled for a given priority
-        * and use the traffic class index to get the
-        * number of traffic classes enabled
+        * and create a bitmask of enabled TCs
         */
-       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-               if (dcbcfg->etscfg.prioritytable[i] > num_tc)
-                       num_tc = dcbcfg->etscfg.prioritytable[i];
-       }
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+               num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
 
-       /* Traffic class index starts from zero so
-        * increment to return the actual count
+       /* Now scan the bitmask to check for
+        * contiguous TCs starting with TC0
         */
-       return num_tc + 1;
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (num_tc & BIT(i)) {
+                       if (!tc_unused) {
+                               ret++;
+                       } else {
+                               pr_err("Non-contiguous TC - Disabling DCB\n");
+                               return 1;
+                       }
+               } else {
+                       tc_unused = 1;
+               }
+       }
+
+       /* There is always at least TC0 */
+       if (!ret)
+               ret = 1;
+
+       return ret;
 }
 
 /**
@@ -5098,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                       DCB_CAP_DCBX_VER_IEEE;
 
                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
-                       /* Enable DCB tagging only when more than one TC */
+                       /* Enable DCB tagging only when more than one TC
+                        * or explicitly disable if only one TC
+                        */
                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                                pf->flags |= I40E_FLAG_DCB_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_DCB_ENABLED;
                        dev_dbg(&pf->pdev->dev,
                                "DCBX offload is supported for this PF.\n");
                }
@@ -5416,7 +5435,6 @@ int i40e_open(struct net_device *netdev)
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
 
        udp_tunnel_get_rx_info(netdev);
-       i40e_notify_client_of_netdev_open(vsi);
 
        return 0;
 }
@@ -5702,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        u8 type;
 
        /* Not DCB capable or capability disabled */
-       if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return ret;
 
        /* Ignore if event is not for Nearest Bridge */
@@ -7882,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 #endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
+                                      I40E_FLAG_DCB_ENABLED    |
                                       I40E_FLAG_SRIOV_ENABLED  |
                                       I40E_FLAG_FD_SB_ENABLED  |
                                       I40E_FLAG_FD_ATR_ENABLED |
@@ -10488,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
+                              I40E_FLAG_DCB_ENABLED    |
                               I40E_FLAG_SRIOV_ENABLED  |
                               I40E_FLAG_VMDQ_ENABLED);
        } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10511,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                /* Not enough queues for all TCs */
                if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
                    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+                                       I40E_FLAG_DCB_ENABLED);
                        dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
                }
                pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10908,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        err = i40e_init_pf_dcb(pf);
        if (err) {
                dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
-               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
                /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
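
The rewritten i40e_dcb_get_num_tc() first folds the priority table into a bitmask of enabled TCs, then walks the mask from TC0 up: the count is only valid while the set bits are contiguous, and a gap followed by another set bit falls back to a single TC, disabling DCB. A standalone sketch of the contiguity walk:

#include <stdio.h>

#define MAX_TC 8

/* Count contiguous TCs starting at TC0; a gap followed by another set
 * bit is invalid and falls back to a single TC.
 */
static int count_contiguous_tcs(unsigned int mask)
{
        int i, gap = 0, n = 0;

        for (i = 0; i < MAX_TC; i++) {
                if (mask & (1u << i)) {
                        if (gap)
                                return 1;   /* non-contiguous: disable */
                        n++;
                } else {
                        gap = 1;
                }
        }
        return n ? n : 1;                   /* there is always at least TC0 */
}

int main(void)
{
        printf("%d\n", count_contiguous_tcs(0x07));  /* TC0-2 -> 3 */
        printf("%d\n", count_contiguous_tcs(0x05));  /* gap    -> 1 */
        return 0;
}
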
index e61b647f5f2a86bcc5844d3d16a65a0db5acb7c1..336c103ae374e4175b2c8274f06e5adb20a17256 100644 (file)
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
                }
        }
 
-       shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+       shhwtstamps.hwtstamp =
+               ktime_add_ns(shhwtstamps.hwtstamp, adjust);
 
        skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
                         struct sk_buff *skb)
 {
        __le64 *regval = (__le64 *)va;
+       struct igb_adapter *adapter = q_vector->adapter;
+       int adjust = 0;
 
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
         * Field: Reserved Reserved SYSTIML  SYSTIMH
         */
-       igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
                                   le64_to_cpu(regval[1]));
+
+       /* adjust timestamp for the RX latency based on link speed */
+       if (adapter->hw.mac.type == e1000_i210) {
+               switch (adapter->link_speed) {
+               case SPEED_10:
+                       adjust = IGB_I210_RX_LATENCY_10;
+                       break;
+               case SPEED_100:
+                       adjust = IGB_I210_RX_LATENCY_100;
+                       break;
+               case SPEED_1000:
+                       adjust = IGB_I210_RX_LATENCY_1000;
+                       break;
+               }
+       }
+       skb_hwtstamps(skb)->hwtstamp =
+               ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 }
 
 /**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
                }
        }
        skb_hwtstamps(skb)->hwtstamp =
-               ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+               ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
        /* Update the last_rx_timestamp timer in order to enable watchdog check
         * for error case of latched timestamp on a dropped packet.
index b4217f30e89c7bbbc0e3c3a2e6c2252cafa59706..c47b605e86519d1e444212b08dcb96453260524e 100644 (file)
@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        }
 
        /* was that the last pool using this rar? */
-       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+       if (mpsar_lo == 0 && mpsar_hi == 0 &&
+           rar != 0 && rar != hw->mac.san_mac_rar_index)
                hw->mac.ops.clear_rar(hw, rar);
+
        return 0;
 }
 
index 5418c69a74630bdd8b2aa2519c9e7f41c3e76ac9..b4f03748adc02592890cbdafb4e607f610fce224 100644 (file)
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vlnctrl, i;
 
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
                /* fall through */
        case ixgbe_mac_82598EB:
                /* legacy case, we can just disable VLAN filtering */
-               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-               vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+               vlnctrl &= ~IXGBE_VLNCTRL_VFE;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                return;
        }
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
        /* Set flag so we don't redo unnecessary work */
        adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
 
+       /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+       vlnctrl |= IXGBE_VLNCTRL_VFE;
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
        /* Add PF to all active pools */
        for (i = IXGBE_VLVF_ENTRIES; --i;) {
                u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vlnctrl, i;
 
+       /* Set VLAN filtering to enabled */
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       vlnctrl |= IXGBE_VLNCTRL_VFE;
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
                        break;
                /* fall through */
        case ixgbe_mac_82598EB:
-               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-               vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               vlnctrl |= IXGBE_VLNCTRL_VFE;
-               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                return;
        }
 
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
                            struct tcf_exts *exts, u64 *action, u8 *queue)
 {
        const struct tc_action *a;
+       LIST_HEAD(actions);
        int err;
 
        if (tc_no_actions(exts))
                return -EINVAL;
 
-       tc_for_each_action(a, exts) {
+       tcf_exts_to_list(exts, &actions);
+       list_for_each_entry(a, &actions, list) {
 
                /* Drop action */
                if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
 
        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features |
+                              NETIF_F_HW_VLAN_CTAG_FILTER |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_RXALL |
index b57ae3afb994ab0dde3b22831af841bbec063081..3743af8f1dedd6d2462e78ef84af22f5f7fe82c7 100644 (file)
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
        MTK_ETHTOOL_STAT(rx_flow_control_packets),
 };
 
+static const char * const mtk_clks_source_name[] = {
+       "ethif", "esw", "gp1", "gp2"
+};
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
        __raw_writel(val, eth->base + reg);
@@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        case PHY_INTERFACE_MODE_MII:
                ge_mode = 1;
                break;
-       case PHY_INTERFACE_MODE_RMII:
+       case PHY_INTERFACE_MODE_REVMII:
                ge_mode = 2;
                break;
+       case PHY_INTERFACE_MODE_RMII:
+               if (!mac->id)
+                       goto err_phy;
+               ge_mode = 3;
+               break;
        default:
-               dev_err(eth->dev, "invalid phy_mode\n");
-               return -1;
+               goto err_phy;
        }
 
        /* put the gmac into the right mode */
@@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;
+
+       if (of_phy_is_fixed_link(mac->of_node))
+               mac->phy_dev->supported |=
+               SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
        mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
                                   SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);
 
+       of_node_put(np);
+
        return 0;
+
+err_phy:
+       of_node_put(np);
+       dev_err(eth->dev, "invalid phy_mode\n");
+       return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
        struct device_node *mii_np;
-       int err;
+       int ret;
 
        mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
        if (!mii_np) {
@@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        }
 
        if (!of_device_is_available(mii_np)) {
-               err = 0;
+               ret = -ENODEV;
                goto err_put_node;
        }
 
-       eth->mii_bus = mdiobus_alloc();
+       eth->mii_bus = devm_mdiobus_alloc(eth->dev);
        if (!eth->mii_bus) {
-               err = -ENOMEM;
+               ret = -ENOMEM;
                goto err_put_node;
        }
 
@@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        eth->mii_bus->parent = eth->dev;
 
        snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       mdiobus_free(eth->mii_bus);
+       ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
        of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
+       return ret;
 }
 
 static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -322,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
                return;
 
        mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -542,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
        return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
        if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-               dma_unmap_single(dev,
+               dma_unmap_single(eth->dev,
                                 dma_unmap_addr(tx_buf, dma_addr0),
                                 dma_unmap_len(tx_buf, dma_len0),
                                 DMA_TO_DEVICE);
        } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-               dma_unmap_page(dev,
+               dma_unmap_page(eth->dev,
                               dma_unmap_addr(tx_buf, dma_addr0),
                               dma_unmap_len(tx_buf, dma_len0),
                               DMA_TO_DEVICE);
@@ -572,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
-       u32 txd4 = 0;
+       u32 txd4 = 0, fport;
 
        itxd = ring->next_free;
        if (itxd == ring->last_free)
                return -ENOMEM;
 
        /* set the forward port */
-       txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       txd4 |= fport;
 
        tx_buf = mtk_desc_to_tx_buf(ring, itxd);
        memset(tx_buf, 0, sizeof(*tx_buf));
@@ -595,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        if (skb_vlan_tag_present(skb))
                txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
+       mapped_addr = dma_map_single(eth->dev, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+       if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
                return -ENOMEM;
 
        WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
                        n_desc++;
                        frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+                       mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
                                                       frag_map_size,
                                                       DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+                       if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
                                goto err_dma;
 
                        if (i == nr_frags - 1 &&
@@ -637,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
                                               TX_DMA_PLEN0(frag_map_size) |
                                               last_frag * TX_DMA_LS0));
-                       WRITE_ONCE(txd->txd4, 0);
+                       WRITE_ONCE(txd->txd4, fport);
 
                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -679,7 +690,7 @@ err_dma:
                tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
                /* unmap dma */
-               mtk_tx_unmap(&dev->dev, tx_buf);
+               mtk_tx_unmap(eth, tx_buf);
 
                itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
                itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
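
Stashing the forward-port field in fport and writing it into txd4 of every fragment descriptor fixes scatter-gather frames: previously only the head descriptor carried the port, so continuation descriptors were steered to port 0. The shape, illustrative only:

    u32 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;

    txd4 |= fport;                          /* head descriptor */
    /* ... per-fragment mapping loop ... */
    WRITE_ONCE(txd->txd4, fport);           /* every fragment descriptor */
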
@@ -836,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
-               dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+               dma_addr = dma_map_single(eth->dev,
                                          new_data + NET_SKB_PAD,
                                          ring->buf_size,
                                          DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+               if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
                        skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
@@ -849,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                /* receive data */
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
+                       skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-               dma_unmap_single(&netdev->dev, trxd.rxd1,
+               dma_unmap_single(eth->dev, trxd.rxd1,
                                 ring->buf_size, DMA_FROM_DEVICE);
                pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
                skb->dev = netdev;
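
The RX path now maps and unmaps against eth->dev, the platform device that actually owns the DMA hardware; a net_device's embedded struct device has no DMA ops, so mappings made through it are unreliable (and the old code even unmapped with a different device than it mapped with). The skb_free_frag() change pairs the free with the page-fragment allocator used for new_data. The invariant, as a sketch:

    /* Map, test, and unmap against the same DMA-capable device. */
    dma_addr_t addr;

    addr = dma_map_single(eth->dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(eth->dev, addr))
            return -ENOMEM;         /* never hand addr to hardware */

    /* ... hardware fills the buffer ... */

    dma_unmap_single(eth->dev, addr, len, DMA_FROM_DEVICE);
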
@@ -937,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
                        done[mac]++;
                        budget--;
                }
-               mtk_tx_unmap(eth->dev, tx_buf);
+               mtk_tx_unmap(eth, tx_buf);
 
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
@@ -1092,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
        if (ring->buf) {
                for (i = 0; i < MTK_DMA_SIZE; i++)
-                       mtk_tx_unmap(eth->dev, &ring->buf[i]);
+                       mtk_tx_unmap(eth, &ring->buf[i]);
                kfree(ring->buf);
                ring->buf = NULL;
        }
@@ -1490,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
        struct mtk_eth *eth = mac->hw;
 
        phy_disconnect(mac->phy_dev);
-       mtk_mdio_cleanup(eth);
        mtk_irq_disable(eth, ~0);
-       free_irq(eth->irq[1], dev);
-       free_irq(eth->irq[2], dev);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1751,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
                goto free_netdev;
        }
        spin_lock_init(&mac->hw_stats->stats_lock);
+       u64_stats_init(&mac->hw_stats->syncp);
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
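
u64_stats_init() seeds the seqcount inside syncp, which 32-bit SMP kernels use to let readers snapshot 64-bit counters without locks (on 64-bit builds it compiles away). The usual reader/writer pairing, assuming a tx_bytes field alongside syncp:

    #include <linux/u64_stats_sync.h>

    /* writer (stats owner) */
    u64_stats_update_begin(&hw_stats->syncp);
    hw_stats->tx_bytes += len;
    u64_stats_update_end(&hw_stats->syncp);

    /* reader: retry until the snapshot didn't race an update */
    unsigned int start;
    u64 bytes;

    do {
            start = u64_stats_fetch_begin(&hw_stats->syncp);
            bytes = hw_stats->tx_bytes;
    } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
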
@@ -1796,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
        if (!eth)
                return -ENOMEM;
 
+       eth->dev = &pdev->dev;
        eth->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(eth->base))
                return PTR_ERR(eth->base);
@@ -1830,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
                        return -ENXIO;
                }
        }
+       for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+               eth->clks[i] = devm_clk_get(eth->dev,
+                                           mtk_clks_source_name[i]);
+               if (IS_ERR(eth->clks[i])) {
+                       if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+                               return -EPROBE_DEFER;
+                       return -ENODEV;
+               }
+       }
 
-       eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
-       eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
-       eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
-       eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
-       if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
-           IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
-               return -ENODEV;
-
-       clk_prepare_enable(eth->clk_ethif);
-       clk_prepare_enable(eth->clk_esw);
-       clk_prepare_enable(eth->clk_gp1);
-       clk_prepare_enable(eth->clk_gp2);
+       clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+       clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
 
-       eth->dev = &pdev->dev;
        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
        INIT_WORK(&eth->pending_work, mtk_pending_work);
 
@@ -1886,15 +1896,24 @@ err_free_dev:
 static int mtk_remove(struct platform_device *pdev)
 {
        struct mtk_eth *eth = platform_get_drvdata(pdev);
+       int i;
 
-       clk_disable_unprepare(eth->clk_ethif);
-       clk_disable_unprepare(eth->clk_esw);
-       clk_disable_unprepare(eth->clk_gp1);
-       clk_disable_unprepare(eth->clk_gp2);
+       /* stop all devices to make sure that dma is properly shut down */
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               mtk_stop(eth->netdev[i]);
+       }
+
+       clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
 
        netif_napi_del(&eth->tx_napi);
        netif_napi_del(&eth->rx_napi);
        mtk_cleanup(eth);
+       mtk_mdio_cleanup(eth);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
@@ -1904,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = {
        { .compatible = "mediatek,mt7623-eth" },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_mtk_match);
 
 static struct platform_driver mtk_driver = {
        .probe = mtk_probe,
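
MODULE_DEVICE_TABLE(of, of_mtk_match) emits the compatible strings as module aliases, so when the driver is built as a module, udev can autoload it as soon as a matching mediatek,mt7623-eth node is probed; without the macro the match table only works for built-in drivers. The pairing is simply:

    static const struct of_device_id of_mtk_match[] = {
            { .compatible = "mediatek,mt7623-eth" },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, of_mtk_match);
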
index f82e3acb947b47ab94b9c684cf0ae9d58c6ee052..6e1ade7a25c55364be9bc05492e3b607b45640c7 100644 (file)
@@ -290,6 +290,17 @@ enum mtk_tx_flags {
        MTK_TX_FLAGS_PAGE0      = 0x02,
 };
 
+/* This enum gives each clock its index in the clock array, i.e. it fixes
+ * the order in which the clocks are stored.
+ */
+enum mtk_clks_map {
+       MTK_CLK_ETHIF,
+       MTK_CLK_ESW,
+       MTK_CLK_GP1,
+       MTK_CLK_GP2,
+       MTK_CLK_MAX
+};
+
 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 *                     by the TX descriptors
  * @skb:               The SKB pointer of the packet being sent
@@ -370,10 +381,7 @@ struct mtk_rx_ring {
  * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:  physical address of scratch_ring
  * @scratch_head:      The scratch memory that scratch_ring points to.
- * @clk_ethif:         The ethif clock
- * @clk_esw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
+ * @clks:              clock array for all clocks required
  * @mii_bus:           If there is a bus we need to create an instance for it
  * @pending_work:      The workqueue used to reset the dma ring
  */
@@ -400,10 +408,8 @@ struct mtk_eth {
        struct mtk_tx_dma               *scratch_ring;
        dma_addr_t                      phy_scratch_ring;
        void                            *scratch_head;
-       struct clk                      *clk_ethif;
-       struct clk                      *clk_esw;
-       struct clk                      *clk_gp1;
-       struct clk                      *clk_gp2;
+       struct clk                      *clks[MTK_CLK_MAX];
+
        struct mii_bus                  *mii_bus;
        struct work_struct              pending_work;
 };
index 99c6bbdff50115cb4fcde86ffdc6dc4d35e43030..b04760a5034b9c84038c0bbb44fe80035bc1e0f7 100644 (file)
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
                *cap = true;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = priv->cee_params.dcbx_cap;
+               *cap = priv->dcbx_cap;
                break;
        case DCB_CAP_ATTR_PFC_TCS:
                *cap = 1 <<  mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       return priv->cee_params.dcb_cfg.pfc_state;
+       return priv->cee_config.pfc_state;
 }
 
 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.pfc_state = state;
+       priv->cee_config.pfc_state = state;
 }
 
 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+       *setting = priv->cee_config.dcb_pfc[priority];
 }
 
 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
-       priv->cee_params.dcb_cfg.pfc_state = true;
+       priv->cee_config.dcb_pfc[priority] = setting;
+       priv->cee_config.pfc_state = true;
 }
 
 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
-       int err = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return -EINVAL;
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+               return 1;
 
-       if (dcb_cfg->pfc_state) {
+       if (priv->cee_config.pfc_state) {
                int tc;
 
                priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
                        u8 tc_mask = 1 << tc;
 
-                       switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+                       switch (priv->cee_config.dcb_pfc[tc]) {
                        case pfc_disabled:
                                priv->prof->tx_ppp &= ~tc_mask;
                                priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                en_dbg(DRV, priv, "Set pfc off\n");
        }
 
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
-       if (err)
+       if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                 priv->rx_skb_size + ETH_FCS_LEN,
+                                 priv->prof->tx_pause,
+                                 priv->prof->tx_ppp,
+                                 priv->prof->rx_pause,
+                                 priv->prof->rx_ppp)) {
                en_err(priv, "Failed setting pause params\n");
-       return err;
+               return 1;
+       }
+
+       return 0;
 }
 
 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
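
These CEE dcbnl hooks (setall, set_state) return a u8 status, 0 for success and nonzero for failure, rather than a negative errno, which is why the -EINVAL returns become return 1. A sketch of the convention; the helpers are hypothetical:

    static u8 cee_setall_sketch(struct net_device *dev)
    {
            if (!cee_mode_enabled(dev))     /* hypothetical check */
                    return 1;               /* failure is 1, not -EINVAL */

            if (apply_pause_params(dev))    /* hypothetical helper */
                    return 1;

            return 0;                       /* success */
    }
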
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int num_tcs = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;
 
        if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
                priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
        }
 
-       return mlx4_en_setup_tc(dev, num_tcs);
+       if (mlx4_en_setup_tc(dev, num_tcs))
+               return 1;
+
+       return 0;
 }
 
 /* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
                                .selector = idtype,
                                .protocol = id,
                             };
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 0;
 
        return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct dcb_app app;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return -EINVAL;
 
        memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       return priv->cee_params.dcbx_cap;
+       return priv->dcbx_cap;
 }
 
 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        struct ieee_ets ets = {0};
        struct ieee_pfc pfc = {0};
 
-       if (mode == priv->cee_params.dcbx_cap)
+       if (mode == priv->dcbx_cap)
                return 0;
 
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
            !(mode & DCB_CAP_DCBX_HOST))
                goto err;
 
-       priv->cee_params.dcbx_cap = mode;
+       priv->dcbx_cap = mode;
 
        ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
        pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
index 4198e9bf89d044733e83596dec346dbd0c6a38a9..fedb829276f4e9835e4b74f97451fcdaeaf78a6a 100644 (file)
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
                if (up) {
-                       priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+                       if (priv->dcbx_cap)
+                               priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
                } else {
                        priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
-                       priv->cee_params.dcb_cfg.pfc_state = false;
+                       priv->cee_config.pfc_state = false;
                }
        }
 #endif /* CONFIG_MLX4_EN_DCB */
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
-#ifdef CONFIG_MLX4_EN_DCB
-       struct tc_configuration *tc;
-#endif
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
-               priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
-                                           DCB_CAP_DCBX_HOST |
-                                           DCB_CAP_DCBX_VER_IEEE;
+               priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+                       DCB_CAP_DCBX_VER_IEEE;
                priv->flags |= MLX4_EN_DCB_ENABLED;
-               priv->cee_params.dcb_cfg.pfc_state = false;
+               priv->cee_config.pfc_state = false;
 
-               for (i = 0; i < MLX4_EN_NUM_UP; i++) {
-                       tc = &priv->cee_params.dcb_cfg.tc_config[i];
-                       tc->dcb_pfc = pfc_disabled;
-               }
+               for (i = 0; i < MLX4_EN_NUM_UP; i++)
+                       priv->cee_config.dcb_pfc[i] = pfc_disabled;
 
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
                        dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
index 9df87ca0515a2b9d0b523c176c88e44e2ab3fe60..e2509bba3e7c31dd0db45734a8a274f956cccaa2 100644 (file)
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
                                  &inline_ok, &fragptr);
        if (unlikely(!real_size))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* Align descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Oversized header or SG list\n");
-               goto tx_drop;
+               goto tx_drop_count;
        }
 
        bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
                               PCI_DMA_TODEVICE);
        }
 
+tx_drop_count:
+       ring->tx_dropped++;
 tx_drop:
        dev_kfree_skb_any(skb);
-       ring->tx_dropped++;
        return NETDEV_TX_OK;
 }
 
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
                goto tx_drop;
 
        if (mlx4_en_is_tx_ring_full(ring))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* fetch ring->cons far ahead before needing it to avoid stall */
        ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
 
        return NETDEV_TX_OK;
 
-tx_drop:
+tx_drop_count:
        ring->tx_dropped++;
+tx_drop:
        return NETDEV_TX_BUSY;
 }
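
The split labels make ring->tx_dropped count only genuine discards: error paths that really drop a frame jump to tx_drop_count (count, then free or report busy), while paths that must free without counting jump past it to tx_drop. Schematically, for the first function; the two conditions are hypothetical placeholders:

            if (counted_error)              /* e.g. oversized descriptor */
                    goto tx_drop_count;
            if (uncounted_path)             /* free only, no stat bump */
                    goto tx_drop;

    tx_drop_count:
            ring->tx_dropped++;
    tx_drop:
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
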
index f613977455e08340995baeb65763086e408479f3..cf8f8a72a80154c19a6ccb9ca807499491ccebd9 100644 (file)
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        return 0;
 
 err_out_unmap:
-       while (i >= 0)
-               mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+       while (i > 0)
+               mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
 #ifdef CONFIG_RFS_ACCEL
        for (i = 1; i <= dev->caps.num_ports; i++) {
                if (mlx4_priv(dev)->port[i].rmap) {
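
The old unwind, while (i >= 0) mlx4_free_eq(dev, &priv->eq_table.eq[i--]), freed one entry too many: on entry to the error path, i holds the count of successfully created EQs, so entry i itself was never created, and the loop also walked below index 0. while (i > 0) with eq[--i] releases exactly the created range [0, i). A self-contained illustration:

    #include <stdio.h>

    #define N 4

    int main(void)
    {
            int created[N] = {0};
            int i;

            /* pretend creation failed after 2 successful entries */
            for (i = 0; i < 2; i++)
                    created[i] = 1;

            /* correct unwind: touches only entries [0, i) */
            while (i > 0) {
                    --i;
                    printf("freeing eq[%d] (created=%d)\n", i, created[i]);
            }
            return 0;
    }
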
index 75dd2e3d3059577b496df75659da509724867f1f..7183ac4135d2f97dbb1e7b63f2d57db4c95a5d0c 100644 (file)
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
                mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
                device_remove_file(&info->dev->persist->pdev->dev,
                                   &info->port_attr);
+               devlink_port_unregister(&info->devlink_port);
                info->port = -1;
        }
 
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->persist->pdev->dev,
                           &info->port_mtu_attr);
+       devlink_port_unregister(&info->devlink_port);
+
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(info->rmap);
        info->rmap = NULL;
index 2c2913dcae980b0a665e54b128554fc168b364b7..9099dbd04951a0e18fc3d33357f1e8d5c25d6909 100644 (file)
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
        pfc_enabled_rx
 };
 
-struct tc_configuration {
-       enum dcb_pfc_type  dcb_pfc;
-};
-
 struct mlx4_en_cee_config {
        bool    pfc_state;
-       struct  tc_configuration tc_config[MLX4_EN_NUM_UP];
+       enum    dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
 };
-
-struct mlx4_en_cee_params {
-       u8 dcbx_cap;
-       struct mlx4_en_cee_config dcb_cfg;
-};
-
 #endif
 
 struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
        struct ieee_ets ets;
        u16 maxrate[IEEE_8021QAZ_MAX_TCS];
        enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
-       struct mlx4_en_cee_params cee_params;
+       struct mlx4_en_cee_config cee_config;
+       u8 dcbx_cap;
 #endif
 #ifdef CONFIG_RFS_ACCEL
        spinlock_t filters_lock;
index 3d2095e5c61c594c167507ade0cd109e4cd4ae6a..c5b2064297a19b0dde2640764acb4216343633f1 100644 (file)
@@ -52,7 +52,7 @@
 
 #define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
 #define MLX4_IGNORE_FCS_MASK                   0x1
-#define MLNX4_TX_MAX_NUMBER                    8
+#define MLX4_TC_MAX_NUMBER                     8
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
        u8 num_tc = dev->caps.max_tc_eth;
 
        if (!num_tc)
-               num_tc = MLNX4_TX_MAX_NUMBER;
+               num_tc = MLX4_TC_MAX_NUMBER;
 
        return num_tc;
 }
index d6e2a1cae19ae2d6d636f1d306bca8e607106095..c2ec01a22d55ad9c2ff8e1b73bc5696e5edbe15a 100644 (file)
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
        return cmd->cmd_buf + (idx << cmd->log_stride);
 }
 
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
 {
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
+       int end = len + offset;
 
-       for (i = 0; i < len; i++)
+       for (i = offset; i < end; i++)
                sum ^= ptr[i];
 
        return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
 
 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 {
-       if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+       size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+       int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+       if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EINVAL;
 
-       if (xor8_buf(block, sizeof(*block)) != 0xff)
+       if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EINVAL;
 
        return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
-                          int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 {
-       block->token = token;
-       if (csum) {
-               block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
-                                           sizeof(block->data) - 2);
-               block->sig = ~xor8_buf(block, sizeof(*block) - 1);
-       }
+       int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+       size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+       block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+       block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 {
        struct mlx5_cmd_mailbox *next = msg->next;
-
-       while (next) {
-               calc_block_sig(next->buf, token, csum);
+       int size = msg->len;
+       int blen = size - min_t(int, sizeof(msg->first.data), size);
+       int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+               / MLX5_CMD_DATA_BLOCK_SIZE;
+       int i = 0;
+
+       for (i = 0; i < n && next; i++)  {
+               calc_block_sig(next->buf);
                next = next->next;
        }
 }
 
 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
-       ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-       calc_chain_sig(ent->in, ent->token, csum);
-       calc_chain_sig(ent->out, ent->token, csum);
+       ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+       if (csum) {
+               calc_chain_sig(ent->in);
+               calc_chain_sig(ent->out);
+       }
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
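
The command-block signatures XOR a byte range so the whole block sums to 0xff; the new offset parameter lets callers start the sum mid-struct (at rsvd0, skipping the token and earlier fields) without pointer casts, and the chain walkers now bound themselves by the computed block count instead of trusting next pointers alone. A self-contained sketch of the checksum identity:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* XOR bytes buf[offset..offset+len); mirrors the driver's xor8_buf() */
    static uint8_t xor8_buf(const void *buf, size_t offset, int len)
    {
            const uint8_t *p = buf;
            uint8_t sum = 0;
            int i, end = (int)offset + len;

            for (i = (int)offset; i < end; i++)
                    sum ^= p[i];
            return sum;
    }

    int main(void)
    {
            uint8_t blk[8];

            memset(blk, 0xab, sizeof(blk));
            blk[7] = ~xor8_buf(blk, 0, sizeof(blk) - 1);    /* signature byte */

            /* a valid block XORs to 0xff over its full length */
            printf("check: 0x%02x\n", xor8_buf(blk, 0, sizeof(blk)));
            return 0;
    }
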
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int err;
        u8 sig;
+       int size = ent->out->len;
+       int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+       int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+               / MLX5_CMD_DATA_BLOCK_SIZE;
+       int i = 0;
 
-       sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+       sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EINVAL;
 
-       while (next) {
+       for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }
 
-       ent->token = alloc_token(cmd);
        cmd->ent_arr[ent->idx] = ent;
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
-                          void *context, int page_queue, u8 *status)
+                          void *context, int page_queue, u8 *status,
+                          u8 token)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        if (IS_ERR(ent))
                return PTR_ERR(ent);
 
+       ent->token = token;
+
        if (!callback)
                init_completion(&ent->done);
 
@@ -854,7 +870,8 @@ static const struct file_operations fops = {
        .write  = dbg_write,
 };
 
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+                           u8 token)
 {
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
+               block->token = token;
                next = next->next;
        }
 
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
 }
 
 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-                                              gfp_t flags, int size)
+                                              gfp_t flags, int size,
+                                              u8 token)
 {
        struct mlx5_cmd_mailbox *tmp, *head = NULL;
        struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                tmp->next = head;
                block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
                block->block_num = cpu_to_be32(n - i - 1);
+               block->token = token;
                head = tmp;
        }
        msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
        }
 
        if (IS_ERR(msg))
-               msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+               msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 
        return msg;
 }
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        int err;
        u8 status = 0;
        u32 drv_synd;
+       u8 token;
 
        if (pci_channel_offline(dev->pdev) ||
            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                return err;
        }
 
-       err = mlx5_copy_to_msg(inb, in, in_size);
+       token = alloc_token(&dev->cmd);
+
+       err = mlx5_copy_to_msg(inb, in, in_size, token);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto out_in;
        }
 
-       outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+       outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
        if (IS_ERR(outb)) {
                err = PTR_ERR(outb);
                goto out_in;
        }
 
        err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-                             pages_queue, &status);
+                             pages_queue, &status, token);
        if (err)
                goto out_out;
 
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
        INIT_LIST_HEAD(&cmd->cache.med.head);
 
        for (i = 0; i < NUM_LONG_LISTS; i++) {
-               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
        }
 
        for (i = 0; i < NUM_MED_LISTS; i++) {
-               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
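
Allocating the token once in cmd_exec() and threading it through copy and alloc means every mailbox block is stamped before the command is queued, closing the race where cmd_work_handler() assigned the token only after cache-recycled blocks were already populated; cache entries themselves are created with token 0 and re-stamped in mlx5_copy_to_msg() on reuse. The flow, abbreviated from the calls above:

    token = alloc_token(&dev->cmd);                         /* once per command */

    err = mlx5_copy_to_msg(inb, in, in_size, token);        /* stamps in-blocks  */
    outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);   /* stamps out-blocks */

    err = mlx5_cmd_invoke(dev, inb, outb, out, out_size,
                          callback, context, pages_queue, &status, token);
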
index 1b495efa7490d2d1edb9ccc3f93de4f74ad52d7f..bf722aa88cf05b528518572f7c9971dc0cd414b7 100644 (file)
 #define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 #define MLX5_MPWRQ_STRIDES_PER_PAGE            (MLX5_MPWRQ_NUM_STRIDES >> \
                                                 MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
-                                  BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+       (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
 #define MLX5_UMR_ALIGN                         (2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
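
The MTT bookkeeping behind these macros: every multi-packet WQE consumes MLX5_MPWRQ_PAGES_PER_WQE translation entries padded to a multiple of 8, the hardware counts them in octwords at 2 MTTs apiece (hence ALIGN(npages, 8) / 2), and the octword count must fit a 16-bit field, which MLX5E_VALID_NUM_MTTS enforces. A quick numeric check with illustrative sizes:

    #include <stdio.h>

    #define ALIGN_UP(x, a)   (((x) + (a) - 1) / (a) * (a))
    #define MTT_OCTW(n)      (ALIGN_UP(n, 8) / 2)   /* 2 MTTs per octword */

    int main(void)
    {
            /* example: 4 rings x 1024 WQEs x 16 pages/WQE (illustrative) */
            unsigned long num_mtts = 4UL * 1024 * ALIGN_UP(16, 8);

            printf("mtts=%lu octw=%lu fits_u16=%d\n",
                   num_mtts, MTT_OCTW(num_mtts),
                   MTT_OCTW(num_mtts) <= 0xffff);
            return 0;
    }
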
 
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
 };
 
 enum {
-       MLX5E_RQ_STATE_POST_WQES_ENABLE,
+       MLX5E_RQ_STATE_FLUSH,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
-       MLX5E_RQ_STATE_FLUSH_TIMEOUT,
        MLX5E_RQ_STATE_AM,
 };
 
@@ -304,6 +307,7 @@ struct mlx5e_rq {
 
        unsigned long          state;
        int                    ix;
+       u32                    mpwqe_mtt_offset;
 
        struct mlx5e_rx_am     am; /* Adaptive Moderation */
 
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
 };
 
 enum {
-       MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+       MLX5E_SQ_STATE_FLUSH,
        MLX5E_SQ_STATE_BF_ENABLE,
-       MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
                     MLX5E_MAX_NUM_CHANNELS);
 }
 
-static inline int mlx5e_get_mtt_octw(int npages)
-{
-       return ALIGN(npages, 8) / 2;
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
index 673043ccd76cfe8d66b2ff1fbbeaf909871fe387..9cce153e10359a305d6d44136890a68b4854221a 100644 (file)
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
        struct mlx5e_tir *tir;
        void *in;
        int inlen;
-       int err;
+       int err = 0;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
                if (err)
-                       return err;
+                       goto out;
        }
 
+out:
        kvfree(in);
 
-       return 0;
+       return err;
 }
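
The early return inside the loop leaked the mlx5_vzalloc() buffer; initializing err to 0 and routing both exits through out: makes kvfree(in) unconditional. This is the canonical allocate/loop/free shape:

    in = mlx5_vzalloc(inlen);
    if (!in)
            return -ENOMEM;

    list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
            err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
            if (err)
                    goto out;       /* must not return: 'in' would leak */
    }

    out:
            kvfree(in);
            return err;
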
index caa9a3ccc3f38ae4acd8c58db845510784c099b0..762af16ed021b4815779728eae427d1cf593baf9 100644 (file)
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
        return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
 }
 
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+                                   struct ieee_ets *ets)
 {
        int bw_sum = 0;
        int i;
 
        /* Validate Priority */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+               if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+                       netdev_err(netdev,
+                                  "Failed to validate ETS: priority value greater than max(%d)\n",
+                                   MLX5E_MAX_PRIORITY);
                        return -EINVAL;
+               }
        }
 
        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-                       if (!ets->tc_tx_bw[i])
+                       if (!ets->tc_tx_bw[i]) {
+                               netdev_err(netdev,
+                                          "Failed to validate ETS: BW 0 is illegal\n");
                                return -EINVAL;
+                       }
 
                        bw_sum += ets->tc_tx_bw[i];
                }
        }
 
-       if (bw_sum != 0 && bw_sum != 100)
+       if (bw_sum != 0 && bw_sum != 100) {
+               netdev_err(netdev,
+                          "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
+       }
        return 0;
 }
 
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
-       err = mlx5e_dbcnl_validate_ets(ets);
+       err = mlx5e_dbcnl_validate_ets(netdev, ets);
        if (err)
                return err;
 
index 4a3757e60441693e0ffcd9ab9d119ede7e9981c8..7a346bb2ed0064c4db51404842a514af513a7f5b 100644 (file)
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
-                                                         pport_per_prio_pfc_stats_desc, 0);
+                                                         pport_per_prio_pfc_stats_desc, i);
                }
        }
 
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                                                   sq_stats_desc, j);
 }
 
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+                                   int num_wqe)
+{
+       int packets_per_wqe;
+       int stride_size;
+       int num_strides;
+       int wqe_size;
+
+       if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return num_wqe;
+
+       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       wqe_size = stride_size * num_strides;
+
+       packets_per_wqe = wqe_size /
+                         ALIGN(ETH_DATA_LEN, stride_size);
+       return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+                                   int num_packets)
+{
+       int packets_per_wqe;
+       int stride_size;
+       int num_strides;
+       int wqe_size;
+       int num_wqes;
+
+       if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return num_packets;
+
+       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       wqe_size = stride_size * num_strides;
+
+       num_packets = (1 << order_base_2(num_packets));
+
+       packets_per_wqe = wqe_size /
+                         ALIGN(ETH_DATA_LEN, stride_size);
+       num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+       return 1 << (order_base_2(num_wqes));
+}
+
 static void mlx5e_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int rq_wq_type = priv->params.rq_wq_type;
 
-       param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+       param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                                        1 << mlx5_max_log_rq_size(rq_wq_type));
        param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-       param->rx_pending     = 1 << priv->params.log_rq_size;
+       param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                                    1 << priv->params.log_rq_size);
        param->tx_pending     = 1 << priv->params.log_sq_size;
 }
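
With a striding RQ a single WQE absorbs many packets, so the ethtool ring sizes are now translated in both directions: WQEs to packets multiplies by the per-WQE packet capacity, wqe_size / ALIGN(ETH_DATA_LEN, stride_size), rounding the result down to a power of two, and the setter converts user-requested packets back to WQEs with DIV_ROUND_UP plus a power-of-two round-up. A numeric sketch under assumed stride parameters:

    #include <stdio.h>

    int main(void)
    {
            /* illustrative parameters: 64-byte strides, 2048 per WQE */
            int stride_size = 1 << 6, num_strides = 1 << 11;
            int wqe_size = stride_size * num_strides;   /* 128 KiB per WQE */
            int eth_data = 1500;
            int aligned = ((eth_data + stride_size - 1) / stride_size)
                          * stride_size;
            int packets_per_wqe = wqe_size / aligned;

            printf("each WQE holds ~%d packets (payload aligned to %d)\n",
                   packets_per_wqe, aligned);
            return 0;
    }
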
 
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        bool was_opened;
        int rq_wq_type = priv->params.rq_wq_type;
+       u32 rx_pending_wqes;
+       u32 min_rq_size;
+       u32 max_rq_size;
        u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
+       u32 num_mtts;
        int err = 0;
 
        if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                            __func__);
                return -EINVAL;
        }
-       if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+       min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                              1 << mlx5_min_log_rq_size(rq_wq_type));
+       max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                              1 << mlx5_max_log_rq_size(rq_wq_type));
+       rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+                                                  param->rx_pending);
+
+       if (param->rx_pending < min_rq_size) {
                netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
                            __func__, param->rx_pending,
-                           1 << mlx5_min_log_rq_size(rq_wq_type));
+                           min_rq_size);
                return -EINVAL;
        }
-       if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+       if (param->rx_pending > max_rq_size) {
                netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
                            __func__, param->rx_pending,
-                           1 << mlx5_max_log_rq_size(rq_wq_type));
+                           max_rq_size);
                return -EINVAL;
        }
+
+       num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+                                      rx_pending_wqes);
+       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+           !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+               netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+                           __func__, param->rx_pending);
+               return -EINVAL;
+       }
+
        if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
                netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
                            __func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                return -EINVAL;
        }
 
-       log_rq_size = order_base_2(param->rx_pending);
+       log_rq_size = order_base_2(rx_pending_wqes);
        log_sq_size = order_base_2(param->tx_pending);
-       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
        if (log_rq_size == priv->params.log_rq_size &&
            log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
        unsigned int count = ch->combined_count;
        bool arfs_enabled;
        bool was_opened;
+       u32 num_mtts;
        int err = 0;
 
        if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
                return -EINVAL;
        }
 
+       num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+           !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+               netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+                           __func__, count);
+               return -EINVAL;
+       }
+
        if (priv->params.num_channels == count)
                return 0;
 
@@ -582,9 +659,10 @@ out:
 static void ptys2ethtool_supported_link(unsigned long *supported_modes,
                                        u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(supported_modes, supported_modes,
                          ptys2ethtool_table[proto].supported,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -593,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
 static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
                                    u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(advertising_modes, advertising_modes,
                          ptys2ethtool_table[proto].advertised,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
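
for_each_set_bit() operates on unsigned long words, so casting a u32 * makes it read 8 bytes where only 4 are valid on 64-bit builds (stack garbage in the upper half, and the wrong half entirely on big-endian). Copying into a genuine unsigned long first makes the walk well-defined; the two helpers here are hypothetical:

    u32 eth_proto_cap = read_proto_caps();          /* hypothetical source */
    unsigned long proto_cap = eth_proto_cap;        /* widen into a real word */
    int proto;

    for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
            handle_link_mode(proto);                /* hypothetical consumer */
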
index 870bea37c57cb37d8005b63f1b5b877e47280567..2459c7f3db8d26152724dc7edfd20c12a996ff45 100644 (file)
 #include "eswitch.h"
 #include "vxlan.h"
 
-enum {
-       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
-       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
-       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
-                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
+                       s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
+               rq->mpwqe_mtt_offset = c->ix *
+                       MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
-       MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
        return -ETIMEDOUT;
 }
 
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       /* UMR WQE (if in progress) is always at wq->head */
+       if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
        sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
        sq->ico_wqe_info[pi].num_wqebbs = 1;
        mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-       int tout = 0;
-       int err;
-
-       clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
-       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
-              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
-       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
-               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
-       /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
-       napi_synchronize(&rq->channel->napi);
-
        cancel_work_sync(&rq->am.work);
 
        mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
                goto err_disable_sq;
 
        if (sq->txq) {
-               set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                netdev_tx_reset_queue(sq->txq);
                netif_tx_start_queue(sq->txq);
        }
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-       int tout = 0;
-       int err;
+       set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+       /* prevent netif_tx_wake_queue */
+       napi_synchronize(&sq->channel->napi);
 
        if (sq->txq) {
-               clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-               /* prevent netif_tx_wake_queue */
-               napi_synchronize(&sq->channel->napi);
                netif_tx_disable_queue(sq->txq);
 
-               /* ensure hw is notified of all pending wqes */
+               /* last doorbell out, godspeed .. */
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
-
-               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-                                     MLX5_SQC_STATE_ERR, false, 0);
-               if (err)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-       }
-
-       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
-       while (sq->cc != sq->pc &&
-              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
-       napi_synchronize(&sq->channel->napi);
-
-       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
+       mlx5e_free_tx_descs(sq);
        mlx5e_destroy_sq(sq);
 }
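
The teardown now relies on one state bit instead of timed polling: mark the queue with MLX5E_SQ_STATE_FLUSH, napi_synchronize() so no poller can still observe the old state, post a final NOP doorbell if there is room, then disable the hardware queue and reclaim the remaining descriptors in software. Ordering matters, since hardware must be stopped before software takes the ring back:

    set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);      /* 1. no new wakeups   */
    napi_synchronize(&sq->channel->napi);           /* 2. drain pollers    */
    /* 3. last doorbell if there is room (see above) */
    mlx5e_disable_sq(sq);                           /* 4. stop hardware    */
    mlx5e_free_tx_descs(sq);                        /* 5. software reclaim */
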
 
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-       err = mlx5e_set_dev_port_mtu(netdev);
-       if (err)
-               goto err_clear_state_opened_flag;
-
        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        u16 max_mtu;
        u16 min_mtu;
        int err = 0;
+       bool reset;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
        mutex_lock(&priv->state_lock);
 
+       reset = !priv->params.lro_en &&
+               (priv->params.rq_wq_type !=
+                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
+       if (was_opened && reset)
                mlx5e_close_locked(netdev);
 
        netdev->mtu = new_mtu;
+       mlx5e_set_dev_port_mtu(netdev);
 
-       if (was_opened)
+       if (was_opened && reset)
                err = mlx5e_open_locked(netdev);
 
        mutex_unlock(&priv->state_lock);
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
                if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                sched_work = true;
-               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
                netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
                           i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
        }
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *mkc;
        int inlen = sizeof(*in);
-       u64 npages =
-               priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+       u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+                                        BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
        int err;
 
        in = mlx5_vzalloc(inlen);
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
                     MLX5_PERM_LOCAL_WRITE |
                     MLX5_ACCESS_MODE_MTT;
 
+       npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
        mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
        mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
-       mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+       mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
        mkc->log2_page_size = PAGE_SHIFT;
 
        err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+               mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
                rep.load = mlx5e_nic_rep_load;
                rep.unload = mlx5e_nic_rep_unload;
                rep.vport = 0;
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
        mlx5e_init_l2_addr(priv);
 
+       mlx5e_set_dev_port_mtu(netdev);
+
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
+       u8 mac[ETH_ALEN];
 
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return;
 
+       mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep rep;
 
                rep.load = mlx5e_vport_rep_load;
                rep.unload = mlx5e_vport_rep_unload;
                rep.vport = vport;
+               ether_addr_copy(rep.hw_id, mac);
                mlx5_eswitch_register_vport_rep(esw, &rep);
        }
 }
index 1c7d8b8314bf43cc084aad96f5d4515dedfc36f3..134de4a11f1d9e65a4cad91be63638884e1a4706 100644 (file)
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_eswitch_rep *rep = priv->ppriv;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       u8 mac[ETH_ALEN];
 
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-               mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
                attr->u.ppid.id_len = ETH_ALEN;
-               memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+               ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
index 9f2a16a507e04f8cd9861251ab3d0d5973b23e90..e7c969df3dada5d9ebdff8311c565920cebf404b 100644
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
        }
 }
 
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-       return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+       return rq->mpwqe_mtt_offset +
                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-       u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+       u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
 
        memset(wqe, 0, sizeof(*wqe));
        cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->klm_octowords =
-               cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+               cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
-               cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+               cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 
        dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 {
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
-       u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+       u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
        int i;
 
        wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
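
The hunks above widen the WQE MTT offset from u16 to u32 and cast it to u64 before shifting by PAGE_SHIFT, so the DMA byte offset cannot wrap once rq->mpwqe_mtt_offset grows large. A minimal standalone sketch of why the cast matters (illustrative values, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int mtt_offset = 0x200000;	/* a large MTT entry offset */

	/* 32-bit arithmetic: the shift wraps modulo 2^32 and yields 0. */
	unsigned int wrapped = mtt_offset << PAGE_SHIFT;

	/* Widen first, as the hunk above does: the full offset survives. */
	uint64_t ok = (uint64_t)mtt_offset << PAGE_SHIFT;

	printf("wrapped=0x%x ok=0x%llx\n", wrapped, (unsigned long long)ok);
	return 0;
}
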
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
        clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+               return;
+       }
+
        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        rq->stats.mpwqe_frag++;
 
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        wi->free_wqe(rq, wi);
 }
 
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
-       struct mlx5_wq_ll *wq = &rq->wq;
-       struct mlx5e_rx_wqe *wqe;
-       __be16 wqe_ix_be;
-       u16 wqe_ix;
-
-       while (!mlx5_wq_ll_is_empty(wq)) {
-               wqe_ix_be = *wq->tail_next;
-               wqe_ix    = be16_to_cpu(wqe_ix_be);
-               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-               rq->dealloc_wqe(rq, wqe_ix);
-               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-                              &wqe->next.next_wqe_index);
-       }
-}
-
 #define RQ_CANNOT_POST(rq) \
-               (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
-                test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+       (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+        test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
 
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
@@ -648,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
 {
-       struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
-       struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
-       struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+       struct ethhdr   *eth = (struct ethhdr *)(skb->data);
+       struct iphdr    *ipv4;
+       struct ipv6hdr  *ipv6;
        struct tcphdr   *tcp;
+       int network_depth = 0;
+       __be16 proto;
+       u16 tot_len;
 
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-       u16 tot_len = cqe_bcnt - ETH_HLEN;
+       skb->mac_len = ETH_HLEN;
+       proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+       ipv4 = (struct iphdr *)(skb->data + network_depth);
+       ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+       tot_len = cqe_bcnt - network_depth;
 
-       if (eth->h_proto == htons(ETH_P_IP)) {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+       if (proto == htons(ETH_P_IP)) {
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
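
The LRO fixup above no longer assumes the IP header starts at ETH_HLEN; it asks __vlan_get_protocol() for the real network_depth so VLAN-tagged frames get their headers patched at the right offset. A rough userspace model of that depth computation, simplified to at most one 802.1Q tag (the kernel helper also handles QinQ):

#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100

/* Return the network-header offset; *proto gets the inner EtherType. */
static size_t network_depth(const uint8_t *frame, uint16_t *proto)
{
	size_t depth = ETH_HLEN;
	uint16_t type = (frame[12] << 8) | frame[13];

	if (type == ETH_P_8021Q) {		/* skip one VLAN tag */
		type = (frame[16] << 8) | frame[17];
		depth += VLAN_HLEN;
	}
	*proto = type;
	return depth;
}

int main(void)
{
	uint8_t tagged[18] = { [12] = 0x81, [13] = 0x00,
			       [16] = 0x08, [17] = 0x00 };	/* IPv4 */
	uint16_t proto;
	size_t depth = network_depth(tagged, &proto);

	return !(depth == 18 && proto == 0x0800);
}
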
@@ -916,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done = 0;
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
                return 0;
 
        if (cq->decmprs_left)
index 7b9d8a989b520e8b59af1294efdefd2475864b59..499487ce3b5393495c8794b73fe7c0760678555d 100644
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
+       u64 tx_xmit_more;
        u64 rx_wqe_err;
        u64 rx_mpwqe_filler;
        u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
        /* commonly accessed in data path */
        u64 packets;
        u64 bytes;
+       u64 xmit_more;
        u64 tso_packets;
        u64 tso_bytes;
        u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
 };
 
 #define NUM_SW_COUNTERS                        ARRAY_SIZE(sw_stats_desc)
index 0f19b01e3fffa202d01366f130012e34856cdaae..22cfc4ac1837500cc9fb5a2fa6062d77f49550aa 100644
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
 {
        const struct tc_action *a;
+       LIST_HEAD(actions);
 
        if (tc_no_actions(exts))
                return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;
 
-       tc_for_each_action(a, exts) {
+       tcf_exts_to_list(exts, &actions);
+       list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *dest_vport)
 {
        const struct tc_action *a;
+       LIST_HEAD(actions);
 
        if (tc_no_actions(exts))
                return -EINVAL;
 
        *action = 0;
 
-       tc_for_each_action(a, exts) {
+       tcf_exts_to_list(exts, &actions);
+       list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
+       LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
-       tc_for_each_action(a, f->exts)
+       tcf_exts_to_list(f->exts, &actions);
+       list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);
 
        return 0;
index e073bf59890d33a164f498417ed42acf4cd24228..eb0e72537f10cd559876c5f2710ca143f0fc375f 100644
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                sq->stats.stopped++;
        }
 
+       sq->stats.xmit_more += skb->xmit_more;
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;
 
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb);
 }
 
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
-       struct mlx5e_tx_wqe_info *wi;
-       struct sk_buff *skb;
-       u16 ci;
-       int i;
-
-       while (sq->cc != sq->pc) {
-               ci = sq->cc & sq->wq.sz_m1;
-               skb = sq->skb[ci];
-               wi = &sq->wqe_info[ci];
-
-               if (!skb) { /* nop */
-                       sq->cc++;
-                       continue;
-               }
-
-               for (i = 0; i < wi->num_dma; i++) {
-                       struct mlx5e_sq_dma *dma =
-                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
-                       mlx5e_tx_dma_unmap(sq->pdev, dma);
-               }
-
-               dev_kfree_skb_any(skb);
-               sq->cc += wi->num_wqebbs;
-       }
-}
-
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        sq = container_of(cq, struct mlx5e_sq, cq);
 
-       if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+       if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
                return false;
 
        npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
        if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
-           likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
-                               netif_tx_wake_queue(sq->txq);
-                               sq->stats.wake++;
+           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats.wake++;
        }
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct sk_buff *skb;
+       u16 ci;
+       int i;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+               wi = &sq->wqe_info[ci];
+
+               if (!skb) { /* nop */
+                       sq->cc++;
+                       continue;
+               }
+
+               for (i = 0; i < wi->num_dma; i++) {
+                       struct mlx5e_sq_dma *dma =
+                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+                       mlx5e_tx_dma_unmap(sq->pdev, dma);
+               }
+
+               dev_kfree_skb_any(skb);
+               sq->cc += wi->num_wqebbs;
+       }
+}
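
mlx5e_free_tx_descs() above (now placed after the CQ poller it pairs with) drains the SQ by advancing the consumer counter cc toward the producer counter pc and masking with the power-of-two ring size to get a slot index; NOP slots carry no skb and are simply skipped. A toy version of that consumer/producer walk, with generic names in place of the driver's types:

#include <stdio.h>

#define RING_SIZE 8			/* power of two, like sq->wq */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	int ring[RING_SIZE];
	unsigned int cc = 0, pc = 0;	/* consumer / producer counters */

	for (int i = 0; i < 5; i++)	/* post a few descriptors */
		ring[pc++ & RING_MASK] = i;

	while (cc != pc) {		/* drain everything still in flight */
		unsigned int ci = cc & RING_MASK;

		printf("free slot %u -> %d\n", ci, ring[ci]);
		cc++;			/* real code adds wi->num_wqebbs */
	}
	return 0;
}
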
index 64ae2e800daa3e7cfbe992792bcf2a9d3927126c..9bf33bb692106e14901ca55711fa4f7143f1bcce 100644
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
+       struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
        struct mlx5_wq_cyc *wq;
        struct mlx5_cqe64 *cqe;
-       struct mlx5e_sq *sq;
        u16 sqcc;
 
+       if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+               return;
+
        cqe = mlx5e_get_cqe(cq);
        if (likely(!cqe))
                return;
 
-       sq = container_of(cq, struct mlx5e_sq, cq);
        wq = &sq->wq;
 
        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
index f6d667797ee10e57690aeb5ce0bb588b4bb455da..b247949df1352cd6b47b18bdd5ee958c555727d8 100644
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
-       if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+       /* Only VFs need ACLs for VST and spoofchk filtering */
+       if (vport_num && esw->mode == SRIOV_LEGACY) {
                esw_vport_ingress_config(esw, vport);
                esw_vport_egress_config(esw, vport);
        }
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
         */
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
-       if (vport_num) {
+       if (vport_num && esw->mode == SRIOV_LEGACY) {
                esw_vport_disable_egress_acl(esw, vport);
                esw_vport_disable_ingress_acl(esw, vport);
        }
@@ -1553,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 
 abort:
        esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+       esw->mode = SRIOV_NONE;
        return err;
 }
 
@@ -1767,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               vport, err);
 
        mutex_lock(&esw->state_lock);
-       if (evport->enabled)
+       if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        mutex_unlock(&esw->state_lock);
        return err;
@@ -1839,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        evport->vlan = vlan;
        evport->qos = qos;
-       if (evport->enabled) {
+       if (evport->enabled && esw->mode == SRIOV_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
                if (err)
                        goto out;
@@ -1868,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        pschk = evport->spoofchk;
        evport->spoofchk = spoofchk;
-       if (evport->enabled)
+       if (evport->enabled && esw->mode == SRIOV_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
-       if (err)
-               evport->spoofchk = pschk;
+               if (err)
+                       evport->spoofchk = pschk;
+       }
        mutex_unlock(&esw->state_lock);
 
        return err;
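
The spoofchk hunk above narrows the rollback so the previous value is restored only when an ingress reconfiguration was actually attempted and failed, rather than testing an err that may be stale. The save/apply/rollback shape, reduced to a standalone sketch (generic names, not the eswitch API):

#include <errno.h>
#include <stdbool.h>

struct vport { bool enabled; bool spoofchk; };

static int apply_ingress_config(struct vport *v)
{
	return v->enabled ? 0 : -EINVAL;	/* stand-in for the HW call */
}

static int set_spoofchk(struct vport *v, bool on)
{
	bool old = v->spoofchk;			/* remember previous value */
	int err = 0;

	v->spoofchk = on;
	if (v->enabled) {			/* only then can it take effect */
		err = apply_ingress_config(v);
		if (err)
			v->spoofchk = old;	/* roll back on failure */
	}
	return err;
}
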
index c0b05603fc31e54a4068d4b4144284ac338d4d4d..a96140971d77a5a9fd63c019135a15354c21b04f 100644
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
        void                  *priv_data;
        struct list_head       vport_sqs_list;
        bool                   valid;
+       u8                     hw_id[ETH_ALEN];
 };
 
 struct mlx5_esw_offload {
index a357e8eeeed860dd06a2909a94e54f85391ac411..7de40e6b0c2519494efdcd47767940de3247ee44 100644
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
 
-       flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+       flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
                                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                       0, &dest);
        if (IS_ERR(flow_rule))
@@ -446,7 +446,7 @@ out:
 
 static int esw_offloads_start(struct mlx5_eswitch *esw)
 {
-       int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+       int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-       if (err)
-               esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+       if (err) {
+               esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+               err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+               if (err1)
+                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+       }
        return err;
 }
 
@@ -508,12 +512,16 @@ create_ft_err:
 
 static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
-       int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+       int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-       if (err)
-               esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+       if (err) {
+               esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+               err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+               if (err1)
+                       esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
+       }
 
        return err;
 }
@@ -535,7 +543,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
        esw_destroy_offloads_fdb_table(esw);
 }
 
-static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
 {
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -551,6 +559,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
        return 0;
 }
 
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+       switch (mlx5_mode) {
+       case SRIOV_LEGACY:
+               *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+               break;
+       case SRIOV_OFFLOADS:
+               *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
        struct mlx5_core_dev *dev;
@@ -566,7 +590,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
        if (cur_mlx5_mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+       if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
        if (cur_mlx5_mode == mlx5_mode)
@@ -592,9 +616,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       *mode = dev->priv.eswitch->mode;
-
-       return 0;
+       return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
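
The esw_mode_from_devlink()/esw_mode_to_devlink() pair above keeps the devlink UAPI constants decoupled from the internal SRIOV_* modes, and mode_get now goes through the translator too, so SRIOV_NONE can never leak to userspace as a bogus mode value. A minimal model of that two-way mapping (enum names illustrative, not the kernel's):

#include <errno.h>

enum uapi_mode  { UAPI_LEGACY, UAPI_SWITCHDEV };
enum inner_mode { INNER_NONE, INNER_LEGACY, INNER_OFFLOADS };

static int inner_to_uapi(enum inner_mode in, enum uapi_mode *out)
{
	switch (in) {
	case INNER_LEGACY:	*out = UAPI_LEGACY;	return 0;
	case INNER_OFFLOADS:	*out = UAPI_SWITCHDEV;	return 0;
	default:		return -EINVAL;	/* e.g. INNER_NONE */
	}
}

static int uapi_to_inner(enum uapi_mode in, enum inner_mode *out)
{
	switch (in) {
	case UAPI_LEGACY:	*out = INNER_LEGACY;	return 0;
	case UAPI_SWITCHDEV:	*out = INNER_OFFLOADS;	return 0;
	default:		return -EINVAL;
	}
}
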
index 9134010e2921cb1ace000d05832c636667d2a5ba..287ade151ec849861a5d9c18eb27f8c7c7014f7a 100644
@@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk *
 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
 {
        struct mlx5_cmd_fc_bulk *b;
-       int outlen = sizeof(*b) +
+       int outlen =
                MLX5_ST_SZ_BYTES(query_flow_counter_out) +
                MLX5_ST_SZ_BYTES(traffic_counter) * num;
 
-       b = kzalloc(outlen, GFP_KERNEL);
+       b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
        if (!b)
                return NULL;
 
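
The allocation fix above separates the two lengths that the old code conflated: the driver must allocate sizeof(*b) bytes of bookkeeping plus outlen bytes of firmware output, but hand only outlen to the query command. The same shape with a flexible array member (hypothetical struct, not the mlx5 one):

#include <stdlib.h>

struct fc_bulk {
	int id;
	int num;
	char out[];			/* firmware output buffer follows */
};

static struct fc_bulk *fc_bulk_alloc(int id, int num, size_t outlen)
{
	/* header + payload in one allocation, as the fixed hunk does */
	struct fc_bulk *b = calloc(1, sizeof(*b) + outlen);

	if (!b)
		return NULL;
	b->id = id;
	b->num = num;
	return b;
}
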
index 75bb8c8645575058324c4807356586055f13da3b..3d6c1f65e5860f832c33413bdf00e95e1986a87b 100644
@@ -80,7 +80,7 @@
                           LEFTOVERS_NUM_PRIOS)
 
 #define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
 /* Vlan, mac, ttc, aRFS */
 #define KERNEL_NIC_PRIO_NUM_LEVELS 4
index c2877e9de8a11418da64e78cded14ae86daf2c2f..3a9195b4169dc0b1cbbb60b31ac3d2a2ddadcecb 100644
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
        for (node = &first->node; node; node = rb_next(node)) {
                struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
                struct mlx5_fc_cache *c = &counter->cache;
+               u64 packets;
+               u64 bytes;
 
                if (counter->id > last_id)
                        break;
 
                mlx5_cmd_fc_bulk_get(dev, b,
-                                    counter->id, &c->packets, &c->bytes);
+                                    counter->id, &packets, &bytes);
+
+               if (c->packets == packets)
+                       continue;
+
+               c->packets = packets;
+               c->bytes = bytes;
+               c->lastuse = jiffies;
        }
 
 out:
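
The hunk above reads each bulk-queried counter into locals and commits them to the cache, together with a refreshed lastuse timestamp, only when the packet count actually changed; an idle flow keeps its old lastuse, which is what flow aging relies on. Condensed model of that update (a plain tick stands in for jiffies):

#include <stdbool.h>
#include <stdint.h>

struct fc_cache {
	uint64_t packets;
	uint64_t bytes;
	uint64_t lastuse;
};

/* Returns true when the cache was refreshed. */
static bool fc_cache_update(struct fc_cache *c, uint64_t packets,
			    uint64_t bytes, uint64_t now)
{
	if (c->packets == packets)	/* no new traffic: keep lastuse */
		return false;

	c->packets = packets;
	c->bytes = bytes;
	c->lastuse = now;
	return true;
}
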
index 4f491d43e77d6e5a4e0b510d8018729e75d5aa6b..2385bae92672a0e9c134dcfbd6079c7e7bd9abbf 100644
@@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
        dev_info(&pdev->dev, "%s was called\n", __func__);
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv);
+       pci_save_state(pdev);
        mlx5_pci_disable_device(dev);
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
-       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       int err = 0;
-
-       dev_info(&pdev->dev, "%s was called\n", __func__);
-
-       err = mlx5_pci_enable_device(dev);
-       if (err) {
-               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-                       , __func__, err);
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-       pci_set_master(pdev);
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
-       mlx5_pci_err_detected(dev->pdev, 0);
-}
-
 /* Wait for the device to show vital signs, i.e. for the health
  * counter to start counting.
  */
@@ -1477,21 +1453,44 @@ static int wait_vital(struct pci_dev *pdev)
        return -ETIMEDOUT;
 }
 
-static void mlx5_pci_resume(struct pci_dev *pdev)
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       struct mlx5_priv *priv = &dev->priv;
        int err;
 
        dev_info(&pdev->dev, "%s was called\n", __func__);
 
-       pci_save_state(pdev);
-       err = wait_vital(pdev);
+       err = mlx5_pci_enable_device(dev);
        if (err) {
+               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+                       , __func__, err);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       if (wait_vital(pdev)) {
                dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
-               return;
+               return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+       mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+       int err;
+
+       dev_info(&pdev->dev, "%s was called\n", __func__);
+
        err = mlx5_load_one(dev, priv);
        if (err)
                dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
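
The reshuffle above changes when PCI state is saved and restored: err_detected snapshots config space before the device is disabled, slot_reset re-enables the device, restores that state and waits for the health counter, and only resume reloads driver state. A stub walkthrough of that ordering (plain functions standing in for the PCI core and mlx5 helpers; not kernel code):

#include <stdio.h>

static void save_state(void)    { puts("pci_save_state"); }
static void disable_dev(void)   { puts("disable device"); }
static int  enable_dev(void)    { puts("enable device"); return 0; }
static void restore_state(void) { puts("pci_restore_state"); }
static int  wait_vital(void)    { puts("health counter ticking"); return 0; }
static int  load_one(void)      { puts("reload driver state"); return 0; }

int main(void)
{
	/* err_detected: save config space while the device is still sane */
	save_state();
	disable_dev();

	/* slot_reset: bring the device back and verify it is alive */
	if (enable_dev())
		return 1;
	restore_state();
	if (wait_vital())
		return 1;

	/* resume: only now rebuild the driver's software state */
	return load_one();
}
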
index f33b997f2b61c514a7f7d7017843fbbbe766472c..af371a82c35ba82e284d5b687dcf655e1ae84b50 100644
@@ -56,6 +56,7 @@
 #define MLXSW_PORT_PHY_BITS_MASK       (MLXSW_PORT_MAX_PHY_PORTS - 1)
 
 #define MLXSW_PORT_CPU_PORT            0x0
+#define MLXSW_PORT_ROUTER_PORT         (MLXSW_PORT_MAX_PHY_PORTS + 2)
 
 #define MLXSW_PORT_DONT_CARE           (MLXSW_PORT_MAX_PORTS)
 
index 7ca9201f7dcbf6de88e0cbf2d69393db13d0e57d..1721098eef131773471bb99aa281f7959ff817ff 100644
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
  */
 MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
 /* reg_ritr_virtual_router
  * Virtual router ID associated with the router interface.
  * Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
        mlxsw_reg_ritr_op_set(payload, op);
        mlxsw_reg_ritr_rif_set(payload, rif);
        mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+       mlxsw_reg_ritr_lb_en_set(payload, 1);
        mlxsw_reg_ritr_mtu_set(payload, mtu);
        mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
 {
        MLXSW_REG_ZERO(ralue, payload);
        mlxsw_reg_ralue_protocol_set(payload, protocol);
+       mlxsw_reg_ralue_op_set(payload, op);
        mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
        mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
        mlxsw_reg_ralue_entry_type_set(payload,
index c3e61500819d00c9d1c4d61448f99c51057d3cbb..d48873bcbddfc0d0049ecc2f73b5bfcc68862d20 100644
@@ -56,6 +56,7 @@
 #include <generated/utsrelease.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
 
 #include "spectrum.h"
 #include "core.h"
@@ -942,8 +943,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
        kfree(mlxsw_sp_vport);
 }
 
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-                         u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+                                __be16 __always_unused proto, u16 vid)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
        if (!vid)
                return 0;
 
-       if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
-               netdev_warn(dev, "VID=%d already configured\n", vid);
+       if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
                return 0;
-       }
 
        mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
-       if (!mlxsw_sp_vport) {
-               netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+       if (!mlxsw_sp_vport)
                return -ENOMEM;
-       }
 
        /* When adding the first VLAN interface on a bridged port we need to
         * transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +970,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list)) {
                err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
-               if (err) {
-                       netdev_err(dev, "Failed to set to Virtual mode\n");
+               if (err)
                        goto err_port_vp_mode_trans;
-               }
        }
 
        err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-       if (err) {
-               netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+       if (err)
                goto err_port_vid_learning_set;
-       }
 
        err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
-       if (err) {
-               netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-                          vid);
+       if (err)
                goto err_port_add_vid;
-       }
 
        return 0;
 
@@ -1010,7 +1000,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;
-       int err;
 
        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
@@ -1019,23 +1008,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
                return 0;
 
        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-       if (!mlxsw_sp_vport) {
-               netdev_warn(dev, "VID=%d does not exist\n", vid);
+       if (WARN_ON(!mlxsw_sp_vport))
                return 0;
-       }
 
-       err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-       if (err) {
-               netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-                          vid);
-               return err;
-       }
+       mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 
-       err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-       if (err) {
-               netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
-               return err;
-       }
+       mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 
        /* Drop FID reference. If this was the last reference the
         * resources will be freed.
@@ -1048,13 +1026,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
         * transition all active 802.1Q bridge VLANs to use VID to FID
         * mappings and set port's mode to VLAN mode.
         */
-       if (list_is_singular(&mlxsw_sp_port->vports_list)) {
-               err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-               if (err) {
-                       netdev_err(dev, "Failed to set to VLAN mode\n");
-                       return err;
-               }
-       }
+       if (list_is_singular(&mlxsw_sp_port->vports_list))
+               mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 
        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
 
@@ -1149,6 +1122,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                          bool ingress)
 {
        const struct tc_action *a;
+       LIST_HEAD(actions);
        int err;
 
        if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1130,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOTSUPP;
        }
 
-       tc_for_each_action(a, cls->exts) {
+       tcf_exts_to_list(cls->exts, &actions);
+       list_for_each_entry(a, &actions, list) {
                if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
                        return -ENOTSUPP;
 
@@ -2076,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       mlxsw_sp_port->pvid = 1;
+
+       return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                bool split, u8 module, u8 width, u8 lane)
 {
@@ -2119,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
+       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_swid_set;
+       }
+
        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2144,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_system_port_mapping_set;
        }
 
-       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
-       if (err) {
-               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
-                       mlxsw_sp_port->local_port);
-               goto err_port_swid_set;
-       }
-
        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2191,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_dcb_init;
        }
 
+       err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_pvid_vport_create;
+       }
+
        mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+       mlxsw_sp->ports[local_port] = mlxsw_sp_port;
        err = register_netdev(dev);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_core_port_init;
        }
 
-       err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
-       if (err)
-               goto err_port_vlan_init;
-
-       mlxsw_sp->ports[local_port] = mlxsw_sp_port;
        return 0;
 
-err_port_vlan_init:
-       mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 err_core_port_init:
        unregister_netdev(dev);
 err_register_netdev:
+       mlxsw_sp->ports[local_port] = NULL;
+       mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+       mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+       mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
-err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
+       mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
        free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
        kfree(mlxsw_sp_port->untagged_vlans);
@@ -2245,12 +2239,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 
        if (!mlxsw_sp_port)
                return;
-       mlxsw_sp->ports[local_port] = NULL;
        mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-       mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
-       mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+       mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+       mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+       mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
        mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
        free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2659,6 +2653,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = MLXSW_TRAP_ID_ARPUC,
        },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MTUERROR,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_TTLERROR,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LBERROR,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_OSPF,
+       },
        {
                .func = mlxsw_sp_rx_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
@@ -3311,6 +3325,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
        return mlxsw_sp_fid_find(mlxsw_sp, fid);
 }
 
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+              MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+                                         bool set)
+{
+       enum mlxsw_flood_table_type table_type;
+       char *sftr_pl;
+       u16 index;
+       int err;
+
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+
+       table_type = mlxsw_sp_flood_table_type_get(fid);
+       index = mlxsw_sp_flood_table_index_get(fid);
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+                           1, MLXSW_PORT_ROUTER_PORT, set);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+       kfree(sftr_pl);
+       return err;
+}
+
 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
 {
        if (mlxsw_sp_fid_is_vfid(fid))
@@ -3347,10 +3394,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
        if (rif == MLXSW_SP_RIF_MAX)
                return -ERANGE;
 
-       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+       err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
        if (err)
                return err;
 
+       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+       if (err)
+               goto err_rif_bridge_op;
+
        err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
        if (err)
                goto err_rif_fdb_op;
@@ -3372,6 +3423,8 @@ err_rif_alloc:
        mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
 err_rif_fdb_op:
        mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
        return err;
 }
 
@@ -3391,6 +3444,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
 
        mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
 
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
        netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
 }
 
@@ -4487,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
        .priority = 10, /* Must be called before FIB notifier block */
 };
 
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+       .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
 static int __init mlxsw_sp_module_init(void)
 {
        int err;
 
        register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+       register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
        if (err)
                goto err_core_driver_register;
        return 0;
 
 err_core_driver_register:
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+       unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        return err;
 }
@@ -4506,6 +4569,7 @@ err_core_driver_register:
 static void __exit mlxsw_sp_module_exit(void)
 {
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 }
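
With the netevent notifier moved into module init/exit above, the three notifiers are registered in one place and torn down strictly in reverse order on both the failure path and module exit. The register/unwind discipline as a toy program:

#include <stdio.h>

static void reg(const char *n)   { printf("register %s\n", n); }
static void unreg(const char *n) { printf("unregister %s\n", n); }
static int  core_register(void)  { return -1; }	/* pretend it fails */

int main(void)
{
	reg("netdevice");
	reg("inetaddr");
	reg("netevent");

	if (core_register()) {
		/* unwind in reverse registration order */
		unreg("netevent");
		unreg("inetaddr");
		unreg("netdevice");
		return 1;
	}
	return 0;
}
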
index f69aa37d1521854a2383c8db8c78bbffa1925ac6..ac48abebe904f4d328d3fe16ee864e4592c682d6 100644
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-                         u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
                             bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -589,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                                    struct neighbour *n);
 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
                                   struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr);
 
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
index 074cdda7b6f337a6985e10a8d3620dd2825d2f3e..953b214f38d0d9906054c1220ad5a2156e181ecd 100644
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = pool_type;
-       u8 pool = pool_index;
+       u8 pool = pool_get(pool_index);
        u32 max_buff;
        int err;
 
+       if (dir != dir_get(pool_index))
+               return -EINVAL;
+
        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
                                       threshold, &max_buff);
        if (err)
                return err;
 
-       if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
-               if (pool < MLXSW_SP_SB_POOL_COUNT)
-                       return -EINVAL;
-               pool -= MLXSW_SP_SB_POOL_COUNT;
-       } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
-               return -EINVAL;
-       }
        return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
                                    0, max_buff, pool);
 }
index 01cfb75128278ca2a1b263636c62314d5bc3d1c2..b6ed7f7c531eec666dfe6ee8343fdd0eb9aa3e1e 100644
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
        char pfcc_pl[MLXSW_REG_PFCC_LEN];
 
        mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+       mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+       mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
        mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
 
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
                                      struct ieee_pfc *pfc)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
        int err;
 
-       if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
-           pfc->pfc_en) {
+       if (pause_en && pfc->pfc_en) {
                netdev_err(dev, "PAUSE frames already enabled on port\n");
                return -EINVAL;
        }
 
        err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
                                           mlxsw_sp_port->dcb.ets->prio_tc,
-                                          false, pfc);
+                                          pause_en, pfc);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom for PFC\n");
                return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 
 err_port_pfc_set:
        __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-                                    mlxsw_sp_port->dcb.ets->prio_tc, false,
+                                    mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
                                     mlxsw_sp_port->dcb.pfc);
        return err;
 }
index 81418d629231667e7fe879fd767e9446eae4eac3..3f5c51da6d3e9d8304c21d69b165ae9dc96bcf55 100644
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
 }
 
 struct mlxsw_sp_fib_key {
+       struct net_device *dev;
        unsigned char addr[sizeof(struct in6_addr)];
        unsigned char prefix_len;
 };
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
        struct rhash_head ht_node;
        struct mlxsw_sp_fib_key key;
        enum mlxsw_sp_fib_entry_type type;
-       u8 added:1;
+       unsigned int ref_count;
        u16 rif; /* used for action local */
        struct mlxsw_sp_vr *vr;
        struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
 
        fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
        if (!fib_entry)
                return NULL;
+       fib_entry->key.dev = dev;
        memcpy(fib_entry->key.addr, addr, addr_len);
        fib_entry->key.prefix_len = prefix_len;
        return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
-       struct mlxsw_sp_fib_key key = {{ 0 } };
+       struct mlxsw_sp_fib_key key;
 
+       memset(&key, 0, sizeof(key));
+       key.dev = dev;
        memcpy(key.addr, addr, addr_len);
        key.prefix_len = prefix_len;
        return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -657,7 +663,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                return 0;
        }
 
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
        if (WARN_ON(!r))
                return -EINVAL;
 
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
        mlxsw_sp_port_dev_put(mlxsw_sp_port);
 }
 
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
-                                         unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
        struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
-       .notifier_call = mlxsw_sp_router_netevent_event,
-};
-
 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 {
        int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
         */
        mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
 
-       err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
-       if (err)
-               goto err_register_netevent_notifier;
-
        /* Create the delayed works for the activity_update */
        INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
                          mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
        mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
        mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
        return 0;
-
-err_register_netevent_notifier:
-       rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
-       return err;
 }
 
 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 {
        cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
        cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
-       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
 }
 
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
                return err;
        mlxsw_sp_lpm_init(mlxsw_sp);
        mlxsw_sp_vrs_init(mlxsw_sp);
-       return mlxsw_sp_neigh_init(mlxsw_sp);
+       err = mlxsw_sp_neigh_init(mlxsw_sp);
+       if (err)
+               goto err_neigh_init;
+       return 0;
+
+err_neigh_init:
+       __mlxsw_sp_router_fini(mlxsw_sp);
+       return err;
 }
 
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_fib_entry *fib_entry)
 {
-       enum mlxsw_reg_ralue_op op;
-
-       op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
-                                MLXSW_REG_RALUE_OP_WRITE_UPDATE;
-       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+                                    MLXSW_REG_RALUE_OP_WRITE_WRITE);
 }
 
 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1651,9 +1648,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
        const struct mlxsw_sp_router_fib4_add_info *info = data;
        struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
        struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+       struct mlxsw_sp_vr *vr = fib_entry->vr;
 
        mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
        kfree(info);
 }
 
@@ -1694,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
 }
 
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
-                                const struct switchdev_obj_ipv4_fib *fib4,
-                                struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+                      const struct switchdev_obj_ipv4_fib *fib4)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct mlxsw_sp_router_fib4_add_info *info;
        struct mlxsw_sp_fib_entry *fib_entry;
+       struct fib_info *fi = fib4->fi;
        struct mlxsw_sp_vr *vr;
        int err;
 
        vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
                             MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr))
-               return PTR_ERR(vr);
+               return ERR_CAST(vr);
 
+       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
+       if (fib_entry) {
+               /* Already exists, just take a reference */
+               fib_entry->ref_count++;
+               return fib_entry;
+       }
        fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
        if (!fib_entry) {
                err = -ENOMEM;
                goto err_fib_entry_create;
        }
        fib_entry->vr = vr;
+       fib_entry->ref_count = 1;
 
        err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
        if (err)
                goto err_fib4_entry_init;
 
+       return fib_entry;
+
+err_fib4_entry_init:
+       mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+                       const struct switchdev_obj_ipv4_fib *fib4)
+{
+       struct mlxsw_sp_vr *vr;
+
+       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+       if (!vr)
+               return NULL;
+
+       return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                        sizeof(fib4->dst), fib4->dst_len,
+                                        fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_fib_entry *fib_entry)
+{
+       struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+       if (--fib_entry->ref_count == 0) {
+               mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_destroy(fib_entry);
+       }
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+                                const struct switchdev_obj_ipv4_fib *fib4,
+                                struct switchdev_trans *trans)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_router_fib4_add_info *info;
+       struct mlxsw_sp_fib_entry *fib_entry;
+       int err;
+
+       fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+       if (IS_ERR(fib_entry))
+               return PTR_ERR(fib_entry);
+
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
@@ -1735,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 
 err_alloc_info:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1758,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
        fib_entry = info->fib_entry;
        kfree(info);
 
+       if (fib_entry->ref_count != 1)
+               return 0;
+
        vr = fib_entry->vr;
-       err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+       err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
        if (err)
                goto err_fib_entry_insert;
-       err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+       err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
        if (err)
                goto err_fib_entry_add;
        return 0;
@@ -1770,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
 err_fib_entry_add:
        mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
 err_fib_entry_insert:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1792,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_vr *vr;
 
-       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
-       if (!vr) {
-               dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
-               return -ENOENT;
-       }
-       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+       fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
        if (!fib_entry) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
                return -ENOENT;
        }
-       mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       if (fib_entry->ref_count == 1) {
+               mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+       }
+
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return 0;
 }
index a1ad5e6bdfa8abb640195b5221fa7b6fbfd30209..7b654c517b910f7f22b48ad2c00409e6875d9da1 100644 (file)
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                    u16 idx_begin, u16 idx_end, bool set,
-                                    bool only_uc)
+                                    u16 idx_begin, u16 idx_end, bool uc_set,
+                                    bool bm_set)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOMEM;
 
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, uc_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto buffer_out;
 
-       /* Flooding control allows one to decide whether a given port will
-        * flood unicast traffic for which there is no FDB entry.
-        */
-       if (only_uc)
-               goto buffer_out;
-
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, bm_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto err_flood_bm_set;
-       else
-               goto buffer_out;
+
+       goto buffer_out;
 
 err_flood_bm_set:
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, !set);
+                           table_type, range, local_port, !uc_set);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
        kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
         * the start of the vFIDs range.
         */
        vfid = mlxsw_sp_fid_to_vfid(fid);
-       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
-                                        false);
+       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
 }
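/*
 * With the split into uc_set and bm_set, both flood tables are programmed
 * explicitly on every call. The two call sites from this patch, side by
 * side:
 *
 *   vPorts - one knob drives unicast and broadcast/multicast alike:
 *     __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
 *
 *   bridged ports - unicast flooding follows the per-port uc_flood flag,
 *   broadcast/multicast flooding stays enabled:
 *     __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
 *                               mlxsw_sp_port->uc_flood, true);
 */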
 
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -450,6 +443,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
        kfree(f);
 
+       mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
        mlxsw_sp_fid_op(mlxsw_sp, fid, false);
 }
 
@@ -458,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp_fid *f;
 
+       if (test_bit(fid, mlxsw_sp_port->active_vlans))
+               return 0;
+
        f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
        if (!f) {
                f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -515,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 
        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
-                                       true, false);
+                                       mlxsw_sp_port->uc_flood, true);
        if (err)
                goto err_port_flood_set;
 
@@ -997,13 +995,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 }
 
 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
-                                    u16 vid_begin, u16 vid_end, bool init)
+                                    u16 vid_begin, u16 vid_end)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, pvid;
        int err;
 
-       if (!init && !mlxsw_sp_port->bridged)
+       if (!mlxsw_sp_port->bridged)
                return -EINVAL;
 
        err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -1014,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                return err;
        }
 
-       if (init)
-               goto out;
-
        pvid = mlxsw_sp_port->pvid;
        if (pvid >= vid_begin && pvid <= vid_end) {
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -1028,7 +1023,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 
        mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
 
-out:
        /* Changing activity bits only if HW operation succeeded */
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -1039,8 +1033,8 @@ out:
 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const struct switchdev_obj_port_vlan *vlan)
 {
-       return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
-                                        vlan->vid_begin, vlan->vid_end, false);
+       return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+                                        vlan->vid_end);
 }
 
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1048,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
        u16 vid;
 
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
-               __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+               __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
 }
 
 static int
@@ -1546,32 +1540,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
        mlxsw_sp_fdb_fini(mlxsw_sp);
 }
 
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
-       struct net_device *dev = mlxsw_sp_port->dev;
-       int err;
-
-       /* Allow only untagged packets to ingress and tag them internally
-        * with VID 1.
-        */
-       mlxsw_sp_port->pvid = 1;
-       err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
-                                       true);
-       if (err) {
-               netdev_err(dev, "Unable to init VLANs\n");
-               return err;
-       }
-
-       /* Add implicit VLAN interface in the device, so that untagged
-        * packets will be classified to the default vFID.
-        */
-       err = mlxsw_sp_port_add_vid(dev, 0, 1);
-       if (err)
-               netdev_err(dev, "Failed to configure default vFID\n");
-
-       return err;
-}
-
 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
index 470d7696e9fede42f46ea74adcd6b764d8ca3748..ed8e301864004f8092bcf82caf50edc730a4b7d4 100644 (file)
@@ -56,6 +56,10 @@ enum {
        MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
        MLXSW_TRAP_ID_ARPBC = 0x50,
        MLXSW_TRAP_ID_ARPUC = 0x51,
+       MLXSW_TRAP_ID_MTUERROR = 0x52,
+       MLXSW_TRAP_ID_TTLERROR = 0x53,
+       MLXSW_TRAP_ID_LBERROR = 0x54,
+       MLXSW_TRAP_ID_OSPF = 0x55,
        MLXSW_TRAP_ID_IP2ME = 0x5F,
        MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
        MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
index 88678c172b19f14e7ae00574cab4da76f1edcced..39dadfca84ef428f797516a39bc41be0f933e780 100644 (file)
@@ -41,7 +41,6 @@
  *          Chris Telfer <chris.telfer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                nfp_net_set_hash(nn->netdev, skb, rxd);
 
-               /* Pad small frames to minimum */
-               if (skb_put_padto(skb, 60))
-                       break;
-
                /* Stats update */
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_pkts++;
@@ -2049,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 
        nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
                               GFP_KERNEL);
-       if (!nn->rx_rings)
+       if (!nn->rx_rings) {
+               err = -ENOMEM;
                goto err_free_lsc;
+       }
        nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
                               GFP_KERNEL);
-       if (!nn->tx_rings)
+       if (!nn->tx_rings) {
+               err = -ENOMEM;
                goto err_free_rx_rings;
+       }
 
        for (r = 0; r < nn->num_r_vecs; r++) {
                err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
index 7d7933d00b8f15107868dea795de73a6f61d0a07..4c989722096994b431472af20c78ce84be3caf97 100644 (file)
@@ -40,7 +40,6 @@
  *          Brad Petrus <brad.petrus@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
index 37abef016a0a82a41c6b3a8f14c0f13cb7d2a2f2..f7062cb648e1b777342797cd7fc69a767a7488e8 100644 (file)
@@ -38,7 +38,6 @@
  *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-       if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        /* Determine stride */
-       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                tx_bar_no = NFP_NET_Q0_BAR;
                rx_bar_no = NFP_NET_Q1_BAR;
index 4d4ecba0aad9fbce893fc410f38a3b34c5ec9599..8e13ec84c53812e92bbabd88a12a25e12c353a75 100644 (file)
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
        mac[5] = tmp >> 8;
 }
 
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
-       if (enable)
-               clk_prepare_enable(pldat->clk);
-       else
-               clk_disable_unprepare(pldat->clk);
-}
-
 static void __lpc_params_setup(struct netdata_local *pldat)
 {
        u32 tmp;
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
        writel(0, LPC_ENET_MAC2(pldat->net_base));
        spin_unlock_irqrestore(&pldat->lock, flags);
 
-       __lpc_eth_clock_enable(pldat, false);
+       clk_disable_unprepare(pldat->clk);
 
        return 0;
 }
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 static int lpc_eth_open(struct net_device *ndev)
 {
        struct netdata_local *pldat = netdev_priv(ndev);
+       int ret;
 
        if (netif_msg_ifup(pldat))
                dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
 
-       __lpc_eth_clock_enable(pldat, true);
+       ret = clk_prepare_enable(pldat->clk);
+       if (ret)
+               return ret;
 
        /* Suspended PHY makes LPC ethernet core block, so resume now */
        phy_resume(ndev->phydev);
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
 
        /* Enable network clock */
-       __lpc_eth_clock_enable(pldat, true);
+       ret = clk_prepare_enable(pldat->clk);
+       if (ret)
+               goto err_out_clk_put;
 
        /* Map IO space */
        pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1454,6 +1451,7 @@ err_out_iounmap:
        iounmap(pldat->net_base);
 err_out_disable_clocks:
        clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
        clk_put(pldat->clk);
 err_out_free_dev:
        free_netdev(ndev);
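/*
 * The removed wrapper discarded clk_prepare_enable()'s return value. The
 * idiom adopted instead, as a standalone sketch (struct foo is a
 * hypothetical driver context; the clk_* calls are the real kernel API):
 */
static int foo_open(struct foo *priv)
{
        int ret;

        ret = clk_prepare_enable(priv->clk);    /* may fail */
        if (ret)
                return ret;
        /* ... hardware init ... */
        return 0;
}

static void foo_close(struct foo *priv)
{
        clk_disable_unprepare(priv->clk);       /* void, cannot fail */
}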
index 35e53771533fb31ddd7100b4e7931f6aba70b4f1..45ab746765737ae2d9af3d5cb77c5343b9aa0c04 100644 (file)
@@ -561,9 +561,18 @@ struct qed_dev {
 static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
                                        u32 concrete_fid)
 {
+       u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
        u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+       u8 vf_valid = GET_FIELD(concrete_fid,
+                               PXP_CONCRETE_FID_VFVALID);
+       u8 sw_fid;
 
-       return pfid;
+       if (vf_valid)
+               sw_fid = vfid + MAX_NUM_PFS;
+       else
+               sw_fid = pfid;
+
+       return sw_fid;
 }
 
 #define PURE_LB_TC 8
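/*
 * Worked example with illustrative numbers (assuming MAX_NUM_PFS == 16;
 * treat the constant as an assumption here): a concrete FID with
 * vf_valid == 1 and vfid == 3 yields sw_fid == 3 + 16 == 19, while a
 * PF-only FID with pfid == 2 yields sw_fid == 2, so VF and PF software
 * FIDs can never collide.
 */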
index d0dc28f93c0e2b0426349d45f5ca7cb2615783ac..3656d2fd673d28a37d1bfd4bb54c8046b6f45987 100644 (file)
@@ -19,6 +19,7 @@
 #include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 #ifdef CONFIG_DCB
 #include <linux/qed/qed_eth_if.h>
 #endif
@@ -52,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
                  DCBX_APP_SF_ETHTYPE);
 }
 
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+       u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+       /* Old MFW */
+       if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+               return qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
 static bool qed_dcbx_app_port(u32 app_info_bitmap)
 {
        return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
                  DCBX_APP_SF_PORT);
 }
 
-static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
 {
-       return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-                 proto_id == QED_ETH_TYPE_DEFAULT);
+       u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+       /* Old MFW */
+       if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+               return qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
 }
 
-static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-       return !!(qed_dcbx_app_port(app_info_bitmap) &&
-                 proto_id == QED_TCP_PORT_ISCSI);
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
 }
 
-static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-       return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-                 proto_id == QED_ETH_TYPE_FCOE);
+       bool port;
+
+       if (ieee)
+               port = qed_dcbx_ieee_app_port(app_info_bitmap,
+                                             DCBX_APP_SF_IEEE_TCP_PORT);
+       else
+               port = qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
 }
 
-static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-       return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-                 proto_id == QED_ETH_TYPE_ROCE);
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
 }
 
-static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-       return !!(qed_dcbx_app_port(app_info_bitmap) &&
-                 proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool port;
+
+       if (ieee)
+               port = qed_dcbx_ieee_app_port(app_info_bitmap,
+                                             DCBX_APP_SF_IEEE_UDP_PORT);
+       else
+               port = qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
 }
 
 static void
@@ -164,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 static bool
 qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
                               u32 app_prio_bitmap,
-                              u16 id, enum dcbx_protocol_type *type)
+                              u16 id, enum dcbx_protocol_type *type, bool ieee)
 {
-       if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+       if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
                *type = DCBX_PROTOCOL_FCOE;
-       } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+       } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
                *type = DCBX_PROTOCOL_ROCE;
-       } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+       } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
                *type = DCBX_PROTOCOL_ISCSI;
-       } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+       } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
                *type = DCBX_PROTOCOL_ETH;
-       } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+       } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
                *type = DCBX_PROTOCOL_ROCE_V2;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
@@ -194,17 +249,18 @@ static int
 qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                     struct qed_dcbx_results *p_data,
                     struct dcbx_app_priority_entry *p_tbl,
-                    u32 pri_tc_tbl, int count, bool dcbx_enabled)
+                    u32 pri_tc_tbl, int count, u8 dcbx_version)
 {
        u8 tc, priority_map;
        enum dcbx_protocol_type type;
+       bool enable, ieee;
        u16 protocol_id;
        int priority;
-       bool enable;
        int i;
 
        DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
 
+       ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
        /* Parse APP TLV */
        for (i = 0; i < count; i++) {
                protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -219,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 
                tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
                if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
-                                                  protocol_id, &type)) {
+                                                  protocol_id, &type, ieee)) {
                        /* ETH always has the enable bit reset, as it gets
                         * vlan information per packet. For other protocols,
                         * it should be set according to the dcbx_enabled
@@ -275,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        struct dcbx_ets_feature *p_ets;
        struct qed_hw_info *p_info;
        u32 pri_tc_tbl, flags;
-       bool dcbx_enabled;
+       u8 dcbx_version;
        int num_entries;
        int rc = 0;
 
-       /* If DCBx version is non zero, then negotiation was
-        * successfuly performed
-        */
        flags = p_hwfn->p_dcbx_info->operational.flags;
-       dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+       dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
 
        p_app = &p_hwfn->p_dcbx_info->operational.features.app;
        p_tbl = p_app->app_pri_tbl;
@@ -295,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
 
        rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
-                                 num_entries, dcbx_enabled);
+                                 num_entries, dcbx_version);
        if (rc)
                return rc;
 
        p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
        data.pf_id = p_hwfn->rel_pf_id;
-       data.dcbx_enabled = dcbx_enabled;
+       data.dcbx_enabled = !!dcbx_version;
 
        qed_dcbx_dp_protocol(p_hwfn, &data);
 
@@ -400,7 +453,7 @@ static void
 qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
                      struct dcbx_app_priority_feature *p_app,
                      struct dcbx_app_priority_entry *p_tbl,
-                     struct qed_dcbx_params *p_params)
+                     struct qed_dcbx_params *p_params, bool ieee)
 {
        struct qed_app_entry *entry;
        u8 pri_map;
@@ -414,15 +467,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
                                                      DCBX_APP_NUM_ENTRIES);
        for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
                entry = &p_params->app_entry[i];
-               entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
-                                                    DCBX_APP_SF));
+               if (ieee) {
+                       u8 sf_ieee;
+                       u32 val;
+
+                       sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                   DCBX_APP_SF_IEEE);
+                       switch (sf_ieee) {
+                       case DCBX_APP_SF_IEEE_RESERVED:
+                               /* Old MFW */
+                               val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                       DCBX_APP_SF);
+                               entry->sf_ieee = val ?
+                                   QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+                                   QED_DCBX_SF_IEEE_ETHTYPE;
+                               break;
+                       case DCBX_APP_SF_IEEE_ETHTYPE:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+                               break;
+                       case DCBX_APP_SF_IEEE_TCP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+                               break;
+                       case DCBX_APP_SF_IEEE_UDP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+                               break;
+                       case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+                               break;
+                       }
+               } else {
+                       entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                            DCBX_APP_SF));
+               }
+
                pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
                entry->prio = ffs(pri_map) - 1;
                entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
                                                    DCBX_APP_PROTOCOL_ID);
                qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
                                               entry->proto_id,
-                                              &entry->proto_type);
+                                              &entry->proto_type, ieee);
        }
 
        DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@@ -483,7 +567,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
        bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
        tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
        tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
-       pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+       pri_map = p_ets->pri_tc_tbl[0];
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
                p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
                p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -500,9 +584,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
                           struct dcbx_app_priority_feature *p_app,
                           struct dcbx_app_priority_entry *p_tbl,
                           struct dcbx_ets_feature *p_ets,
-                          u32 pfc, struct qed_dcbx_params *p_params)
+                          u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
 {
-       qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+       qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
        qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
        qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
 }
@@ -516,7 +600,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
        p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
        qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
                                   p_feat->app.app_pri_tbl, &p_feat->ets,
-                                  p_feat->pfc, &params->local.params);
+                                  p_feat->pfc, &params->local.params, false);
        params->local.valid = true;
 }
 
@@ -529,7 +613,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
        p_feat = &p_hwfn->p_dcbx_info->remote.features;
        qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
                                   p_feat->app.app_pri_tbl, &p_feat->ets,
-                                  p_feat->pfc, &params->remote.params);
+                                  p_feat->pfc, &params->remote.params, false);
        params->remote.valid = true;
 }
 
@@ -574,7 +658,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
 
        qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
                                   p_feat->app.app_pri_tbl, &p_feat->ets,
-                                  p_feat->pfc, &params->operational.params);
+                                  p_feat->pfc, &params->operational.params,
+                                  p_operational->ieee);
        qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
        err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
        p_operational->err = err;
@@ -861,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
        struct qed_ptt *p_ptt;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev))
+               return -EINVAL;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;
@@ -900,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
                if (p_params->pfc.prio[i])
                        pfc_map |= BIT(i);
 
+       *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
        *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
 
        DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -944,7 +1033,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
                val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
                p_ets->pri_tc_tbl[0] |= val;
        }
-       p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
        for (i = 0; i < 2; i++) {
                p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
                p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@@ -954,7 +1042,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
 static void
 qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
                      struct dcbx_app_priority_feature *p_app,
-                     struct qed_dcbx_params *p_params)
+                     struct qed_dcbx_params *p_params, bool ieee)
 {
        u32 *entry;
        int i;
@@ -975,12 +1063,45 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
 
        for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
                entry = &p_app->app_pri_tbl[i].entry;
-               *entry &= ~DCBX_APP_SF_MASK;
-               if (p_params->app_entry[i].ethtype)
-                       *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
-                                  DCBX_APP_SF_SHIFT);
-               else
-                       *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+               *entry = 0;
+               if (ieee) {
+                       *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+                       switch (p_params->app_entry[i].sf_ieee) {
+                       case QED_DCBX_SF_IEEE_ETHTYPE:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_TCP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_UDP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       }
+               } else {
+                       *entry &= ~DCBX_APP_SF_MASK;
+                       if (p_params->app_entry[i].ethtype)
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
+                       else
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+               }
+
                *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
                *entry |= ((u32)p_params->app_entry[i].proto_id <<
                           DCBX_APP_PROTOCOL_ID_SHIFT);
@@ -995,15 +1116,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
                          struct dcbx_local_params *local_admin,
                          struct qed_dcbx_set *params)
 {
+       bool ieee = false;
+
        local_admin->flags = 0;
        memcpy(&local_admin->features,
               &p_hwfn->p_dcbx_info->operational.features,
               sizeof(local_admin->features));
 
-       if (params->enabled)
+       if (params->enabled) {
                local_admin->config = params->ver_num;
-       else
+               ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+       } else {
                local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+       }
 
        if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
                qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@@ -1015,7 +1140,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
 
        if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
                qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
-                                     &params->config.params);
+                                     &params->config.params, ieee);
 }
 
 int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -1064,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
                return 0;
        }
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info) {
                DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
                return -ENOMEM;
@@ -1101,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info) {
                DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
                return NULL;
@@ -1596,8 +1721,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
                if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
                        break;
                /* First empty slot */
-               if (!entry->proto_id)
+               if (!entry->proto_id) {
+                       dcbx_set.config.params.num_app_entries++;
                        break;
+               }
        }
 
        if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@@ -2117,8 +2244,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
                    (entry->proto_id == app->protocol))
                        break;
                /* First empty slot */
-               if (!entry->proto_id)
+               if (!entry->proto_id) {
+                       dcbx_set.config.params.num_app_entries++;
                        break;
+               }
        }
 
        if (i == QED_DCBX_MAX_APP_PROTOCOL) {
index 592784019994fec799c1a16524a2b5bb756baa74..6f9d3b831a2a0d545ef44b3dc6247ae5f141f3d0 100644 (file)
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
 #define DCBX_APP_SF_SHIFT              8
 #define DCBX_APP_SF_ETHTYPE            0
 #define DCBX_APP_SF_PORT               1
+#define DCBX_APP_SF_IEEE_MASK          0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT         12
+#define DCBX_APP_SF_IEEE_RESERVED      0
+#define DCBX_APP_SF_IEEE_ETHTYPE       1
+#define DCBX_APP_SF_IEEE_TCP_PORT      2
+#define DCBX_APP_SF_IEEE_UDP_PORT      3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT  4
+
 #define DCBX_APP_PROTOCOL_ID_MASK      0xffff0000
 #define DCBX_APP_PROTOCOL_ID_SHIFT     16
 };
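/*
 * Encoding sketch for the new 4-bit field (4791 is the RoCEv2 UDP port,
 * matching QED_UDP_PORT_TYPE_ROCE_V2 used earlier in this patch; the
 * value is illustrative):
 */
u32 entry = (4791u << DCBX_APP_PROTOCOL_ID_SHIFT) |
            ((u32)DCBX_APP_SF_IEEE_UDP_PORT << DCBX_APP_SF_IEEE_SHIFT);

/* QED_MFW_GET_FIELD(entry, DCBX_APP_SF_IEEE) extracts
 * DCBX_APP_SF_IEEE_UDP_PORT again, so qed_dcbx_roce_v2_tlv(entry, 4791,
 * true) from the qed_dcbx.c hunk above returns true.
 */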
index a240f26344a439379ad3b9a7cf607e7fe243d1c5..f776a77794c5154b1fab58f49aa6f1bcad0031ee 100644 (file)
@@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
        p_drv_version = &union_data.drv_version;
        p_drv_version->version = p_ver->version;
 
-       for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
-               val = cpu_to_be32(p_ver->name[i]);
+       for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+               val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
                *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
        }
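/*
 * The old loop stepped i by four but passed the single byte p_ver->name[i]
 * (promoted to u32) to cpu_to_be32(), so three of every four bytes of the
 * version string were dropped. A runnable userspace illustration of the
 * difference, with htonl() standing in for cpu_to_be32() and the same
 * byte-buffer-as-u32 cast as the fixed loop:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        const uint8_t name[] = "8.10.9.20";
        uint8_t out_old[4], out_new[4];
        uint32_t w;

        w = htonl(name[0]);                     /* old: only '8' survives */
        memcpy(out_old, &w, 4);

        w = htonl(*(const uint32_t *)name);     /* new: all four bytes kept */
        memcpy(out_new, &w, 4);

        printf("old word bytes: %02x %02x %02x %02x\n",
               out_old[0], out_old[1], out_old[2], out_old[3]);
        printf("new word bytes: %02x %02x %02x %02x\n",
               out_new[0], out_new[1], out_new[2], out_new[3]);
        return 0;
}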
 
index e4bd02e46e577f41534ae176949ede1220728796..9544e4c4135901ee177393c6e2845a18e231fffb 100644 (file)
@@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        txq->tx_db.data.bd_prod =
                cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-       if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
                qede_update_tx_producer(txq);
 
        if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
                      < (MAX_SKB_FRAGS + 1))) {
+               if (skb->xmit_more)
+                       qede_update_tx_producer(txq);
+
                netif_tx_stop_queue(netdev_txq);
                DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                           "Stop queue was called\n");
@@ -2517,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
 #ifdef CONFIG_DCB
-       qede_set_dcbnl_ops(edev->ndev);
+       if (!IS_VF(edev))
+               qede_set_dcbnl_ops(edev->ndev);
 #endif
 
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
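/*
 * Sketch of the doorbell-coalescing rule fixed in the qede_start_xmit
 * hunk above: the producer write to hardware is deferred while the stack
 * promises more frames (skb->xmit_more), but it must be published before
 * the queue is stopped, or the deferred update sits unseen until the next
 * xmit. ring_doorbell() and ring_space() are hypothetical helpers.
 */
if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
        ring_doorbell(txq);                     /* publish producer now */

if (unlikely(ring_space(txq) < MAX_SKB_FRAGS + 1)) {
        if (skb->xmit_more)
                ring_doorbell(txq);             /* flush before stopping */
        netif_tx_stop_queue(netdev_txq);
}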
index fd973f4f16c7d2db66c5d56f850f392f178e2383..49bad00a0f8f994837b0554f3250d5ca0811bff7 100644 (file)
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID  "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID  "5.3.65"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
index 87c642d3b075b2bc9845ba2cdbb4204e788028c3..fedd7366713cf04da3ceb7fff1704d74fb3e26fe 100644 (file)
 #define QLCNIC_RESPONSE_DESC   0x05
 #define QLCNIC_LRO_DESC        0x12
 
-#define QLCNIC_TX_POLL_BUDGET          128
 #define QLCNIC_TCP_HDR_SIZE            20
 #define QLCNIC_TCP_TS_OPTION_SIZE      12
 #define QLCNIC_FETCH_RING_ID(handle)   ((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_adapter *adapter;
 
-       budget = QLCNIC_TX_POLL_BUDGET;
        tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
        adapter = tx_ring->adapter;
        work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
index 017d8c2c8285abe53eddedc9495fe8a75758f04d..24061b9b92e8c304ed8bd6a8d55ad3e4e281ddde 100644 (file)
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
        spinlock_t                      vlan_list_lock; /* Lock for VLAN list */
 };
 
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
        struct list_head        list;
-       struct work_struct      work;
-       void                    *ptr;
        struct qlcnic_cmd_args  *cmd;
 };
 
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
        struct workqueue_struct *bc_trans_wq;
        struct workqueue_struct *bc_async_wq;
        struct workqueue_struct *bc_flr_wq;
-       struct list_head        async_list;
+       struct qlcnic_adapter   *adapter;
+       struct list_head        async_cmd_list;
+       struct work_struct      vf_async_work;
+       spinlock_t              queue_lock; /* async_cmd_list queue lock */
 };
 
 struct qlcnic_sriov {
index 7327b729ba2eae4119efff54ae2a199a77b11da3..d7107055ec6035bf206380d98bbd4670d06660cd 100644 (file)
@@ -29,6 +29,7 @@
 #define QLC_83XX_VF_RESET_FAIL_THRESH  8
 #define QLC_BC_CMD_MAX_RETRY_CNT       5
 
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
        }
 
        bc->bc_async_wq =  wq;
-       INIT_LIST_HEAD(&bc->async_list);
+       INIT_LIST_HEAD(&bc->async_cmd_list);
+       INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+       spin_lock_init(&bc->queue_lock);
+       bc->adapter = adapter;
 
        for (i = 0; i < num_vfs; i++) {
                vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
 
 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 {
-       struct list_head *head = &bc->async_list;
-       struct qlcnic_async_work_list *entry;
+       struct list_head *head = &bc->async_cmd_list;
+       struct qlcnic_async_cmd *entry;
 
        flush_workqueue(bc->bc_async_wq);
+       cancel_work_sync(&bc->vf_async_work);
+
+       spin_lock(&bc->queue_lock);
        while (!list_empty(head)) {
-               entry = list_entry(head->next, struct qlcnic_async_work_list,
+               entry = list_entry(head->next, struct qlcnic_async_cmd,
                                   list);
-               cancel_work_sync(&entry->work);
                list_del(&entry->list);
+               kfree(entry->cmd);
                kfree(entry);
        }
+       spin_unlock(&bc->queue_lock);
 }
 
 void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 
 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
-       struct qlcnic_async_work_list *entry;
-       struct qlcnic_adapter *adapter;
+       struct qlcnic_async_cmd *entry, *tmp;
+       struct qlcnic_back_channel *bc;
        struct qlcnic_cmd_args *cmd;
+       struct list_head *head;
+       LIST_HEAD(del_list);
+
+       bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+       head = &bc->async_cmd_list;
+
+       spin_lock(&bc->queue_lock);
+       list_splice_init(head, &del_list);
+       spin_unlock(&bc->queue_lock);
+
+       list_for_each_entry_safe(entry, tmp, &del_list, list) {
+               list_del(&entry->list);
+               cmd = entry->cmd;
+               __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+               kfree(entry);
+       }
+
+       if (!list_empty(head))
+               queue_work(bc->bc_async_wq, &bc->vf_async_work);
 
-       entry = container_of(work, struct qlcnic_async_work_list, work);
-       adapter = entry->ptr;
-       cmd = entry->cmd;
-       __qlcnic_sriov_issue_cmd(adapter, cmd);
        return;
 }
 
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+                            struct qlcnic_cmd_args *cmd)
 {
-       struct list_head *node;
-       struct qlcnic_async_work_list *entry = NULL;
-       u8 empty = 0;
+       struct qlcnic_async_cmd *entry = NULL;
 
-       list_for_each(node, &bc->async_list) {
-               entry = list_entry(node, struct qlcnic_async_work_list, list);
-               if (!work_pending(&entry->work)) {
-                       empty = 1;
-                       break;
-               }
-       }
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+       if (!entry)
+               return NULL;
 
-       if (!empty) {
-               entry = kzalloc(sizeof(struct qlcnic_async_work_list),
-                               GFP_ATOMIC);
-               if (entry == NULL)
-                       return NULL;
-               list_add_tail(&entry->list, &bc->async_list);
-       }
+       entry->cmd = cmd;
+
+       spin_lock(&bc->queue_lock);
+       list_add_tail(&entry->list, &bc->async_cmd_list);
+       spin_unlock(&bc->queue_lock);
 
        return entry;
 }
 
 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
-                                           work_func_t func, void *data,
                                            struct qlcnic_cmd_args *cmd)
 {
-       struct qlcnic_async_work_list *entry = NULL;
+       struct qlcnic_async_cmd *entry = NULL;
 
-       entry = qlcnic_sriov_get_free_node_async_work(bc);
-       if (!entry)
+       entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+       if (!entry) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
                return;
+       }
 
-       entry->ptr = data;
-       entry->cmd = cmd;
-       INIT_WORK(&entry->work, func);
-       queue_work(bc->bc_async_wq, &entry->work);
+       queue_work(bc->bc_async_wq, &bc->vf_async_work);
 }
 
 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
        if (adapter->need_fw_reset)
                return -EIO;
 
-       qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
-                                       adapter, cmd);
+       qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
        return 0;
 }
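/*
 * The rework replaces one work_struct per command with a single worker
 * draining a spinlock-protected list. The splice-under-lock idiom it
 * uses, as a sketch (struct ctx, struct cmd and process() are
 * hypothetical; the list and workqueue calls are the real kernel API):
 */
static void worker_fn(struct work_struct *work)
{
        struct ctx *ctx = container_of(work, struct ctx, work);
        struct cmd *entry, *tmp;
        LIST_HEAD(del_list);

        spin_lock(&ctx->lock);
        list_splice_init(&ctx->pending, &del_list);     /* O(1) handoff */
        spin_unlock(&ctx->lock);

        list_for_each_entry_safe(entry, tmp, &del_list, list) {
                list_del(&entry->list);
                process(ctx, entry);
                kfree(entry);
        }

        /* racy peek: the worst case is one extra no-op pass */
        if (!list_empty(&ctx->pending))
                queue_work(ctx->wq, &ctx->work);
}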
 
index deae10d7426df0441ad5970f3f425a948c32abb3..5297bf77211cf344c7a8556c2778abc9c1e4a9cd 100644 (file)
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
        unsigned int rx_tail = cp->rx_tail;
        int rx;
 
-rx_status_loop:
        rx = 0;
+rx_status_loop:
        cpw16(IntrStatus, cp_rx_intr_mask);
 
        while (rx < budget) {
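/*
 * Why the label moved below the assignment: jumping back to
 * rx_status_loop used to reset rx to zero, so a single NAPI poll could
 * process far more frames than its budget across restarts. With rx
 * initialized once, the count accumulates and the budget check holds.
 */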
index 799d58d86e6dcb86fc23c125abbbbc317720e2b7..054e795df90f9de2bea2accd6c9d66eafc01ffb0 100644 (file)
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 
        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
+       [TSU_FWSLC]     = 0x0038,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
+       [TSU_POST1]     = 0x0070,
+       [TSU_POST2]     = 0x0074,
+       [TSU_POST3]     = 0x0078,
+       [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
 
        [TXNLCR0]       = 0x0080,
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 {
        if (sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+               sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+                                TSU_FWSLC);    /* Enable POST registers */
                return;
        }
 
index f658fee74f188b564f59842654de7036b82850d5..e00a669e9e0911626044a3cfe377f7c7b2251a0e 100644 (file)
@@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
        }
 
 #if BITS_PER_LONG == 64
+       BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
        mask[0] = raw_mask[0];
        mask[1] = raw_mask[1];
 #else
+       BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
        mask[0] = raw_mask[0] & 0xffffffff;
        mask[1] = raw_mask[0] >> 32;
        mask[2] = raw_mask[1] & 0xffffffff;
-       mask[3] = raw_mask[1] >> 32;
 #endif
 }
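/*
 * On 32-bit builds mask[] holds BITS_TO_LONGS(EF10_STAT_COUNT) == 3
 * longs, so the removed mask[3] store ran one element past the end. The
 * added BUILD_BUG_ON()s turn that size assumption into a compile-time
 * failure: any change to EF10_STAT_COUNT that alters the array size now
 * breaks the build instead of silently corrupting the stack.
 */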
 
index 726b80f4590660f2d08b919320deac3bb54a82ea..503a3b6dce917f3fdb043dab106b80832103ce93 100644 (file)
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
        if (pd) {
                memcpy(&lp->cfg, pd, sizeof(lp->cfg));
                lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+               if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+                       dev_err(&pdev->dev,
+                               "at least one of 8-bit or 16-bit access support is required.\n");
+                       ret = -ENXIO;
+                       goto out_free_netdev;
+               }
        }
 
 #if IS_BUILTIN(CONFIG_OF)
index 1a55c7976df0fb873b09509b9dda51d81f386811..ea846546746937d76d68e49da0e8e5847a457c27 100644 (file)
 #include <linux/dmaengine.h>
 #include <linux/smc91x.h>
 
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r)                                            \
+       do {                                                            \
+               unsigned int __val16 = (x);                             \
+               unsigned int __reg = (r);                               \
+               SMC_outb(__val16, a, __reg);                            \
+               SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+       } while (0)
+
+#define SMC_inw_b(a, r)                                                        \
+       ({                                                              \
+               unsigned int __val16;                                   \
+               unsigned int __reg = r;                                 \
+               __val16  = SMC_inb(a, __reg);                           \
+               __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+               __val16;                                                \
+       })
+
 /*
  * Define your architecture specific bus configuration parameters here.
  */
 #define SMC_IO_SHIFT           (lp->io_shift)
 
 #define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_inw(a, r)          readw((a) + (r))
+#define SMC_inw(a, r)                                                  \
+       ({                                                              \
+               unsigned int __smc_r = r;                               \
+               SMC_16BIT(lp) ? readw((a) + __smc_r) :                  \
+               SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :                  \
+               ({ BUG(); 0; });                                        \
+       })
+
 #define SMC_inl(a, r)          readl((a) + (r))
 #define SMC_outb(v, a, r)      writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)                                              \
+       do {                                                            \
+               unsigned int __v = v, __smc_r = r;                      \
+               if (SMC_16BIT(lp))                                      \
+                       __SMC_outw(__v, a, __smc_r);                    \
+               else if (SMC_8BIT(lp))                                  \
+                       SMC_outw_b(__v, a, __smc_r);                    \
+               else                                                    \
+                       BUG();                                          \
+       } while (0)
+
 #define SMC_outl(v, a, r)      writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, l)
 #define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
 #define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
 #define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
 #define SMC_IRQ_FLAGS          (-1)    /* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
        if ((machine_is_mainstone() || machine_is_stargate2() ||
             machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 
 #if ! SMC_CAN_USE_16BIT
 
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg)                                       \
-       do {                                                            \
-               unsigned int __val16 = (x);                             \
-               SMC_outb( __val16, ioaddr, reg );                       \
-               SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
-       } while (0)
-#define SMC_inw(ioaddr, reg)                                           \
-       ({                                                              \
-               unsigned int __val16;                                   \
-               __val16 =  SMC_inb( ioaddr, reg );                      \
-               __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
-               __val16;                                                \
-       })
-
+#define SMC_outw(x, ioaddr, reg)       SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg)           SMC_inw_b(ioaddr, reg)
 #define SMC_insw(a, r, p, l)           BUG()
 #define SMC_outsw(a, r, p, l)          BUG()
 
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 #endif
 
 #if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
 #define SMC_inb(ioaddr, reg)           ({ BUG(); 0; })
+#undef SMC_outb
 #define SMC_outb(x, ioaddr, reg)       BUG()
 #define SMC_insb(a, r, p, l)           BUG()
 #define SMC_outsb(a, r, p, l)          BUG()
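/*
 * Worked example of the 8-bit fallback (with SMC_IO_SHIFT == 0, so the
 * two byte lanes are adjacent):
 *
 *   SMC_outw_b(0xBEEF, a, r);
 *     -> SMC_outb(0xBEEF, a, r);          writeb() stores the low byte 0xEF
 *     -> SMC_outb(0xBEEF >> 8, a, r + 1); stores the high byte 0xBE
 *
 * and SMC_inw_b(a, r) reassembles the same value as lo | (hi << 8).
 */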
index ca3134540d2d5bf33b5faebb8693ff9c8d79421f..4f8910b7db2e1be99d3f50d03d2065f3795b0c64 100644 (file)
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
                goto err_out_free_bus_2;
        }
 
-       if (smsc911x_mii_probe(dev) < 0) {
-               SMSC_WARN(pdata, probe, "Error registering mii bus");
-               goto err_out_unregister_bus_3;
-       }
-
        return 0;
 
-err_out_unregister_bus_3:
-       mdiobus_unregister(pdata->mii_bus);
 err_out_free_bus_2:
        mdiobus_free(pdata->mii_bus);
 err_out_1:
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
        smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 }
 
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct smsc911x_data *pdata = netdev_priv(dev);
+       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+       u32 inten = smsc911x_reg_read(pdata, INT_EN);
+       int serviced = IRQ_NONE;
+       u32 temp;
+
+       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+               temp = smsc911x_reg_read(pdata, INT_EN);
+               temp &= (~INT_EN_SW_INT_EN_);
+               smsc911x_reg_write(pdata, INT_EN, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+               pdata->software_irq_signal = 1;
+               smp_wmb();
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+               /* Called when there is a multicast update scheduled and
+                * it is now safe to complete the update */
+               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+               if (pdata->multicast_update_pending)
+                       smsc911x_rx_multicast_update_workaround(pdata);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (intsts & inten & INT_STS_TDFA_) {
+               temp = smsc911x_reg_read(pdata, FIFO_INT);
+               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+               smsc911x_reg_write(pdata, FIFO_INT, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+               netif_wake_queue(dev);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXE_)) {
+               SMSC_TRACE(pdata, intr, "RX Error interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (likely(intsts & inten & INT_STS_RSFL_)) {
+               if (likely(napi_schedule_prep(&pdata->napi))) {
+                       /* Disable Rx interrupts */
+                       temp = smsc911x_reg_read(pdata, INT_EN);
+                       temp &= (~INT_EN_RSFL_EN_);
+                       smsc911x_reg_write(pdata, INT_EN, temp);
+                       /* Schedule a NAPI poll */
+                       __napi_schedule(&pdata->napi);
+               } else {
+                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+               }
+               serviced = IRQ_HANDLED;
+       }
+
+       return serviced;
+}
+
 static int smsc911x_open(struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
        unsigned int timeout;
        unsigned int temp;
        unsigned int intcfg;
+       int retval;
+       int irq_flags;
 
-       /* if the phy is not yet registered, retry later*/
+       /* find and start the given phy */
        if (!dev->phydev) {
-               SMSC_WARN(pdata, hw, "phy_dev is NULL");
-               return -EAGAIN;
+               retval = smsc911x_mii_probe(dev);
+               if (retval < 0) {
+                       SMSC_WARN(pdata, probe, "Error starting phy");
+                       goto out;
+               }
        }
 
        /* Reset the LAN911x */
-       if (smsc911x_soft_reset(pdata)) {
+       retval = smsc911x_soft_reset(pdata);
+       if (retval) {
                SMSC_WARN(pdata, hw, "soft reset failed");
-               return -EIO;
+               goto mii_free_out;
        }
 
        smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
        pdata->software_irq_signal = 0;
        smp_wmb();
 
+       irq_flags = irq_get_trigger_type(dev->irq);
+       retval = request_irq(dev->irq, smsc911x_irqhandler,
+                            irq_flags | IRQF_SHARED, dev->name, dev);
+       if (retval) {
+               SMSC_WARN(pdata, probe,
+                         "Unable to claim requested irq: %d", dev->irq);
+               goto mii_free_out;
+       }
+
        temp = smsc911x_reg_read(pdata, INT_EN);
        temp |= INT_EN_SW_INT_EN_;
        smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
        if (!pdata->software_irq_signal) {
                netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
                            dev->irq);
-               return -ENODEV;
+               retval = -ENODEV;
+               goto irq_stop_out;
        }
        SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
                   dev->irq);
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
 
        netif_start_queue(dev);
        return 0;
+
+irq_stop_out:
+       free_irq(dev->irq, dev);
+mii_free_out:
+       phy_disconnect(dev->phydev);
+       dev->phydev = NULL;
+out:
+       return retval;
 }
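
With PHY attach and IRQ request moved into smsc911x_open(), each failure path must release exactly what has been acquired so far, in reverse order; that is what the irq_stop_out/mii_free_out/out label chain above does. A stubbed, self-contained sketch of the idiom (the get_*/put_* pairs are hypothetical stand-ins):

    #include <stdio.h>

    static int get_phy(void)   { return 0; }   /* 0 == success */
    static int get_irq(void)   { return 0; }
    static int self_test(void) { return -1; }  /* force the unwind path */
    static void put_irq(void)  { puts("irq released"); }
    static void put_phy(void)  { puts("phy released"); }

    static int open_dev(void)
    {
            int ret;

            ret = get_phy();
            if (ret)
                    goto out;
            ret = get_irq();
            if (ret)
                    goto put_phy_out;
            ret = self_test();
            if (ret)
                    goto put_irq_out;       /* unwind in reverse order */
            return 0;

    put_irq_out:
            put_irq();
    put_phy_out:
            put_phy();
    out:
            return ret;
    }

    int main(void) { return open_dev() ? 1 : 0; }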
 
 /* Entry point for stopping the interface */
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
        dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
        smsc911x_tx_update_txcounters(dev);
 
+       free_irq(dev->irq, dev);
+
        /* Bring the PHY down */
-       if (dev->phydev)
+       if (dev->phydev) {
                phy_stop(dev->phydev);
+               phy_disconnect(dev->phydev);
+               dev->phydev = NULL;
+       }
+       netif_carrier_off(dev);
 
        SMSC_TRACE(pdata, ifdown, "Interface stopped");
        return 0;
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
        spin_unlock_irqrestore(&pdata->mac_lock, flags);
 }
 
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct smsc911x_data *pdata = netdev_priv(dev);
-       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
-       u32 inten = smsc911x_reg_read(pdata, INT_EN);
-       int serviced = IRQ_NONE;
-       u32 temp;
-
-       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
-               temp = smsc911x_reg_read(pdata, INT_EN);
-               temp &= (~INT_EN_SW_INT_EN_);
-               smsc911x_reg_write(pdata, INT_EN, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
-               pdata->software_irq_signal = 1;
-               smp_wmb();
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
-               /* Called when there is a multicast update scheduled and
-                * it is now safe to complete the update */
-               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
-               if (pdata->multicast_update_pending)
-                       smsc911x_rx_multicast_update_workaround(pdata);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (intsts & inten & INT_STS_TDFA_) {
-               temp = smsc911x_reg_read(pdata, FIFO_INT);
-               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
-               smsc911x_reg_write(pdata, FIFO_INT, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
-               netif_wake_queue(dev);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXE_)) {
-               SMSC_TRACE(pdata, intr, "RX Error interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (likely(intsts & inten & INT_STS_RSFL_)) {
-               if (likely(napi_schedule_prep(&pdata->napi))) {
-                       /* Disable Rx interrupts */
-                       temp = smsc911x_reg_read(pdata, INT_EN);
-                       temp &= (~INT_EN_RSFL_EN_);
-                       smsc911x_reg_write(pdata, INT_EN, temp);
-                       /* Schedule a NAPI poll */
-                       __napi_schedule(&pdata->napi);
-               } else {
-                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
-               }
-               serviced = IRQ_HANDLED;
-       }
-
-       return serviced;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc911x_poll_controller(struct net_device *dev)
 {
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
        pdata = netdev_priv(dev);
        BUG_ON(!pdata);
        BUG_ON(!pdata->ioaddr);
-       BUG_ON(!dev->phydev);
+       WARN_ON(dev->phydev);
 
        SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
-       phy_disconnect(dev->phydev);
        mdiobus_unregister(pdata->mii_bus);
        mdiobus_free(pdata->mii_bus);
 
        unregister_netdev(dev);
-       free_irq(dev->irq, dev);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "smsc911x-memory");
        if (!res)
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        struct smsc911x_data *pdata;
        struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
        struct resource *res;
-       unsigned int intcfg = 0;
-       int res_size, irq, irq_flags;
+       int res_size, irq;
        int retval;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        pdata = netdev_priv(dev);
        dev->irq = irq;
-       irq_flags = irq_get_trigger_type(irq);
        pdata->ioaddr = ioremap_nocache(res->start, res_size);
 
        pdata->dev = dev;
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        if (retval < 0)
                goto out_disable_resources;
 
-       /* configure irq polarity and type before connecting isr */
-       if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
-               intcfg |= INT_CFG_IRQ_POL_;
-
-       if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
-               intcfg |= INT_CFG_IRQ_TYPE_;
-
-       smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
-       /* Ensure interrupts are globally disabled before connecting ISR */
-       smsc911x_disable_irq_chip(dev);
+       netif_carrier_off(dev);
 
-       retval = request_irq(dev->irq, smsc911x_irqhandler,
-                            irq_flags | IRQF_SHARED, dev->name, dev);
+       retval = smsc911x_mii_init(pdev, dev);
        if (retval) {
-               SMSC_WARN(pdata, probe,
-                         "Unable to claim requested irq: %d", dev->irq);
+               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
                goto out_disable_resources;
        }
 
-       netif_carrier_off(dev);
-
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_free_irq;
+               goto out_disable_resources;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
        }
 
-       retval = smsc911x_mii_init(pdev, dev);
-       if (retval) {
-               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
-               goto out_unregister_netdev_5;
-       }
-
        spin_lock_irq(&pdata->mac_lock);
 
        /* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        return 0;
 
-out_unregister_netdev_5:
-       unregister_netdev(dev);
-out_free_irq:
-       free_irq(dev->irq, dev);
 out_disable_resources:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
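
The reordering above matters because register_netdev() makes the interface visible: userspace may invoke .ndo_open the moment it returns, so anything open() depends on (here the MII bus) has to exist beforehand; it also lets the unregister/free_irq labels disappear. A hedged skeleton of the resulting probe ordering, with hypothetical sketch_* stand-ins for the driver's real helpers:

    #include <linux/netdevice.h>
    #include <linux/platform_device.h>

    /* Hypothetical stand-ins for the driver's MII setup/teardown. */
    static int sketch_mii_init(struct platform_device *pdev,
                               struct net_device *ndev) { return 0; }
    static void sketch_mii_free(struct net_device *ndev) { }

    static int sketch_probe(struct platform_device *pdev,
                            struct net_device *ndev)
    {
            int ret;

            /* Everything ndo_open() may touch must be ready first. */
            ret = sketch_mii_init(pdev, ndev);
            if (ret)
                    return ret;

            /* Strictly last: the device is live once this returns. */
            ret = register_netdev(ndev);
            if (ret)
                    sketch_mii_free(ndev);
            return ret;
    }
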
index cbefe9e2207cac01a418eb8df24fb8c82278c502..885a5e64519d7af0062955294b083c097df45a06 100644 (file)
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
-               pmt |= global_unicast;
+               pmt |= power_down | global_unicast | wake_up_frame_en;
        }
 
        writel(pmt, ioaddr + GMAC_PMT);
index df5580dcdfedd22c64fb79d7b0b6687cc3996af5..51019b794be52dede1b50f8ac6d09ff1b09762fe 100644 (file)
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
-               pmt |= global_unicast;
+               pmt |= power_down | global_unicast | wake_up_frame_en;
        }
 
        writel(pmt, ioaddr + GMAC_PMT);
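
Both the dwmac1000 and dwmac4 variants get the same fix: the global_unicast filter bit on its own arms nothing, because the MAC also needs the power-down and wake-up-frame-enable bits set before a matching unicast frame can wake it. Illustrated below with hypothetical bit positions (the real masks live in the dwmac headers):

    #include <stdint.h>

    /* Hypothetical bit layout, for illustration only. */
    #define PMT_POWER_DOWN        (1u << 0)
    #define PMT_WAKEUP_FRAME_EN   (1u << 2)
    #define PMT_GLOBAL_UNICAST    (1u << 9)

    static uint32_t pmt_for_unicast_wol(void)
    {
            /* The filter alone is inert; the MAC must also be allowed
             * to power down and to treat matches as wake events. */
            return PMT_POWER_DOWN | PMT_GLOBAL_UNICAST | PMT_WAKEUP_FRAME_EN;
    }
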
index 9f159a775af3c030addec825df6880b1a0f80aa7..4490ebaed127e90dcf4477cc16c4b4d3215e5e5f 100644 (file)
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
        lp->mii_bus->read  = &dwceqos_mdio_read;
        lp->mii_bus->write = &dwceqos_mdio_write;
        lp->mii_bus->priv = lp;
-       lp->mii_bus->parent = &lp->ndev->dev;
+       lp->mii_bus->parent = &lp->pdev->dev;
 
        of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
                DWCEQOS_MMC_CTRL_RSTONRD);
        dwceqos_enable_mmc_interrupt(lp);
 
-       /* Enable Interrupts */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
-                     DWCEQOS_DMA_CH0_IE_NIE |
-                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
-                     DWCEQOS_DMA_CH0_IE_AIE |
-                     DWCEQOS_DMA_CH0_IE_FBEE);
-
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
        dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
 
        dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
        netif_start_queue(ndev);
        tasklet_enable(&lp->tx_bdreclaim_tasklet);
 
+       /* Enable Interrupts -- do this only after we enable NAPI and the
+        * tasklet.
+        */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+                     DWCEQOS_DMA_CH0_IE_NIE |
+                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+                     DWCEQOS_DMA_CH0_IE_AIE |
+                     DWCEQOS_DMA_CH0_IE_FBEE);
+
        return 0;
 }
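
Together with the dwceqos_init_hw() hunk above, which now leaves REG_DWCEQOS_DMA_CH0_IE zeroed, this guarantees no DMA interrupt can fire before NAPI and the TX-reclaim tasklet are able to run. The resulting ordering in open(), sketched (not the literal driver code):

    napi_enable(&lp->napi);                    /* consumers first */
    netif_start_queue(ndev);
    tasklet_enable(&lp->tx_bdreclaim_tasklet);

    /* Unmask interrupt sources only now; an IRQ delivered any earlier
     * would find its bottom halves still disabled. */
    dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
                  DWCEQOS_DMA_CH0_IE_NIE | DWCEQOS_DMA_CH0_IE_RIE |
                  DWCEQOS_DMA_CH0_IE_TIE | DWCEQOS_DMA_CH0_IE_AIE |
                  DWCEQOS_DMA_CH0_IE_FBEE);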
 
@@ -2850,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
 
        ndev->features = ndev->hw_features;
 
-       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
-       ret = register_netdev(ndev);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_clk_dis_aper;
-       }
-
        lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
        if (IS_ERR(lp->phy_ref_clk)) {
                dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
                ret = PTR_ERR(lp->phy_ref_clk);
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        ret = clk_prepare_enable(lp->phy_ref_clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable device clock.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2877,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
                ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
                if (ret < 0) {
                        dev_err(&pdev->dev, "invalid fixed-link");
-                       goto err_out_unregister_clk_notifier;
+                       goto err_out_clk_dis_phy;
                }
 
                lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2886,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = of_get_phy_mode(lp->pdev->dev.of_node);
        if (ret < 0) {
                dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        lp->phy_interface = ret;
@@ -2894,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = dwceqos_mii_init(lp);
        if (ret) {
                dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        ret = dwceqos_mii_probe(ndev);
        if (ret != 0) {
                netdev_err(ndev, "mii_probe fail.\n");
                ret = -ENXIO;
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2919,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
                        ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
        dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
                 pdev->id, ndev->base_addr, ndev->irq);
@@ -2929,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
                        ndev->irq, ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        if (netif_msg_probe(lp))
                netdev_dbg(ndev, "net_local@%p\n", lp);
 
+       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+               goto err_out_clk_dis_phy;
+       }
+
        return 0;
 
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
        clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
-       unregister_netdev(ndev);
 err_out_clk_dis_aper:
        clk_disable_unprepare(lp->apb_pclk);
 err_out_free_netdev:
index 7452b5f9d02427469e58caf516c02b6de4a44a4e..7108c68f16d3e970713e239c88126efde8b95cea 100644 (file)
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
                err = pci_enable_msi(pdev);
                if (err)
-                       pr_err("Can't eneble msi. error is %d\n", err);
+                       pr_err("Can't enable msi. error is %d\n", err);
                else
                        nic->irq_type = IRQ_MSI;
        } else
index c51f34693eae40440604c4090e23dcf6201cd21e..f85d605e45606f1b3e8493dfd1e1b3383005d7b4 100644 (file)
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                netif_receive_skb(skb);
                ndev->stats.rx_bytes += len;
                ndev->stats.rx_packets++;
+               kmemleak_not_leak(new_skb);
        } else {
                ndev->stats.rx_dropped++;
                new_skb = skb;
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
                                kfree_skb(skb);
                                goto err_cleanup;
                        }
+                       kmemleak_not_leak(skb);
                }
                /* continue even if we didn't manage to submit all
                 * receive descs
index 01a77145a0fa487518b6233d5760d3a0a8866323..8fd131207ee106b8c198ed607473947cedc851c1 100644 (file)
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
 
 static void tsi108_timed_checker(unsigned long dev_ptr);
 
+#ifdef DEBUG
 static void dump_eth_one(struct net_device *dev)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
               TSI_READ(TSI108_EC_RXESTAT),
               TSI_READ(TSI108_EC_RXERR), data->rxpending);
 }
+#endif
 
 /* Synchronization is needed between the thread and up/down events.
  * Note that the PHY is accessed through the same registers for both
index 3cee84a24815d7fec3f089b4c4c3d65519b6e1ae..93dc10b10c0901c8974c4990eb87a22d6411e690 100644 (file)
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
        mac_address = of_get_mac_address(ofdev->dev.of_node);
 
-       if (mac_address)
+       if (mac_address) {
                /* Set the MAC address. */
                memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
-       else
-               dev_warn(dev, "No MAC address found\n");
+       } else {
+               dev_warn(dev, "No MAC address found, using random\n");
+               eth_hw_addr_random(ndev);
+       }
 
        /* Clear the Tx CSR's in case this is a restart */
        __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
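
Falling back to eth_hw_addr_random() beats running with an all-zero MAC when the device tree carries no address. The property that helper guarantees -- random bytes with the locally-administered bit set and the multicast bit clear -- looks like this in a userspace sketch:

    #include <stdint.h>
    #include <stdlib.h>

    static void random_lladdr(uint8_t addr[6])
    {
            for (int i = 0; i < 6; i++)
                    addr[i] = (uint8_t)rand();
            addr[0] &= 0xfe;   /* clear the multicast bit */
            addr[0] |= 0x02;   /* set the locally-administered bit */
    }
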
index 467fb8b4d08389b00fbca446bae9261e2145b909..591af71eae56803d936b503d77a17460a43e38b3 100644 (file)
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
        u32 event;
 };
 
-struct garp_wrk {
-       struct work_struct dwrk;
-       struct net_device *netdev;
-       struct netvsc_device *netvsc_dev;
-};
-
 /* The context of the netvsc device  */
 struct net_device_context {
        /* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
 
        struct work_struct work;
        u32 msg_enable; /* debug level */
-       struct garp_wrk gwrk;
 
        struct netvsc_stats __percpu *tx_stats;
        struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
 
        /* the device is going away */
        bool start_remove;
+
+       /* State to manage the associated VF interface. */
+       struct net_device *vf_netdev;
+       bool vf_inject;
+       atomic_t vf_use_cnt;
+       /* 1: allocated, serial number is valid. 0: not allocated */
+       u32 vf_alloc;
+       /* Serial number of the VF to team with */
+       u32 vf_serial;
 };
 
 /* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
        u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
        u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-       /* 1: allocated, serial number is valid. 0: not allocated */
-       u32 vf_alloc;
-       /* Serial number of the VF to team with */
-       u32 vf_serial;
        atomic_t open_cnt;
-       /* State to manage the associated VF interface. */
-       bool vf_inject;
-       struct net_device *vf_netdev;
-       atomic_t vf_use_cnt;
 };
 
 static inline struct netvsc_device *
index 20e09174ff6240bb5b1ba2eb4f13f0a4fd0d5bf3..410fb8e81376f6ac5cd2272db0e1540aa8941aad 100644 (file)
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
        atomic_set(&net_device->open_cnt, 0);
-       atomic_set(&net_device->vf_use_cnt, 0);
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-       net_device->vf_netdev = NULL;
-       net_device->vf_inject = false;
-
        return net_device;
 }
 
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
                nvscdev->send_table[i] = tab[i];
 }
 
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
                           struct nvsp_message *nvmsg)
 {
-       nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
-       nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+       net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+       net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
 }
 
 static inline void netvsc_receive_inband(struct hv_device *hdev,
-                                        struct netvsc_device *nvdev,
-                                        struct nvsp_message *nvmsg)
+                                struct net_device_context *net_device_ctx,
+                                struct nvsp_message *nvmsg)
 {
        switch (nvmsg->hdr.msg_type) {
        case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
                break;
 
        case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
-               netvsc_send_vf(nvdev, nvmsg);
+               netvsc_send_vf(net_device_ctx, nvmsg);
                break;
        }
 }
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                                   struct vmpacket_descriptor *desc)
 {
        struct nvsp_message *nvmsg;
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
        nvmsg = (struct nvsp_message *)((unsigned long)
                desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                break;
 
        case VM_PKT_DATA_INBAND:
-               netvsc_receive_inband(device, net_device, nvmsg);
+               netvsc_receive_inband(device, net_device_ctx, nvmsg);
                break;
 
        default:
index 41bd952cc28d37299562b2a9025e6767b39973c1..3ba29fc80d057e744eef4ae98ad60315f742c4ce 100644 (file)
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        struct sk_buff *skb;
        struct sk_buff *vf_skb;
        struct netvsc_stats *rx_stats;
-       struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
        u32 bytes_recvd = packet->total_data_buflen;
        int ret = 0;
 
        if (!net || net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;
 
-       if (READ_ONCE(netvsc_dev->vf_inject)) {
-               atomic_inc(&netvsc_dev->vf_use_cnt);
-               if (!READ_ONCE(netvsc_dev->vf_inject)) {
+       if (READ_ONCE(net_device_ctx->vf_inject)) {
+               atomic_inc(&net_device_ctx->vf_use_cnt);
+               if (!READ_ONCE(net_device_ctx->vf_inject)) {
                        /*
                         * We raced; just move on.
                         */
-                       atomic_dec(&netvsc_dev->vf_use_cnt);
+                       atomic_dec(&net_device_ctx->vf_use_cnt);
                        goto vf_injection_done;
                }
 
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                 * the host). Deliver these via the VF interface
                 * in the guest.
                 */
-               vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
-                                              csum_info, *data, vlan_tci);
+               vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+                                              packet, csum_info, *data,
+                                              vlan_tci);
                if (vf_skb != NULL) {
-                       ++netvsc_dev->vf_netdev->stats.rx_packets;
-                       netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+                       ++net_device_ctx->vf_netdev->stats.rx_packets;
+                       net_device_ctx->vf_netdev->stats.rx_bytes +=
+                               bytes_recvd;
                        netif_receive_skb(vf_skb);
                } else {
                        ++net->stats.rx_dropped;
                        ret = NVSP_STAT_FAIL;
                }
-               atomic_dec(&netvsc_dev->vf_use_cnt);
+               atomic_dec(&net_device_ctx->vf_use_cnt);
                return ret;
        }
 
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
        free_netdev(netdev);
 }
 
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
-       struct garp_wrk *gwrk;
-
-       gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
-       netdev_notify_peers(gwrk->netdev);
-
-       atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
 static struct net_device *get_netvsc_net_device(char *mac)
 {
        struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;
-       if (netvsc_dev == NULL)
+       if (!netvsc_dev || net_device_ctx->vf_netdev)
                return NOTIFY_DONE;
 
        netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
         * Take a reference on the module.
         */
        try_module_get(THIS_MODULE);
-       netvsc_dev->vf_netdev = vf_netdev;
+       net_device_ctx->vf_netdev = vf_netdev;
        return NOTIFY_OK;
 }
 
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+       net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+       net_device_ctx->vf_inject = false;
+
+       /* Wait for currently active users to drain out. */
+       while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+               udelay(50);
+}
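
netvsc_inject_disable() pairs with the receive path shown earlier: the reader checks vf_inject, bumps vf_use_cnt, then re-checks the flag to close the race with a concurrent disable, which flips the flag first and then spins until the count drains. A toy userspace model of that handshake, with C11 atomics standing in for READ_ONCE and the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool inject;
    static atomic_int  use_cnt;

    static bool reader_enter(void)
    {
            if (!atomic_load(&inject))
                    return false;
            atomic_fetch_add(&use_cnt, 1);
            if (!atomic_load(&inject)) {        /* lost the race */
                    atomic_fetch_sub(&use_cnt, 1);
                    return false;
            }
            return true;                        /* must call reader_exit() */
    }

    static void reader_exit(void)
    {
            atomic_fetch_sub(&use_cnt, 1);
    }

    static void writer_disable(void)
    {
            atomic_store(&inject, false);
            while (atomic_load(&use_cnt))       /* wait for readers to drain */
                    ;
    }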
 
 static int netvsc_vf_up(struct net_device *vf_netdev)
 {
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;
 
-       if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+       if (!netvsc_dev || !net_device_ctx->vf_netdev)
                return NOTIFY_DONE;
 
        netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
-       netvsc_dev->vf_inject = true;
+       netvsc_inject_enable(net_device_ctx);
 
        /*
         * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 
        netif_carrier_off(ndev);
 
-       /*
-        * Now notify peers. We are scheduling work to
-        * notify peers; take a reference to prevent
-        * the VF interface from vanishing.
-        */
-       atomic_inc(&netvsc_dev->vf_use_cnt);
-       net_device_ctx->gwrk.netdev = vf_netdev;
-       net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-       schedule_work(&net_device_ctx->gwrk.dwrk);
+       /* Now notify peers through VF device. */
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
 
        return NOTIFY_OK;
 }
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;
 
-       if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+       if (!netvsc_dev || !net_device_ctx->vf_netdev)
                return NOTIFY_DONE;
 
        netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
-       netvsc_dev->vf_inject = false;
-       /*
-        * Wait for currently active users to
-        * drain out.
-        */
-
-       while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
-               udelay(50);
+       netvsc_inject_disable(net_device_ctx);
        netvsc_switch_datapath(ndev, false);
        netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
        rndis_filter_close(netvsc_dev);
        netif_carrier_on(ndev);
-       /*
-        * Notify peers.
-        */
-       atomic_inc(&netvsc_dev->vf_use_cnt);
-       net_device_ctx->gwrk.netdev = ndev;
-       net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-       schedule_work(&net_device_ctx->gwrk.dwrk);
+
+       /* Now notify peers through netvsc device. */
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
 
        return NOTIFY_OK;
 }
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
 
        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;
-       if (netvsc_dev == NULL)
+       if (!netvsc_dev || !net_device_ctx->vf_netdev)
                return NOTIFY_DONE;
        netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
-       netvsc_dev->vf_netdev = NULL;
+       netvsc_inject_disable(net_device_ctx);
+       net_device_ctx->vf_netdev = NULL;
        module_put(THIS_MODULE);
        return NOTIFY_OK;
 }
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
 
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
-       INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 
+       atomic_set(&net_device_ctx->vf_use_cnt, 0);
+       net_device_ctx->vf_netdev = NULL;
+       net_device_ctx->vf_inject = false;
+
        net->netdev_ops = &device_ops;
 
        net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
 {
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-       /* Avoid Vlan, Bonding dev with same MAC registering as VF */
-       if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
+       /* Avoid Vlan dev with same MAC registering as VF */
+       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+               return NOTIFY_DONE;
+
+       /* Avoid Bonding master dev with same MAC registering as VF */
+       if (event_dev->priv_flags & IFF_BONDING &&
+           event_dev->flags & IFF_MASTER)
                return NOTIFY_DONE;
 
        switch (event) {
index d13e6e15d7b5e394dff13bee8a1238e6576658a7..351e701eb043b340204f1f07aff81ef100c7b5d6 100644 (file)
@@ -270,6 +270,7 @@ struct macsec_dev {
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
+       unsigned int nest_level;
 };
 
 /**
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 
 #define MACSEC_FEATURES \
        (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+static struct lock_class_key macsec_netdev_addr_lock_key;
+
 static int macsec_dev_init(struct net_device *dev)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
@@ -2910,6 +2913,11 @@ static int macsec_get_iflink(const struct net_device *dev)
        return macsec_priv(dev)->real_dev->ifindex;
 }
 
+static int macsec_get_nest_level(struct net_device *dev)
+{
+       return macsec_priv(dev)->nest_level;
+}
+
 static const struct net_device_ops macsec_netdev_ops = {
        .ndo_init               = macsec_dev_init,
        .ndo_uninit             = macsec_dev_uninit,
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
        .ndo_start_xmit         = macsec_start_xmit,
        .ndo_get_stats64        = macsec_get_stats64,
        .ndo_get_iflink         = macsec_get_iflink,
+       .ndo_get_lock_subclass  = macsec_get_nest_level,
 };
 
 static const struct device_type macsec_type = {
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
        }
 }
 
+static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct macsec_dev *macsec = macsec_priv(dev);
+       struct net_device *real_dev = macsec->real_dev;
+
+       unregister_netdevice_queue(dev, head);
+       list_del_rcu(&macsec->secys);
+       macsec_del_dev(macsec);
+       netdev_upper_dev_unlink(real_dev, dev);
+
+       macsec_generation++;
+}
+
 static void macsec_dellink(struct net_device *dev, struct list_head *head)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
        struct net_device *real_dev = macsec->real_dev;
        struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
-       macsec_generation++;
+       macsec_common_dellink(dev, head);
 
-       unregister_netdevice_queue(dev, head);
-       list_del_rcu(&macsec->secys);
        if (list_empty(&rxd->secys)) {
                netdev_rx_handler_unregister(real_dev);
                kfree(rxd);
        }
-
-       macsec_del_dev(macsec);
 }
 
 static int register_macsec_dev(struct net_device *real_dev,
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
        dev_hold(real_dev);
 
+       macsec->nest_level = dev_get_nest_level(real_dev) + 1;
+       netdev_lockdep_set_classes(dev);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macsec_netdev_addr_lock_key,
+                                      macsec_get_nest_level(dev));
+
+       err = netdev_upper_dev_link(real_dev, dev);
+       if (err < 0)
+               goto unregister;
+
        /* need to be already registered so that ->init has run and
         * the MAC addr is set
         */
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
        if (rx_handler && sci_exists(real_dev, sci)) {
                err = -EBUSY;
-               goto unregister;
+               goto unlink;
        }
 
        err = macsec_add_dev(dev, sci, icv_len);
        if (err)
-               goto unregister;
+               goto unlink;
 
        if (data)
                macsec_changelink_common(dev, data);
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
 del_dev:
        macsec_del_dev(macsec);
+unlink:
+       netdev_upper_dev_unlink(real_dev, dev);
 unregister:
        unregister_netdevice(dev);
        return err;
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
 
                rxd = macsec_data_rtnl(real_dev);
                list_for_each_entry_safe(m, n, &rxd->secys, secys) {
-                       macsec_dellink(m->secy.netdev, &head);
+                       macsec_common_dellink(m->secy.netdev, &head);
                }
+
+               netdev_rx_handler_unregister(real_dev);
+               kfree(rxd);
+
                unregister_netdevice_many(&head);
                break;
        }
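
The nest_level machinery earlier in this diff exists for lockdep: macsec on top of a real device (itself possibly stacked) takes addr_list_lock at every layer, and without distinct subclasses lockdep reports a false self-deadlock. Recording the stacking depth and handing it back via .ndo_get_lock_subclass keys each level separately. The underlying primitive, in a hedged illustration:

    #include <linux/spinlock.h>

    /* Hypothetical illustration: the same lock class taken at two
     * stacking depths, disambiguated by the lockdep subclass. */
    static void lock_at_depth(spinlock_t *lock, int depth)
    {
            spin_lock_nested(lock, depth);   /* depth == lockdep subclass */
            /* ... touch this layer's address list ... */
            spin_unlock(lock);
    }
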
index cd9b53834bf60a4345880b8c6b4b02967372dba6..3234fcdea31745046cc5a7ac20f2ca676ddcb2e4 100644 (file)
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
-       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
+       vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index a38c0dac514b89b1a9e7ab8d23e2b4aa56e07565..070e3290aa6efea6fcb505cdf0860a4dce676b74 100644 (file)
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q)
        rtnl_unlock();
 
        synchronize_rcu();
-       skb_array_cleanup(&q->skb_array);
        sock_put(&q->sk);
 }
 
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk)
 static void macvtap_sock_destruct(struct sock *sk)
 {
        struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
-       struct sk_buff *skb;
 
-       while ((skb = skb_array_consume(&q->skb_array)) != NULL)
-               kfree_skb(skb);
+       skb_array_cleanup(&q->skb_array);
 }
 
 static int macvtap_open(struct inode *inode, struct file *file)
index 47a64342cc16a50b24a7db69c073b886874a05bf..b4863e4e522b3bb3c927a0f845ca5941edf09de8 100644 (file)
@@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC
 
 config MDIO_XGENE
        tristate "APM X-Gene SoC MDIO bus controller"
+       depends on ARCH_XGENE || COMPILE_TEST
        help
          This module provides a driver for the MDIO busses found in the
          APM X-Gene SoCs.
index 775674808249eb56fa0eb4087745b930b0bef411..92af182951bec5cc0c6374242ec2a59570c6e40b 100644 (file)
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
        mdiobus_unregister(mdio_bus);
        mdiobus_free(mdio_bus);
 
-       if (dev->of_node) {
-               if (IS_ERR(pdata->clk))
-                       clk_disable_unprepare(pdata->clk);
-       }
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
 
        return 0;
 }
index 1882d9828c998c376b20c50f83e25f8e03f22b0c..885ac9cbab5a95787239e024bfbf93894add2878 100644 (file)
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
                data[i] = kszphy_get_stat(phydev, i);
 }
 
-static int kszphy_resume(struct phy_device *phydev)
+static int kszphy_suspend(struct phy_device *phydev)
 {
-       int value;
+       /* Disable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_DISABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
 
-       mutex_lock(&phydev->lock);
+       return genphy_suspend(phydev);
+}
 
-       value = phy_read(phydev, MII_BMCR);
-       phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+static int kszphy_resume(struct phy_device *phydev)
+{
+       genphy_resume(phydev);
 
-       kszphy_config_intr(phydev);
-       mutex_unlock(&phydev->lock);
+       /* Enable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_ENABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
 
        return 0;
 }
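
The suspend/resume pair above brackets the generic BMCR power-down: interrupts are masked in hardware before genphy_suspend(), and unmasked only after genphy_resume() has powered the PHY back up, replacing the old resume that poked BMCR by hand. The shape of the pattern, with hypothetical mask helpers standing in for config_intr():

    #include <linux/phy.h>

    /* Hypothetical stand-ins for the driver's config_intr() calls. */
    static void sketch_mask_irq(struct phy_device *phydev) { }
    static void sketch_unmask_irq(struct phy_device *phydev) { }

    static int sketch_suspend(struct phy_device *phydev)
    {
            sketch_mask_irq(phydev);         /* mask first ...      */
            return genphy_suspend(phydev);   /* ... then power down */
    }

    static int sketch_resume(struct phy_device *phydev)
    {
            int ret = genphy_resume(phydev); /* power up first ...  */

            sketch_unmask_irq(phydev);       /* ... then unmask     */
            return ret;
    }
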
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_sset_count = kszphy_get_sset_count,
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
-       .suspend        = genphy_suspend,
+       .suspend        = kszphy_suspend,
        .resume         = kszphy_resume,
 }, {
        .phy_id         = PHY_ID_KSZ8061,
@@ -953,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = kszphy_resume,
 }, {
        .phy_id         = PHY_ID_KSZ8873MLL,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
index c5dc2c363f96fc296120c29eb7c700e25f619e15..c6f66832a1a641e387fe093b4e6aa61115e6abfa 100644 (file)
@@ -722,8 +722,10 @@ phy_err:
 int phy_start_interrupts(struct phy_device *phydev)
 {
        atomic_set(&phydev->irq_disable, 0);
-       if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
-                       phydev) < 0) {
+       if (request_irq(phydev->irq, phy_interrupt,
+                               IRQF_SHARED,
+                               "phy_interrupt",
+                               phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->mdio.bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
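
Requesting the PHY interrupt with IRQF_SHARED lets boards that route several devices to one line keep interrupt-driven link handling instead of silently dropping back to PHY_POLL. The flip side of sharing is that a handler must recognise interrupts that are not its own; a hedged sketch (the sketch_* names are hypothetical):

    #include <linux/interrupt.h>

    struct sketch_dev { void __iomem *regs; };

    /* Hypothetical status check against the device's own registers. */
    static bool sketch_irq_pending(struct sketch_dev *dev) { return true; }

    static irqreturn_t sketch_handler(int irq, void *dev_id)
    {
            struct sketch_dev *dev = dev_id;

            if (!sketch_irq_pending(dev))
                    return IRQ_NONE;    /* another device on the line */
            /* ... acknowledge and handle ... */
            return IRQ_HANDLED;
    }
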
index cdb19b385d42c878e59bf9bf345dd8bfdfd53eb3..b228bea7931f3c836e9681dfd9376e2b38b8099f 100644 (file)
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/filter.h>
 #include <linux/if_team.h>
 
+static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
+                                     struct sk_buff *skb)
+{
+       if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
+               /* LACPDU packets should go to exact delivery */
+               const unsigned char *dest = eth_hdr(skb)->h_dest;
+
+               if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
+                       return RX_HANDLER_EXACT;
+       }
+       return RX_HANDLER_ANOTHER;
+}
+
 struct lb_priv;
 
 typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
        .port_enter             = lb_port_enter,
        .port_leave             = lb_port_leave,
        .port_disabled          = lb_port_disabled,
+       .receive                = lb_receive,
        .transmit               = lb_transmit,
 };
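
lb_receive() steers 802.3 Slow Protocols frames (EtherType 0x8809) to exact delivery so the LACP state machine on the receiving port sees them, rather than having them re-delivered through the team master. The two tests combined match precisely the Slow Protocols multicast address 01-80-C2-00-00-02; a userspace equivalent of the destination check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* 01-80-C2-00-00-02 is the IEEE 802.3 Slow Protocols (LACP) address. */
    static bool is_lacp_dest(const uint8_t dest[6])
    {
            static const uint8_t base[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

            return memcmp(dest, base, 5) == 0 && dest[5] == 0x02;
    }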
 
index 9c8b5bc2b9d8e851c53cc4d311c245654ebe75e2..6f9df375c5d411dc52510b95ec677f79f1996e4e 100644 (file)
@@ -894,11 +894,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;
 
-       if (skb->sk && sk_fullsock(skb->sk)) {
-               sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
-                                 &skb_shinfo(skb)->tx_flags);
-               sw_tx_timestamp(skb);
-       }
+       skb_tx_timestamp(skb);
 
        /* Orphan the skb - required as we might hang on to it
         * for indefinite time.
index 770212baaf05ddb032967730acc6b0ff46ec8f40..528b9c9c4e60bc2e397b3a1c489a5639d986af9a 100644 (file)
@@ -1009,6 +1009,7 @@ static int kaweth_probe(
        struct net_device *netdev;
        const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        int result = 0;
+       int rv = -EIO;
 
        dev_dbg(dev,
                "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@ static int kaweth_probe(
        kaweth = netdev_priv(netdev);
        kaweth->dev = udev;
        kaweth->net = netdev;
+       kaweth->intf = intf;
 
        spin_lock_init(&kaweth->device_lock);
        init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@ static int kaweth_probe(
                /* Download the firmware */
                dev_info(dev, "Downloading firmware...\n");
                kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+               if (!kaweth->firmware_buf) {
+                       rv = -ENOMEM;
+                       goto err_free_netdev;
+               }
                if ((result = kaweth_download_firmware(kaweth,
                                                      "kaweth/new_code.bin",
                                                      100,
@@ -1139,8 +1145,6 @@ err_fw:
 
        dev_dbg(dev, "Initializing net device.\n");
 
-       kaweth->intf = intf;
-
        kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kaweth->tx_urb)
                goto err_free_netdev;
@@ -1204,7 +1208,7 @@ err_only_tx:
 err_free_netdev:
        free_netdev(netdev);
 
-       return -EIO;
+       return rv;
 }
 
 /****************************************************************
index f41a8ad4740e28ee36d046804b2e987c65805935..c254248863d41b3c6263d96906720f9e2204aa28 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "5"
+#define NET_VERSION            "6"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2552,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
        }
 }
 
+static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
+{
+       ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
+       ocp_reg_write(tp, OCP_EEE_DATA, reg);
+       ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
+}
+
+static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
+{
+       u16 data;
+
+       r8152_mmd_indirect(tp, dev, reg);
+       data = ocp_reg_read(tp, OCP_EEE_DATA);
+       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+
+       return data;
+}
+
+static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
+{
+       r8152_mmd_indirect(tp, dev, reg);
+       ocp_reg_write(tp, OCP_EEE_DATA, data);
+       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
+
+static void r8152_eee_en(struct r8152 *tp, bool enable)
+{
+       u16 config1, config2, config3;
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+       config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
+       config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
+       config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
+
+       if (enable) {
+               ocp_data |= EEE_RX_EN | EEE_TX_EN;
+               config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
+               config1 |= sd_rise_time(1);
+               config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
+               config3 |= fast_snr(42);
+       } else {
+               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+               config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
+                            RX_QUIET_EN);
+               config1 |= sd_rise_time(7);
+               config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
+               config3 |= fast_snr(511);
+       }
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+       ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
+       ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
+       ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+       r8152_eee_en(tp, true);
+       r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+       u16 anar;
+
+       anar = r8152_mdio_read(tp, MII_ADVERTISE);
+       anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+       r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
 static void rtl8152_disable(struct r8152 *tp)
 {
        r8152_aldps_en(tp, false);
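
The MMD helpers moved up above reach Clause-45 register space through the Clause-22 OCP_EEE_AR/OCP_EEE_DATA pair: select the device and register with a FUN_ADDR write, then switch the selector to FUN_DATA for the actual transfer. A hedged usage sketch (tp is the driver context; a read-modify-write of the EEE advertisement in the autonegotiation MMD):

    u16 adv;

    adv = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
    r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv | MDIO_EEE_100TX);
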
@@ -2561,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp)
 
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
-       u16 data;
-
-       data = r8152_mdio_read(tp, MII_BMCR);
-       if (data & BMCR_PDOWN) {
-               data &= ~BMCR_PDOWN;
-               r8152_mdio_write(tp, MII_BMCR, data);
-       }
+       r8152b_enable_eee(tp);
+       r8152_aldps_en(tp, true);
+       r8152b_enable_fc(tp);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -2701,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 }
 
+static void r8153_aldps_en(struct r8152 *tp, bool enable)
+{
+       u16 data;
+
+       data = ocp_reg_read(tp, OCP_POWER_CFG);
+       if (enable) {
+               data |= EN_ALDPS;
+               ocp_reg_write(tp, OCP_POWER_CFG, data);
+       } else {
+               data &= ~EN_ALDPS;
+               ocp_reg_write(tp, OCP_POWER_CFG, data);
+               msleep(20);
+       }
+}
+
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+       u32 ocp_data;
+       u16 config;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+       config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+       if (enable) {
+               ocp_data |= EEE_RX_EN | EEE_TX_EN;
+               config |= EEE10_EN;
+       } else {
+               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+               config &= ~EEE10_EN;
+       }
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+       ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
 static void r8153_hw_phy_cfg(struct r8152 *tp)
 {
        u32 ocp_data;
        u16 data;
 
-       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
-           tp->version == RTL_VER_05)
-               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+       /* disable ALDPS before updating the PHY parameters */
+       r8153_aldps_en(tp, false);
 
-       data = r8152_mdio_read(tp, MII_BMCR);
-       if (data & BMCR_PDOWN) {
-               data &= ~BMCR_PDOWN;
-               r8152_mdio_write(tp, MII_BMCR, data);
-       }
+       /* disable EEE before updating the PHY parameters */
+       r8153_eee_en(tp, false);
+       ocp_reg_write(tp, OCP_EEE_ADV, 0);
 
        if (tp->version == RTL_VER_03) {
                data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2745,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        sram_write(tp, SRAM_10M_AMP1, 0x00af);
        sram_write(tp, SRAM_10M_AMP2, 0x0208);
 
+       r8153_eee_en(tp, true);
+       ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+       r8153_aldps_en(tp, true);
+       r8152b_enable_fc(tp);
+
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2866,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 }
 
-static void r8153_aldps_en(struct r8152 *tp, bool enable)
-{
-       u16 data;
-
-       data = ocp_reg_read(tp, OCP_POWER_CFG);
-       if (enable) {
-               data |= EN_ALDPS;
-               ocp_reg_write(tp, OCP_POWER_CFG, data);
-       } else {
-               data &= ~EN_ALDPS;
-               ocp_reg_write(tp, OCP_POWER_CFG, data);
-               msleep(20);
-       }
-}
-
 static void rtl8153_disable(struct r8152 *tp)
 {
        r8153_aldps_en(tp, false);
@@ -3246,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev)
        return res;
 }
 
-static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
-{
-       ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
-       ocp_reg_write(tp, OCP_EEE_DATA, reg);
-       ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
-}
-
-static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
-{
-       u16 data;
-
-       r8152_mmd_indirect(tp, dev, reg);
-       data = ocp_reg_read(tp, OCP_EEE_DATA);
-       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-
-       return data;
-}
-
-static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
-{
-       r8152_mmd_indirect(tp, dev, reg);
-       ocp_reg_write(tp, OCP_EEE_DATA, data);
-       ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-}
-
-static void r8152_eee_en(struct r8152 *tp, bool enable)
-{
-       u16 config1, config2, config3;
-       u32 ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-       config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
-       config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
-       config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
-
-       if (enable) {
-               ocp_data |= EEE_RX_EN | EEE_TX_EN;
-               config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
-               config1 |= sd_rise_time(1);
-               config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
-               config3 |= fast_snr(42);
-       } else {
-               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
-               config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
-                            RX_QUIET_EN);
-               config1 |= sd_rise_time(7);
-               config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
-               config3 |= fast_snr(511);
-       }
-
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
-       ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
-       ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
-       ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
-}
-
-static void r8152b_enable_eee(struct r8152 *tp)
-{
-       r8152_eee_en(tp, true);
-       r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
-}
-
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
-       u32 ocp_data;
-       u16 config;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-       config = ocp_reg_read(tp, OCP_EEE_CFG);
-
-       if (enable) {
-               ocp_data |= EEE_RX_EN | EEE_TX_EN;
-               config |= EEE10_EN;
-       } else {
-               ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
-               config &= ~EEE10_EN;
-       }
-
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
-       ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153_enable_eee(struct r8152 *tp)
-{
-       r8153_eee_en(tp, true);
-       ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
-}
-
-static void r8152b_enable_fc(struct r8152 *tp)
-{
-       u16 anar;
-
-       anar = r8152_mdio_read(tp, MII_ADVERTISE);
-       anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-       r8152_mdio_write(tp, MII_ADVERTISE, anar);
-}
-
 static void rtl_tally_reset(struct r8152 *tp)
 {
        u32 ocp_data;
@@ -3355,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp)
 static void r8152b_init(struct r8152 *tp)
 {
        u32 ocp_data;
+       u16 data;
 
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
        r8152_aldps_en(tp, false);
 
        if (tp->version == RTL_VER_01) {
@@ -3380,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp)
                   SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
 
-       r8152b_enable_eee(tp);
-       r8152_aldps_en(tp, true);
-       r8152b_enable_fc(tp);
        rtl_tally_reset(tp);
 
        /* enable rx aggregation */
@@ -3394,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp)
 static void r8153_init(struct r8152 *tp)
 {
        u32 ocp_data;
+       u16 data;
        int i;
 
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
-       r8153_aldps_en(tp, false);
        r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
@@ -3416,6 +3413,23 @@ static void r8153_init(struct r8152 *tp)
                msleep(20);
        }
 
+       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+           tp->version == RTL_VER_05)
+               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
+       for (i = 0; i < 500; i++) {
+               ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+               if (ocp_data == PHY_STAT_LAN_ON)
+                       break;
+               msleep(20);
+       }
+
        usb_disable_lpm(tp->udev);
        r8153_u2p3en(tp, false);
 
@@ -3483,9 +3497,6 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
 
-       r8153_enable_eee(tp);
-       r8153_aldps_en(tp, true);
-       r8152b_enable_fc(tp);
        rtl_tally_reset(tp);
        r8153_u2p3en(tp, true);
 }
index c68fe495d3f9bc86caff19f69257ef48b93ad49b..4244b9d4418e15e2cce0772ca8a8f3e07da1c64e 100644 (file)
@@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 {
        struct Vmxnet3_TxDataDesc *tdd;
 
-       tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+       tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
+                                           tq->tx_ring.next2fill *
+                                           tq->txdata_desc_size);
 
        memcpy(tdd->data, skb->data, ctx->copy_size);
        netdev_dbg(adapter->netdev,
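
The hunk above switches to byte arithmetic because the tx data descriptor size is now configurable at runtime; stepping the typed pointer would scale by sizeof(struct Vmxnet3_TxDataDesc) instead. A minimal sketch of the pattern, with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>

	struct desc { uint8_t bytes[128]; };	/* hypothetical descriptor view */

	/* Locate entry i in a ring whose element size is chosen at runtime:
	 * step in bytes, then reinterpret as the descriptor type. */
	static struct desc *ring_entry(void *base, size_t i, size_t desc_size)
	{
		return (struct desc *)((uint8_t *)base + i * desc_size);
	}
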
index 74fc03072b878d456881a9664cc5e13fa45d6a10..7dc37a090549f7ff949e7e5c4a8275128689b980 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.a.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040a00
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index da4e3d6632f647f95b6fdbc0d22abf098afdbe01..6e65832051d6df2f40d8fb280b8dd23716ffbcdf 100644 (file)
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_proto = IPPROTO_UDP;
        fl4.daddr = daddr;
-       fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+       fl4.saddr = *saddr;
 
        rt = ip_route_output_key(vxlan->net, &fl4);
        if (!IS_ERR(rt)) {
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.daddr = *daddr;
-       fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+       fl6.saddr = *saddr;
        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
        fl6.flowi6_mark = skb->mark;
        fl6.flowi6_proto = IPPROTO_UDP;
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        union vxlan_addr *dst;
-       union vxlan_addr remote_ip;
+       union vxlan_addr remote_ip, local_ip;
+       union vxlan_addr *src;
        struct vxlan_metadata _md;
        struct vxlan_metadata *md = &_md;
        __be16 src_port = 0, dst_port;
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
                vni = rdst->remote_vni;
                dst = &rdst->remote_ip;
+               src = &vxlan->cfg.saddr;
                dst_cache = &rdst->dst_cache;
        } else {
                if (!info) {
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
                vni = vxlan_tun_id_to_vni(info->key.tun_id);
                remote_ip.sa.sa_family = ip_tunnel_info_af(info);
-               if (remote_ip.sa.sa_family == AF_INET)
+               if (remote_ip.sa.sa_family == AF_INET) {
                        remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
-               else
+                       local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
+               } else {
                        remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+                       local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+               }
                dst = &remote_ip;
+               src = &local_ip;
                dst_cache = &info->dst_cache;
        }
 
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->sa.sa_family == AF_INET) {
-               __be32 saddr;
-
                if (!vxlan->vn4_sock)
                        goto drop;
                sk = vxlan->vn4_sock->sock->sk;
 
                rt = vxlan_get_route(vxlan, skb,
                                     rdst ? rdst->remote_ifindex : 0, tos,
-                                    dst->sin.sin_addr.s_addr, &saddr,
+                                    dst->sin.sin_addr.s_addr,
+                                    &src->sin.sin_addr.s_addr,
                                     dst_cache, info);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                /* Bypass encapsulation if the destination is local */
-               if (rt->rt_flags & RTCF_LOCAL &&
+               if (!info && rt->rt_flags & RTCF_LOCAL &&
                    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
                        struct vxlan_dev *dst_vxlan;
 
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                if (err < 0)
                        goto xmit_tx_error;
 
-               udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+               udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
                                    dst->sin.sin_addr.s_addr, tos, ttl, df,
                                    src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct dst_entry *ndst;
-               struct in6_addr saddr;
                u32 rt6i_flags;
 
                if (!vxlan->vn6_sock)
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                ndst = vxlan6_get_route(vxlan, skb,
                                        rdst ? rdst->remote_ifindex : 0, tos,
-                                       label, &dst->sin6.sin6_addr, &saddr,
+                                       label, &dst->sin6.sin6_addr,
+                                       &src->sin6.sin6_addr,
                                        dst_cache, info);
                if (IS_ERR(ndst)) {
                        netdev_dbg(dev, "no route to %pI6\n",
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                /* Bypass encapsulation if the destination is local */
                rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
-               if (rt6i_flags & RTF_LOCAL &&
+               if (!info && rt6i_flags & RTF_LOCAL &&
                    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
                        struct vxlan_dev *dst_vxlan;
 
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        return;
                }
                udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-                                    &saddr, &dst->sin6.sin6_addr, tos, ttl,
+                                    &src->sin6.sin6_addr,
+                                    &dst->sin6.sin6_addr, tos, ttl,
                                     label, src_port, dst_port, !udp_sum);
 #endif
        }
@@ -2776,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        struct net_device *lowerdev = NULL;
 
        if (conf->flags & VXLAN_F_GPE) {
-               if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
-                       return -EINVAL;
                /* For now, allow GPE only together with COLLECT_METADATA.
                 * This can be relaxed later; in such case, the other side
                 * of the PtP link will have to be provided.
                 */
-               if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+               if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+                   !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+                       pr_info("unsupported combination of extensions\n");
                        return -EINVAL;
+               }
 
                vxlan_raw_setup(dev);
        } else {
@@ -2836,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
                needed_headroom = lowerdev->hard_header_len;
+       } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+               pr_info("multicast destination requires interface to be specified\n");
+               return -EINVAL;
        }
 
        if (conf->mtu) {
@@ -2868,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
                    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
                    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
-                   (vxlan->flags & VXLAN_F_RCV_FLAGS))
-               return -EEXIST;
+                   (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
+                       pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
+                       return -EEXIST;
+               }
        }
 
        dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2903,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
        struct vxlan_config conf;
-       int err;
 
        memset(&conf, 0, sizeof(conf));
 
@@ -3012,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_MTU])
                conf.mtu = nla_get_u32(tb[IFLA_MTU]);
 
-       err = vxlan_dev_configure(src_net, dev, &conf);
-       switch (err) {
-       case -ENODEV:
-               pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
-               break;
-
-       case -EPERM:
-               pr_info("IPv6 is disabled via sysctl\n");
-               break;
-
-       case -EEXIST:
-               pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
-               break;
-
-       case -EINVAL:
-               pr_info("unsupported combination of extensions\n");
-               break;
-       }
-
-       return err;
+       return vxlan_dev_configure(src_net, dev, &conf);
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
index 78db5d679f1980188c788163d4f54fa283fc83f2..24c8d65bcf34340cfda9ae59f65d6a5ec33f1cc3 100644 (file)
@@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
-       static struct ieee80211_rx_status rx_status;
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
        int ret;
 
@@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
                return ret;
        }
 
-       ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
        ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
-       ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+       ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
 
        return 0;
 }
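
The fix above matters because a function-scope static is a single object shared by every caller and every device; with more than one ath10k instance, concurrent RX handling could clobber the shared rx_status. A minimal sketch of the per-instance alternative, with hypothetical names:

	struct rx_status { int signal; };		/* hypothetical */

	struct dev_ctx { struct rx_status rx_status; };	/* one per device */

	static void handle_rx(struct dev_ctx *dev, int signal)
	{
		/* per-device storage; not "static struct rx_status st;" */
		struct rx_status *st = &dev->rx_status;

		st->signal = signal;
	}
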
index 9a22c478dd1b1dd28fd5139ede1b7bed4063ec42..07933c51a8508ef8cc7b4eef85309e3e2cdd8991 100644 (file)
@@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
                break;
        case QCA9887_1_0_DEVICE_ID:
-               dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
                hw_rev = ATH10K_HW_QCA9887;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
index d1d0c06d627cb058df27e90a69bec0209e94c980..14b13f07cd1fa87658f22f8352ff6bc32b3935ac 100644 (file)
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                return -EINVAL;
        }
 
+       ath9k_gpio_cap_init(ah);
+
        if (AR_SREV_9485(ah) ||
            AR_SREV_9285(ah) ||
            AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-       ath9k_gpio_cap_init(ah);
-
        if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
                pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
        else
index a394622c9022c09488c59a85b878021c7c7b11a0..7cb65c303f8d95bb435f74d8980bcd94c54be90e 100644 (file)
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (!ath_complete_reset(sc, false))
                ah->reset_power_on = false;
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 1 : 0);
+               ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       }
 
        /*
         * Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        spin_lock_bh(&sc->sc_pcu_lock);
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 0 : 1);
+               ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
+       }
 
        ath_prepare_reset(sc);
 
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
                bool changed = (iter_data.primary_sta != ctx->primary_sta);
 
                if (iter_data.primary_sta) {
+                       iter_data.primary_beacon_vif = iter_data.primary_sta;
                        iter_data.beacons = true;
                        ath9k_set_assoc_state(sc, iter_data.primary_sta,
                                              changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
-       if (old_state == IEEE80211_STA_AUTH &&
-           new_state == IEEE80211_STA_ASSOC) {
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
                ret = ath9k_sta_add(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Add station: %pM\n", sta->addr);
-       } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH) {
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
                ret = ath9k_sta_remove(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Remove station: %pM\n", sta->addr);
index 2628d5e12c6424b657b7dd9138c277ef0a9d5d47..b8aec5e5ef93e28dfb70c95557d7388fd9b67bef 100644 (file)
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                                (u8 *)&settings->beacon.head[ie_offset],
                                settings->beacon.head_len - ie_offset,
                                WLAN_EID_SSID);
-               if (!ssid_ie)
+               if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
                        return -EINVAL;
 
                memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                  ifevent->action, ifevent->flags, ifevent->ifidx,
                  ifevent->bsscfgidx);
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->action = ifevent->action;
        vif = event->vif;
 
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
        case BRCMF_E_IF_ADD:
                /* waiting process may have timed out */
                if (!cfg->vif_event.vif) {
-                       mutex_unlock(&event->vif_event_lock);
+                       spin_unlock(&event->vif_event_lock);
                        return -EBADF;
                }
 
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                        ifp->ndev->ieee80211_ptr = &vif->wdev;
                        SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
                }
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_DEL:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                /* event may not be upon user request */
                if (brcmf_cfg80211_vif_event_armed(cfg))
                        wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_CHANGE:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        default:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                break;
        }
        return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
 {
        init_waitqueue_head(&event->vif_wq);
-       mutex_init(&event->vif_event_lock);
+       spin_lock_init(&event->vif_event_lock);
 }
 
 static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
 {
        u8 evt_action;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        evt_action = event->action;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
        return evt_action == action;
 }
 
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
 {
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->vif = vif;
        event->action = 0;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 }
 
 bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
        bool armed;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        armed = event->vif != NULL;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 
        return armed;
 }
index 7d77f869b7f13be772fc039b5be3a6452dabef25..8889832c17e0dae310029981d6778fdf08ae08dd 100644 (file)
@@ -227,7 +227,7 @@ struct escan_info {
  */
 struct brcmf_cfg80211_vif_event {
        wait_queue_head_t vif_wq;
-       struct mutex vif_event_lock;
+       spinlock_t vif_event_lock;
        u8 action;
        struct brcmf_cfg80211_vif *vif;
 };
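
The conversion through this file trades the mutex for a spinlock: a mutex may sleep and so cannot be taken from atomic context, while a spinlock suits the short, non-sleeping critical sections these accessors have. A sketch of the pattern, assuming the critical section never sleeps (struct and names are illustrative):

	#include <linux/spinlock.h>

	struct vif_event_state {
		spinlock_t lock;	/* was: struct mutex */
		u8 action;
	};

	static u8 vif_event_action(struct vif_event_state *ev)
	{
		u8 action;

		spin_lock(&ev->lock);	/* was: mutex_lock() */
		action = ev->action;
		spin_unlock(&ev->lock);	/* was: mutex_unlock() */

		return action;
	}
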
index 8d16f0204985ad3997c00dd9ba479b890d213393..65e8c8766441b718e886e3aa07095ce01fbb8061 100644 (file)
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
                 * serious troublesome side effects. The p2p module will clean
                 * up the ifp if needed.
                 */
-               brcmf_p2p_ifp_removed(ifp);
+               brcmf_p2p_ifp_removed(ifp, rtnl_locked);
                kfree(ifp);
        }
 }
index 66f942f7448eabb2297fe34a56d4b2eb22c8c8cd..de19c7c92bc6c095b3b111abfc280fe228e588ac 100644 (file)
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        return err;
 }
 
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
 {
        struct brcmf_cfg80211_info *cfg;
        struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       rtnl_lock();
+       if (!rtnl_locked)
+               rtnl_lock();
        cfg80211_unregister_wdev(&vif->wdev);
-       rtnl_unlock();
+       if (!rtnl_locked)
+               rtnl_unlock();
        brcmf_free_vif(vif);
 }
 
index a3bd18c2360b5f4a5da48ce0bca549b0eeced334..8ce9447533ef8fa519caab0f985949487346464d 100644 (file)
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
                       enum brcmf_fil_p2p_if_types if_type);
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
 int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_scan_prep(struct wiphy *wiphy,
index 1abcabb9b6cd78d675c5a0585eb4ee59d98c3b1e..46b52bf705fb438bb40aabbe6fd51698bba982eb 100644 (file)
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        }
 
        mvm->fw_dbg_conf = conf_id;
-       return ret;
+
+       return 0;
 }
index f7dff7612c9c58e4ffb84f7aaa427245f5c77ad0..e9f1be9da7d41613b7a124111d1af85064625198 100644 (file)
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
 {
        u32 trig_vif = le32_to_cpu(trig->vif_type);
 
-       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+              ieee80211_vif_type_p2p(vif) == trig_vif;
 }
 
 static inline bool
index 6d6064534d590c76f71444bfa7f3b0c9163ad003..5dd77e33661727338aca43e3ad93802ef6db0bc7 100644 (file)
@@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
                               NL80211_FEATURE_P2P_GO_OPPPS |
+                              NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
                               NL80211_FEATURE_DYNAMIC_SMPS |
                               NL80211_FEATURE_STATIC_SMPS |
                               NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
index b4fc86d5d7ef1252cde57cfb872c390d698b7d16..6a615bb73042504b0c7912805ce36cb70690b44b 100644 (file)
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
 static inline struct iwl_mvm_vif *
 iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 {
+       if (!vif)
+               return NULL;
        return (void *)vif->drv_priv;
 }
 
index c6585ab48df3b5e15698bdfbfbaecc89333cf782..b3a87a31de30ebb21877b9ebeeababe56b5fff5a 100644 (file)
@@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        int queue;
 
+       /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
+        * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
+        * queue. STATION (HS2.0) uses the auxiliary context of the FW,
+        * and hence needs to be sent on the aux queue
+        */
+       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+           skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+               IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+
        memcpy(&info, skb->cb, sizeof(info));
 
        if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        /* This holds the amsdu headers length */
        skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
-       /*
-        * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
-        * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
-        * queue. STATION (HS2.0) uses the auxiliary context of the FW,
-        * and hence needs to be sent on the aux queue
-        */
-       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           info.control.vif->type == NL80211_IFTYPE_STATION)
-               IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
-
        queue = info.hw_queue;
 
        /*
index dc49c3de1f25db6945942449aaa62580a581e51e..c47d6366875d06c5c0c3ceb2af29f31bd55e5158 100644 (file)
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        do {
                /* Check if AMSDU can accommodate this MSDU */
-               if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+               if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
+                   adapter->tx_buf_size)
                        break;
 
                skb_src = skb_dequeue(&pra_list->skb_head);
index 1d689169da768167cea253a396dff3771220c5a1..9e1f2d9c98659279d85c914247c7b7f95a62b8c7 100644 (file)
@@ -5700,10 +5700,11 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta)
+static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
+                                            struct ieee80211_sta *sta)
 {
        struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
-       struct wl1271 *wl = wl_sta->wl;
+       struct wl1271 *wl = hw->priv;
        u8 hlid = wl_sta->hlid;
 
        /* return in units of Kbps */
index 6a31f2610c2378185c4274cd9828cd5117ca61ea..daf4c78671024a24ea38d2037d7d955d83df3fe6 100644 (file)
@@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);
 
+       be->state = XenbusStateInitialising;
+       err = xenbus_switch_state(dev, XenbusStateInitialising);
+       if (err)
+               goto fail;
+
        sg = 1;
 
        do {
@@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
 
        be->hotplug_script = script;
 
-       err = xenbus_switch_state(dev, XenbusStateInitWait);
-       if (err)
-               goto fail;
-
-       be->state = XenbusStateInitWait;
 
        /* This kicks hotplug scripts, so do it immediately. */
        err = backend_create_xenvif(be);
@@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
 
 /* Handle backend state transitions:
  *
- * The backend state starts in InitWait and the following transitions are
+ * The backend state starts in Initialising and the following transitions are
  * allowed.
  *
- * InitWait -> Connected
- *
- *    ^    \         |
- *    |     \        |
- *    |      \       |
- *    |       \      |
- *    |        \     |
- *    |         \    |
- *    |          V   V
+ * Initialising -> InitWait -> Connected
+ *          \
+ *           \        ^    \         |
+ *            \       |     \        |
+ *             \      |      \       |
+ *              \     |       \      |
+ *               \    |        \     |
+ *                \   |         \    |
+ *                 V  |          V   V
  *
- *  Closed  <-> Closing
+ *                  Closed  <-> Closing
  *
  * The state argument specifies the eventual state of the backend and the
  * function transitions to that state via the shortest path.
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
 {
        while (be->state != state) {
                switch (be->state) {
+               case XenbusStateInitialising:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                       case XenbusStateClosing:
+                               backend_switch_state(be, XenbusStateInitWait);
+                               break;
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosed);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
                case XenbusStateClosed:
                        switch (state) {
                        case XenbusStateInitWait:
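
With the new Initialising arm, set_backend_state() can still drive the backend to any eventual state, taking one legal hop per loop iteration. For example, assuming the transitions shown in the diagram above:

	/* Reaching Connected from the new initial state takes two hops,
	 * Initialising -> InitWait -> Connected, one per loop iteration. */
	set_backend_state(be, XenbusStateConnected);
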
index 88e91666f145f0e7b0e95bf560ecc5e78f2ada22..368795aad5c974dbb59b1a43825b76c3e443acd2 100644 (file)
@@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt)
                }
        }
        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+       btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
        revalidate_disk(btt->btt_disk);
 
        return 0;
index 3fa7919f94a8785860afd3487d803f5b3010acd9..97dd2925ed6e95f1f06ffa6f4a0b5643acd4c07a 100644 (file)
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(namespace);
 
+static ssize_t size_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nd_btt *nd_btt = to_nd_btt(dev);
+       ssize_t rc;
+
+       device_lock(dev);
+       if (dev->driver)
+               rc = sprintf(buf, "%llu\n", nd_btt->size);
+       else {
+               /* no size to convey if the btt instance is disabled */
+               rc = -ENXIO;
+       }
+       device_unlock(dev);
+
+       return rc;
+}
+static DEVICE_ATTR_RO(size);
+
 static struct attribute *nd_btt_attributes[] = {
        &dev_attr_sector_size.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
+       &dev_attr_size.attr,
        NULL,
 };
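
The new attribute surfaces the BTT capacity in sysfs while the instance is bound; when disabled, reads fail with ENXIO. A hypothetical userspace consumer (the device name "btt0.0" and its path are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long size;
		FILE *f = fopen("/sys/bus/nd/devices/btt0.0/size", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &size) != 1) {
			fclose(f);
			return 1;	/* a disabled instance reads as an error */
		}
		printf("btt size: %llu bytes\n", size);
		fclose(f);
		return 0;
	}
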
 
index 458daf9273362a19cc26d6a4f2c9113764f493b8..935866fe5ec2ea7c47d3f192f2b17572470098c1 100644 (file)
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
                return -ENXIO;
 
        nd_desc = nvdimm_bus->nd_desc;
+       /*
+        * if ndctl does not exist, it's PMEM_LEGACY and
+        * we want to just pretend everything is handled.
+        */
        if (!nd_desc->ndctl)
-               return -ENXIO;
+               return len;
 
        memset(&ars_cap, 0, sizeof(ars_cap));
        ars_cap.address = phys;
index 40476399d22793aece0438da0f5a0976cef063ab..8024a0ef86d3af9f0ba5ef169260e2e342023d8e 100644 (file)
@@ -143,6 +143,7 @@ struct nd_btt {
        struct nd_namespace_common *ndns;
        struct btt *btt;
        unsigned long lbasize;
+       u64 size;
        u8 *uuid;
        int id;
 };
index db39d53cdfb9fbd63258a426d4539b63d1f92701..f7d37a62f874b780c94a0bbbf3d2607845c0c5ab 100644 (file)
@@ -30,8 +30,8 @@ config NVME_FABRICS
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
-       depends on INFINIBAND
-       depends on BLK_DEV_NVME
+       depends on INFINIBAND && BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index 7ff2e820bbf473e8f9d31a19c17cfd6ec1ccb833..2feacc70bf61f10892ee4b7bddd97bb93e4fcd7d 100644 (file)
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state)
 {
-       enum nvme_ctrl_state old_state = ctrl->state;
+       enum nvme_ctrl_state old_state;
        bool changed = false;
 
        spin_lock_irq(&ctrl->lock);
+
+       old_state = ctrl->state;
        switch (new_state) {
        case NVME_CTRL_LIVE:
                switch (old_state) {
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        default:
                break;
        }
-       spin_unlock_irq(&ctrl->lock);
 
        if (changed)
                ctrl->state = new_state;
 
+       spin_unlock_irq(&ctrl->lock);
+
        return changed;
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
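
The reordering above closes a window where the state snapshot was taken before the lock and the new state written after the unlock. A generic sketch of the corrected shape (struct ctrl, enum ctrl_state, and transition_allowed() are hypothetical):

	static bool change_state(struct ctrl *ctrl, enum ctrl_state new_state)
	{
		enum ctrl_state old_state;
		bool changed;

		spin_lock_irq(&ctrl->lock);
		old_state = ctrl->state;		/* snapshot under the lock */
		changed = transition_allowed(old_state, new_state);
		if (changed)
			ctrl->state = new_state;	/* publish before unlock */
		spin_unlock_irq(&ctrl->lock);

		return changed;
	}
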
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0);
-       if (ret >= 0)
+       if (ret >= 0 && result)
                *result = le32_to_cpu(cqe.result);
        return ret;
 }
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0);
-       if (ret >= 0)
+       if (ret >= 0 && result)
                *result = le32_to_cpu(cqe.result);
        return ret;
 }
index dc996761042ffb61e3fc38d4d92b2d43fcfea530..4eff4917446617f448262846ee6860b36e7f4340 100644 (file)
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        mutex_lock(&nvmf_hosts_mutex);
        host = __nvmf_host_find(hostnqn);
-       if (host)
+       if (host) {
+               kref_get(&host->ref);
                goto out_unlock;
+       }
 
        host = kmalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        kref_init(&host->ref);
        memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
 
        list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
-               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
        mutex_lock(&nvmf_hosts_mutex);
        list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        cmd.connect.opcode = nvme_fabrics_command;
        cmd.connect.fctype = nvme_fabrics_type_connect;
        cmd.connect.qid = 0;
-       cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+       /*
+        * The fabrics spec sets a minimum depth of 32 for the admin
+        * queue, so always use this depth until there is justification
+        * otherwise.
+        */
+       cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
        /*
         * Set keep-alive timeout in seconds granularity (ms * 1000)
         * and add a grace period for controller kato enforcement
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(0xffff);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(ctrl->cntlid);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
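
The first hunk in this file fixes a refcount imbalance: without the kref_get(), a reused host could be freed while earlier users still held it. A sketch of the lookup-or-create rule (names are illustrative): every path out of the function must leave the caller owning exactly one reference.

	static struct host *host_add(const char *nqn)
	{
		struct host *h;

		mutex_lock(&hosts_mutex);
		h = __host_find(nqn);		/* plain list walk, no ref taken */
		if (h) {
			kref_get(&h->ref);	/* reuse: one ref per caller */
			goto out_unlock;
		}

		h = kzalloc(sizeof(*h), GFP_KERNEL);
		if (h) {
			kref_init(&h->ref);	/* create: refcount starts at 1 */
			list_add_tail(&h->list, &hosts);
		}
	out_unlock:
		mutex_unlock(&hosts_mutex);
		return h;
	}
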
index 89df52c8be978cdf2e18b2c0b18a6211642ff8ad..46e460aee52da54306fd4121e73ab997e6e01f5b 100644 (file)
@@ -34,7 +34,7 @@ struct nvmf_host {
        struct kref             ref;
        struct list_head        list;
        char                    nqn[NVMF_NQN_SIZE];
-       uuid_le                 id;
+       uuid_be                 id;
 };
 
 /**
index d7c33f9361aa0361d762d1da31368f03bdca3082..60f7eab11865114ae1a13d7a811fe36d862d4029 100644 (file)
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
                reinit_completion(&dev->ioq_wait);
  retry:
                timeout = ADMIN_TIMEOUT;
-               for (; i > 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
-
-                       if (!pass)
-                               nvme_suspend_queue(nvmeq);
-                       if (nvme_delete_queue(nvmeq, opcode))
+               for (; i > 0; i--, sent++)
+                       if (nvme_delete_queue(dev->queues[i], opcode))
                                break;
-                       ++sent;
-               }
+
                while (sent--) {
                        timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
                        if (timeout == 0)
@@ -1693,11 +1688,17 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
+
+       for (i = dev->queue_count - 1; i > 0; i--)
+               nvme_suspend_queue(dev->queues[i]);
+
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-               for (i = dev->queue_count - 1; i >= 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
-                       nvme_suspend_queue(nvmeq);
-               }
+               /* A device might become IO incapable very soon during
+                * probe, before the admin queue is configured. Thus,
+                * queue_count can be 0 here.
+                */
+               if (dev->queue_count)
+                       nvme_suspend_queue(dev->queues[0]);
        } else {
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
@@ -2116,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+       { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
        { 0, }
index 3e3ce2b0424e4844d759bcf395d4177726d4e301..fbdb2267e4603499021b39382d932c571874c505 100644 (file)
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/jiffies.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/types.h>
@@ -26,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/nvme.h>
-#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 
 #include <rdma/ib_verbs.h>
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS  1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR     512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY  20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -80,7 +73,6 @@ struct nvme_rdma_request {
        u32                     num_sge;
        int                     nents;
        bool                    inline_data;
-       bool                    need_inval;
        struct ib_reg_wr        reg_wr;
        struct ib_cqe           reg_cqe;
        struct nvme_rdma_queue  *queue;
@@ -90,6 +82,8 @@ struct nvme_rdma_request {
 
 enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_CONNECTED = (1 << 0),
+       NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+       NVME_RDMA_Q_DELETING = (1 << 2),
 };
 
 struct nvme_rdma_queue {
@@ -169,7 +163,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -290,7 +283,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        int ret = 0;
 
-       if (!req->need_inval)
+       if (!req->mr->need_inval)
                goto out;
 
        ib_dereg_mr(req->mr);
@@ -300,9 +293,10 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
        if (IS_ERR(req->mr)) {
                ret = PTR_ERR(req->mr);
                req->mr = NULL;
+               goto out;
        }
 
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
 out:
        return ret;
@@ -489,9 +483,14 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev = queue->device;
-       struct ib_device *ibdev = dev->dev;
+       struct nvme_rdma_device *dev;
+       struct ib_device *ibdev;
+
+       if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+               return;
 
+       dev = queue->device;
+       ibdev = dev->dev;
        rdma_destroy_qp(queue->cm_id);
        ib_free_cq(queue->ib_cq);
 
@@ -542,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
                ret = -ENOMEM;
                goto out_destroy_qp;
        }
+       set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
        return 0;
 
@@ -594,11 +594,13 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
                goto out_destroy_cm_id;
        }
 
+       clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
        set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
        return 0;
 
 out_destroy_cm_id:
+       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
        return ret;
 }
@@ -617,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
 {
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+       if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
                return;
        nvme_rdma_stop_queue(queue);
        nvme_rdma_free_queue(queue);
@@ -649,7 +651,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
        int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++) {
-               ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+               ret = nvme_rdma_init_queue(ctrl, i,
+                                          ctrl->ctrl.opts->queue_size);
                if (ret) {
                        dev_info(ctrl->ctrl.device,
                                "failed to initialize i/o queue: %d\n", ret);
@@ -660,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
        return 0;
 
 out_free_queues:
-       for (; i >= 1; i--)
+       for (i--; i >= 1; i--)
                nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 
        return ret;
@@ -687,11 +690,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       if (ctrl->ctrl.tagset) {
-               blk_cleanup_queue(ctrl->ctrl.connect_q);
-               blk_mq_free_tag_set(&ctrl->tag_set);
-               nvme_rdma_dev_put(ctrl->device);
-       }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -748,8 +746,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       if (ctrl->queue_count > 1)
+       if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
+               nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
+       }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
@@ -771,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, err_work);
+       int i;
 
        nvme_stop_keep_alive(&ctrl->ctrl);
+
+       for (i = 0; i < ctrl->queue_count; i++)
+               clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
        if (ctrl->queue_count > 1)
                nvme_stop_queues(&ctrl->ctrl);
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -855,7 +861,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->need_inval) {
+       if (req->mr->need_inval) {
                res = nvme_rdma_inv_rkey(queue, req);
                if (res < 0) {
                        dev_err(ctrl->ctrl.device,
@@ -941,7 +947,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->need_inval = true;
+       req->mr->need_inval = true;
 
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
@@ -964,7 +970,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1151,7 +1157,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
        if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
            wc->ex.invalidate_rkey == req->mr->rkey)
-               req->need_inval = false;
+               req->mr->need_inval = false;
 
        blk_mq_complete_request(rq, status);
 
@@ -1269,7 +1275,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct rdma_conn_param param = { };
-       struct nvme_rdma_cm_req priv;
+       struct nvme_rdma_cm_req priv = { };
        int ret;
 
        param.qp_num = queue->qp->qp_num;
@@ -1284,8 +1290,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
        priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
        priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-       priv.hrqsize = cpu_to_le16(queue->queue_size);
-       priv.hsqsize = cpu_to_le16(queue->queue_size);
+       /*
+        * set the admin queue depth to the minimum size
+        * specified by the Fabrics standard.
+        */
+       if (priv.qid == 0) {
+               priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+               priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+       } else {
+               /*
+                * The current interpretation of the fabrics spec is
+                * that, at minimum, hrqsize is sqsize + 1, i.e. the
+                * 1's-based representation of sqsize.
+                */
+               priv.hrqsize = cpu_to_le16(queue->queue_size);
+               priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+       }
 
        ret = rdma_connect(queue->cm_id, &param);
        if (ret) {
@@ -1301,56 +1321,6 @@ out_destroy_queue_ib:
        return ret;
 }
 
-/**
- * nvme_rdma_device_unplug() - Handle RDMA device unplug
- * @queue:      Queue that owns the cm_id that caught the event
- *
- * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
- *
- * In our case, the RDMA resources are managed per controller and not
- * only per queue. So the way we handle this is we trigger an implicit
- * controller deletion upon the first DEVICE_REMOVAL event we see, and
- * hold the event inflight until the controller deletion is completed.
- *
- * One exception that we need to handle is the destruction of the cm_id
- * that caught the event. Since we hold the callout until the controller
- * deletion is completed, we'll deadlock if the controller deletion will
- * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
- */
-static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
-{
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret, ctrl_deleted = 0;
-
-       /* First disable the queue so ctrl delete won't free it */
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               goto out;
-
-       /* delete the controller */
-       ret = __nvme_rdma_del_ctrl(ctrl);
-       if (!ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "Got rdma device removal event, deleting ctrl\n");
-               flush_work(&ctrl->delete_work);
-
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ctrl_deleted = 1;
-
-               /* Free this queue ourselves */
-               rdma_disconnect(queue->cm_id);
-               ib_drain_qp(queue->qp);
-               nvme_rdma_destroy_queue_ib(queue);
-       }
-
-out:
-       return ctrl_deleted;
-}
-
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *ev)
 {
@@ -1392,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                nvme_rdma_error_recovery(queue->ctrl);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
-               /* return 1 means impliciy CM ID destroy */
-               return nvme_rdma_device_unplug(queue);
+               /* device removal is handled via the ib_client API */
+               break;
        default:
                dev_err(queue->ctrl->ctrl.device,
                        "Unexpected RDMA CM event (%d)\n", ev->event);
@@ -1465,7 +1435,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
                flush = true;
        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
        if (ret) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1648,7 +1618,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1627,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
        nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+       nvme_uninit_ctrl(&ctrl->ctrl);
+       if (shutdown)
+               nvme_rdma_shutdown_ctrl(ctrl);
+
+       if (ctrl->ctrl.tagset) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               blk_mq_free_tag_set(&ctrl->tag_set);
+               nvme_rdma_dev_put(ctrl->device);
+       }
+
+       nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1682,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
 static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-       int ret;
+       int ret = 0;
 
+       /*
+        * Keep a reference until all work is flushed since
+        * __nvme_rdma_del_ctrl can free the ctrl mem
+        */
+       if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+               return -EBUSY;
        ret = __nvme_rdma_del_ctrl(ctrl);
-       if (ret)
-               return ret;
-
-       flush_work(&ctrl->delete_work);
-
-       return 0;
+       if (!ret)
+               flush_work(&ctrl->delete_work);
+       nvme_put_ctrl(&ctrl->ctrl);
+       return ret;
 }
 
 static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
@@ -1698,9 +1684,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1723,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        return;
@@ -1809,7 +1794,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1907,7 +1892,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        spin_lock_init(&ctrl->lock);
 
        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ret = -ENOMEM;
@@ -1988,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
        .create_ctrl    = nvme_rdma_create_ctrl,
 };
 
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+       struct nvme_rdma_ctrl *ctrl;
+
+       /* Delete all controllers using this device */
+       mutex_lock(&nvme_rdma_ctrl_mutex);
+       list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+               if (ctrl->device->dev != ib_device)
+                       continue;
+               dev_info(ctrl->ctrl.device,
+                       "Removing ctrl: NQN \"%s\", addr %pISp\n",
+                       ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+               __nvme_rdma_del_ctrl(ctrl);
+       }
+       mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+       flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+       .name   = "nvme_rdma",
+       .add = nvme_rdma_add_one,
+       .remove = nvme_rdma_remove_one
+};
+
 static int __init nvme_rdma_init_module(void)
 {
+       int ret;
+
        nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
        if (!nvme_rdma_wq)
                return -ENOMEM;
 
+       ret = ib_register_client(&nvme_rdma_ib_client);
+       if (ret) {
+               destroy_workqueue(nvme_rdma_wq);
+               return ret;
+       }
+
        nvmf_register_transport(&nvme_rdma_transport);
        return 0;
 }
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
-       struct nvme_rdma_ctrl *ctrl;
-
        nvmf_unregister_transport(&nvme_rdma_transport);
-
-       mutex_lock(&nvme_rdma_ctrl_mutex);
-       list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
-               __nvme_rdma_del_ctrl(ctrl);
-       mutex_unlock(&nvme_rdma_ctrl_mutex);
-
+       ib_unregister_client(&nvme_rdma_ib_client);
        destroy_workqueue(nvme_rdma_wq);
 }
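
Both this driver and the loop target below now set ctrl->ctrl.sqsize to opts->queue_size - 1 while the blk-mq tag set keeps the full queue_size: NVMe communicates submission queue sizes as 0's-based values. A hypothetical helper spelling out the convention:

	/* A queue that holds N entries is encoded on the wire as N - 1;
	 * in-kernel depths (e.g. the tag set) stay 1's-based. */
	static inline u16 nvme_zeros_based(u16 entries)
	{
		return entries - 1;
	}
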
 
index a5c31cbeb4815efe58f8b5ff60a009ea97c62d31..3a5b9d0576cb0f71a5bf22a34c905ae8dfc634fb 100644 (file)
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
        tristate "NVMe loopback device support"
-       depends on BLK_DEV_NVME
        depends on NVME_TARGET
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index 2fac17a5ad53a8aba3b2f9cb84369887dfaa88f6..47c564b5a2895198b020a26851b824350f10006e 100644 (file)
@@ -13,7 +13,6 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
-#include <linux/random.h>
 #include <generated/utsrelease.h>
 #include "nvmet.h"
 
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
-       u64 serial;
        u16 status = 0;
 
        id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
        id->vid = 0;
        id->ssvid = 0;
 
-       /* generate a random serial number as our controllers are ephemeral: */
-       get_random_bytes(&serial, sizeof(serial));
        memset(id->sn, ' ', sizeof(id->sn));
-       snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+       snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
 
        memset(id->mn, ' ', sizeof(id->mn));
        strncpy((char *)id->mn, "Linux", sizeof(id->mn));
index 8a891ca53367eaa6498f8326e745b081a110157e..6559d5afa7bfd9f808281658f686429c53fc7903 100644 (file)
@@ -13,6 +13,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/random.h>
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
+       /* generate a random serial number as our controllers are ephemeral: */
+       get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
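
Generating the serial once in nvmet_alloc_ctrl, rather than on every Identify as the earlier hunk removes, gives the controller a stable serial number for its whole lifetime. A sketch of how the value lands in the 20-byte, space-padded SN field, mirroring the patch's approach:

    u64 serial;
    char sn[20];

    get_random_bytes(&serial, sizeof(serial));  /* once, at controller alloc */
    memset(sn, ' ', sizeof(sn));                /* NVMe SN is space-padded ASCII */
    snprintf(sn, sizeof(sn), "%llx", serial);   /* every Identify now agrees */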
 
index 94e782987cc9eb9b2bd29d0a1534811030e7850a..395e60dad83542ff5300107a68bf73caeedaf6dd 100644 (file)
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
+       nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
@@ -501,7 +500,6 @@ out_free_queues:
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       nvme_remove_namespaces(&ctrl->ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
@@ -558,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -622,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
        ret = -ENOMEM;
 
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
index 57dd6d834c28a84a2c8960f84737ec9d082d67e1..76b6eedccaf92ce4708cc4b730c03775d5469f0f 100644 (file)
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
 
        struct mutex            lock;
        u64                     cap;
+       u64                     serial;
        u32                     cc;
        u32                     csts;
 
index e06d504bdf0c81aad35c2643c299abec105e7337..1cbe6e053b5b89f2187ae520e9a7fe792dd8cafa 100644 (file)
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
        NVMET_RDMA_Q_CONNECTING,
        NVMET_RDMA_Q_LIVE,
        NVMET_RDMA_Q_DISCONNECTING,
+       NVMET_RDMA_IN_DEVICE_REMOVAL,
 };
 
 struct nvmet_rdma_queue {
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
        if (!len)
                return 0;
 
-       /* use the already allocated data buffer if possible */
-       if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-               nvmet_rdma_use_inline_sg(rsp, len, 0);
-       } else {
-               status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                               len);
-               if (status)
-                       return status;
-       }
+       status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+                       len);
+       if (status)
+               return status;
 
        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -982,9 +978,13 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
                container_of(w, struct nvmet_rdma_queue, release_work);
        struct rdma_cm_id *cm_id = queue->cm_id;
        struct nvmet_rdma_device *dev = queue->dev;
+       enum nvmet_rdma_queue_state state = queue->state;
 
        nvmet_rdma_free_queue(queue);
-       rdma_destroy_id(cm_id);
+
+       if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+               rdma_destroy_id(cm_id);
+
        kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
 
@@ -1004,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
        queue->host_qid = le16_to_cpu(req->qid);
 
        /*
-        * req->hsqsize corresponds to our recv queue size
+        * req->hsqsize is zero-based, so our recv queue size is hsqsize + 1;
         * req->hrqsize corresponds to our send queue size
         */
-       queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+       queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
        queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
        if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
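
This is the target-side half of the 0's based convention fixed on the host above: the wire value hsqsize is depth minus one, so the target adds one back before sizing its receive queue and checking admin queues against NVMF_AQ_DEPTH. Roughly, with illustrative numbers:

    /* host (nvme_rdma):   sqsize  = queue_size - 1;             e.g. 127  */
    /* wire (connect):     hsqsize = cpu_to_le16(sqsize);                  */
    /* target (nvmet):     depth   = le16_to_cpu(hsqsize) + 1;   back to 128 */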
@@ -1233,8 +1233,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
        case NVMET_RDMA_Q_LIVE:
-               disconnect = true;
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
+       case NVMET_RDMA_IN_DEVICE_REMOVAL:
+               disconnect = true;
                break;
        case NVMET_RDMA_Q_DISCONNECTING:
                break;
@@ -1272,6 +1273,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        schedule_work(&queue->release_work);
 }
 
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id:     rdma_cm id (for a listener, its context is the nvmet port)
+ * @queue:     nvmet rdma queue (cm_id qp_context; NULL for a listener)
+ *
+ * A DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to be removed, so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device bound listener cm_id (in which case the queue
+ * will be NULL).
+ *
+ * We claim ownership of destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for ports
+ * we nullify the priv to prevent a double cm_id destruction; the
+ * cm_id is destroyed implicitly by returning a non-zero rc to the
+ * callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+               struct nvmet_rdma_queue *queue)
+{
+       unsigned long flags;
+
+       if (!queue) {
+               struct nvmet_port *port = cm_id->context;
+
+               /*
+                * This is a listener cm_id. Make sure that
+                * future remove_port won't invoke a double
+                * cm_id destroy. Use atomic xchg to make sure
+                * we don't compete with remove_port.
+                */
+               if (xchg(&port->priv, NULL) != cm_id)
+                       return 0;
+       } else {
+               /*
+                * This is a queue cm_id. Make sure that the
+                * queue release will not destroy the cm_id,
+                * and schedule removal of all ctrl queues
+                * (only if the queue is not already disconnecting).
+                */
+               spin_lock_irqsave(&queue->state_lock, flags);
+               if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+                       queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+               spin_unlock_irqrestore(&queue->state_lock, flags);
+               nvmet_rdma_queue_disconnect(queue);
+               flush_scheduled_work();
+       }
+
+       /*
+        * We need to return 1 so that the core will destroy
+        * its own ID.  What a great API design...
+        */
+       return 1;
+}
+
 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event)
 {
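
The ownership rule described in the comment block boils down to an atomic pointer swap: whichever path, device removal or remove_port, xchg()es the cm_id out of port->priv first becomes its sole destroyer, so rdma_destroy_id can never run twice. The idiom, sketched with hypothetical names:

    /* single-owner teardown: the winner of the swap cleans up,
     * the loser sees NULL and backs off */
    struct rdma_cm_id *id = xchg(&port->priv, NULL);

    if (id)
            rdma_destroy_id(id);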
@@ -1294,20 +1351,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               /*
-                * We can get the device removal callback even for a
-                * CM ID that we aren't actually using.  In that case
-                * the context pointer is NULL, so we shouldn't try
-                * to disconnect a non-existing queue.  But we also
-                * need to return 1 so that the core will destroy
-                * it's own ID.  What a great API design..
-                */
-               if (queue)
-                       nvmet_rdma_queue_disconnect(queue);
-               else
-                       ret = 1;
+               nvmet_rdma_queue_disconnect(queue);
+               break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               ret = nvmet_rdma_device_removal(cm_id, queue);
                break;
        case RDMA_CM_EVENT_REJECTED:
        case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,9 +1444,10 @@ out_destroy_id:
 
 static void nvmet_rdma_remove_port(struct nvmet_port *port)
 {
-       struct rdma_cm_id *cm_id = port->priv;
+       struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
 
-       rdma_destroy_id(cm_id);
+       if (cm_id)
+               rdma_destroy_id(cm_id);
 }
 
 static struct nvmet_fabrics_ops nvmet_rdma_ops = {
index 7792266db2597b29f8158d87fdb61904eec64fe4..3ce69536a7b3c3832f652ff7b2c43bd9f0db506e 100644 (file)
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
         */
 
  err:
-       if (it.node)
-               of_node_put(it.node);
+       of_node_put(it.node);
        return rc;
 }
 
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
        const struct device_node *parent, int port_reg, int reg)
 {
        struct of_endpoint endpoint;
-       struct device_node *node, *prev_node = NULL;
-
-       while (1) {
-               node = of_graph_get_next_endpoint(parent, prev_node);
-               of_node_put(prev_node);
-               if (!node)
-                       break;
+       struct device_node *node = NULL;
 
+       for_each_endpoint_of_node(parent, node) {
                of_graph_parse_endpoint(node, &endpoint);
                if (((port_reg == -1) || (endpoint.port == port_reg)) &&
                        ((reg == -1) || (endpoint.id == reg)))
                        return node;
-
-               prev_node = node;
        }
 
        return NULL;
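
for_each_endpoint_of_node() already drops the reference on the previous endpoint at each step, which is exactly the bookkeeping the removed open-coded loop did with prev_node and of_node_put. Returning from inside the loop keeps the current node's reference, which is what a get-style helper wants. A usage sketch, assuming a device tree with OF graph endpoints:

    #include <linux/of_graph.h>

    /* hypothetical helper: find endpoint 0/0 under @parent */
    static struct device_node *demo_find_ep0(const struct device_node *parent)
    {
            struct device_node *ep;

            for_each_endpoint_of_node(parent, ep) {
                    struct of_endpoint endpoint;

                    of_graph_parse_endpoint(ep, &endpoint);
                    if (endpoint.port == 0 && endpoint.id == 0)
                            return ep;  /* caller gets the held reference */
            }
            return NULL;        /* iterator has dropped all references */
    }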
index 55f1b839114924aeaae2baf1f10a2db145037cca..085c6389afd135b25987add40f31725e33f4db0d 100644 (file)
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob,
                pr_warning("End of tree marker overwritten: %08x\n",
                           be32_to_cpup(mem + size));
 
-       if (detached) {
+       if (detached && mynodes) {
                of_node_set_flag(*mynodes, OF_DETACHED);
                pr_debug("unflattened tree is detached\n");
        }
index 89a71c6074fc9d23fbc3dc2ac5bed7e4d69ce49b..a2e68f740edacbe900af35b5875cb65e6e40771a 100644 (file)
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches)
 
                        list_del(&desc->list);
 
+                       of_node_set_flag(desc->dev, OF_POPULATED);
+
                        pr_debug("of_irq_init: init %s (%p), parent %p\n",
                                 desc->dev->full_name,
                                 desc->dev, desc->interrupt_parent);
                        ret = desc->irq_init_cb(desc->dev,
                                                desc->interrupt_parent);
                        if (ret) {
+                               of_node_clear_flag(desc->dev, OF_POPULATED);
                                kfree(desc);
                                continue;
                        }
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches)
                         * its children can get processed in a subsequent pass.
                         */
                        list_add_tail(&desc->list, &intc_parent_list);
-
-                       of_node_set_flag(desc->dev, OF_POPULATED);
                }
 
                /* Get the next pending parent that might have children */
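
Setting OF_POPULATED before calling the init callback claims the node early, so nothing else (for example the default platform-device population) can bind it while irq_init_cb runs; clearing the flag again on failure releases that claim. The pattern, sketched with a hypothetical callback:

    of_node_set_flag(np, OF_POPULATED);     /* claim the node first */

    ret = init_cb(np, parent);              /* may probe the controller */
    if (ret)
            /* failed: release the claim so others may bind the node */
            of_node_clear_flag(np, OF_POPULATED);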
index 8aa19769107437b8d3bfada2d4fbd35b4b4fc297..f39ccd5aa70125a1fc22529fff0b3de185aa5cd6 100644 (file)
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root,
 }
 EXPORT_SYMBOL_GPL(of_platform_default_populate);
 
+#ifndef CONFIG_PPC
 static int __init of_platform_default_populate_init(void)
 {
        struct device_node *node;
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void)
        return 0;
 }
 arch_initcall_sync(of_platform_default_populate_init);
+#endif
 
 static int of_platform_device_destroy(struct device *dev, void *data)
 {
index 5f4a2e04c8d7cf237535d0cafb0274ba4df904e5..add66236215c66887fb2f9682d72118a9f786849 100644 (file)
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
        bridge->release_fn = release_fn;
        bridge->release_data = release_data;
 }
+EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);
 
 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
                             struct resource *res)
index a02981efdad570148e39925cfbe4a8579ca7f7ca..98f12223c734f15835edf438a94d7eccf15c991d 100644 (file)
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
                nvec = maxvec;
 
        for (;;) {
-               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+               if (flags & PCI_IRQ_AFFINITY) {
                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
                        if (nvec < minvec)
                                return -ENOSPC;
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
  **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-       return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+       return __pci_enable_msi_range(dev, minvec, maxvec, 0);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);
 
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
                return -ERANGE;
 
        for (;;) {
-               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+               if (flags & PCI_IRQ_AFFINITY) {
                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
                        if (nvec < minvec)
                                return -ENOSPC;
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
                int minvec, int maxvec)
 {
-       return __pci_enable_msix_range(dev, entries, minvec, maxvec,
-                       PCI_IRQ_NOAFFINITY);
+       return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
 {
        int vecs = -ENOSPC;
 
-       if (!(flags & PCI_IRQ_NOMSIX)) {
+       if (flags & PCI_IRQ_MSIX) {
                vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
                                flags);
                if (vecs > 0)
                        return vecs;
        }
 
-       if (!(flags & PCI_IRQ_NOMSI)) {
+       if (flags & PCI_IRQ_MSI) {
                vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
                if (vecs > 0)
                        return vecs;
        }
 
        /* use legacy irq if allowed */
-       if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+       if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+               pci_intx(dev, 1);
                return 1;
+       }
+
        return vecs;
 }
 EXPORT_SYMBOL(pci_alloc_irq_vectors);
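
With the polarity flipped from opt-out (PCI_IRQ_NO*) to opt-in, callers now list the vector types they can handle, and the helper falls back MSI-X, then MSI, then legacy INTx (which it now also enables via pci_intx). A hedged usage sketch for a hypothetical driver:

    #include <linux/pci.h>

    /* hypothetical probe step: accept any vector type, 1..32 vectors */
    static int demo_setup_irqs(struct pci_dev *pdev)
    {
            int nvec = pci_alloc_irq_vectors(pdev, 1, 32,
                            PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);

            if (nvec < 0)
                    return nvec;    /* not even one legacy IRQ available */
            return nvec;            /* number of vectors actually granted */
    }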
@@ -1411,6 +1413,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                pci_msi_domain_update_chip_ops(info);
 
+       info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
        domain = msi_create_irq_domain(fwnode, info, parent);
        if (!domain)
                return NULL;
index 37ff0158e45f1a3aa453e5da757d21a11e1c3267..44e0ff37480b8284e8044cd66db6f143c0de370e 100644 (file)
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
        if (nhi->vendor != PCI_VENDOR_ID_INTEL
                    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
                        nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+                       nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
                        nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
-                   || nhi->subsystem_vendor != 0x2222
-                   || nhi->subsystem_device != 0x1111)
+                   || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
                goto out;
        dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
        device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3343,6 +3343,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                               quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+                              PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
+                              quirk_apple_wait_for_thunderbolt);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
                               quirk_apple_wait_for_thunderbolt);
index d1ef7acf69307f1377782544bde6011af4e54df2..f9357e09e9b30131d631361a9d0092a1e19980bb 100644 (file)
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
        list_del(&dev->bus_list);
        up_write(&pci_bus_sem);
 
+       pci_bridge_d3_device_removed(dev);
        pci_free_resources(dev);
        put_device(&dev->dev);
 }
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
                dev->subordinate = NULL;
        }
 
-       pci_bridge_d3_device_removed(dev);
-
        pci_destroy_dev(dev);
 }
 
index 489ea1098c96170ad532a636bf93a148b1228a97..69b5e811ea2b2c2c02a902434063196f31425892 100644 (file)
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 /************************ runtime PM support ***************************/
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
 static int pcmcia_dev_resume(struct device *dev);
 
 static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
        int rc;
 
        device_lock(dev);
-       rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+       rc = pcmcia_dev_suspend(dev);
        device_unlock(dev);
        return rc;
 }
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
 
 /* PM support, also needed for reset */
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
        struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
        .remove_dev = &pcmcia_bus_remove_socket,
 };
 
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
 
 struct bus_type pcmcia_bus_type = {
        .name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
        .dev_groups = pcmcia_dev_groups,
        .probe = pcmcia_device_probe,
        .remove = pcmcia_device_remove,
-       .suspend = pcmcia_dev_suspend,
-       .resume = pcmcia_dev_resume,
+       .pm = &pcmcia_bus_pm_ops,
 };
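
Moving from the legacy bus-level .suspend/.resume pair to dev_pm_ops is what allows dropping the unused pm_message_t argument above; SET_SYSTEM_SLEEP_PM_OPS wires the same two callbacks into all system-sleep transitions. A minimal sketch with hypothetical names:

    static int demo_suspend(struct device *dev) { return 0; }
    static int demo_resume(struct device *dev)  { return 0; }

    static const struct dev_pm_ops demo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
    };

    struct bus_type demo_bus = {
            .name = "demo",
            .pm   = &demo_pm_ops,   /* replaces .suspend/.resume */
    };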
 
 
index 483f919e0d2e2431636a73cacd14463f845c8d0c..91b5f5724cba6dde6466cb50cfada71875873ad0 100644 (file)
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
 {
-       struct pcmcia_low_level *ops = dev->platform_data;
        /*
         * We have at least one socket, so set MECR:CIT
         * (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
                        goto err1;
        }
 
-       pxa2xx_configure_sockets(&dev->dev);
+       pxa2xx_configure_sockets(&dev->dev, ops);
        dev_set_drvdata(&dev->dev, sinfo);
 
        return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
 {
-       pxa2xx_configure_sockets(dev);
+       struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+       pxa2xx_configure_sockets(dev, ops);
        return 0;
 }
 
index b609b45469ed71179852378f49354ef1c798d433..e58c7a41541880cd4bf461cd32f995bb27f8aae7 100644 (file)
@@ -1,4 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
 
index 12f0dd0914772ae6032282d8a1cf545f023e1857..2f490930430d46a2f7056aa9b5576266d21a4937 100644 (file)
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
 
 int pcmcia_badge4_init(struct sa1111_dev *dev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_badge4()) {
-               printk(KERN_INFO
-                      "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
-                      __func__,
-                      badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
-               sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
-               ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       printk(KERN_INFO
+              "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+              __func__,
+              badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+       sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+       return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
 
 static int __init pcmv_setup(char *s)
index a1531feb8460c3041ec55f629dcc26dcfca55d9b..3d95dffcff7a307e6475614d545d7fa3ff556305 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
 #include <asm/irq.h>
 
 #include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
        sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
        sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
 
+       ret = -ENODEV;
 #ifdef CONFIG_SA1100_BADGE4
-       pcmcia_badge4_init(dev);
+       if (machine_is_badge4())
+               ret = pcmcia_badge4_init(dev);
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
-       pcmcia_jornada720_init(dev);
+       if (machine_is_jornada720())
+               ret = pcmcia_jornada720_init(dev);
 #endif
 #ifdef CONFIG_ARCH_LUBBOCK
-       pcmcia_lubbock_init(dev);
+       if (machine_is_lubbock())
+               ret = pcmcia_lubbock_init(dev);
 #endif
 #ifdef CONFIG_ASSABET_NEPONSET
-       pcmcia_neponset_init(dev);
+       if (machine_is_assabet())
+               ret = pcmcia_neponset_init(dev);
 #endif
-       return 0;
+
+       if (ret) {
+               release_mem_region(dev->res.start, 512);
+               sa1111_disable_device(dev);
+       }
+
+       return ret;
 }
 
 static int pcmcia_remove(struct sa1111_dev *dev)
index c2c30580c83f6316013595185185b10c078990d2..480a3ede27c8b59b5cb573d30c5cead55d5ca481 100644 (file)
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 
 int pcmcia_jornada720_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
+       unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
 
-       if (machine_is_jornada720()) {
-               unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+       /* FIXME: why are we messing around with SA11x0's GPIO1? */
+       GRER |= 0x00000002;
 
-               GRER |= 0x00000002;
+       /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+       sa1111_set_io_dir(sadev, pin, 0, 0);
+       sa1111_set_io(sadev, pin, 0);
+       sa1111_set_sleep_io(sadev, pin, 0);
 
-               /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
-               sa1111_set_io_dir(sadev, pin, 0, 0);
-               sa1111_set_io(sadev, pin, 0);
-               sa1111_set_sleep_io(sadev, pin, 0);
-
-               sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index c5caf579045145c07df4c9946141bea45f259075..e741f499c875314d40855190186ee939f0dba2e7 100644 (file)
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
 
 int pcmcia_lubbock_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_lubbock()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
-               /* Set CF Socket 1 power to standby mode. */
-               lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
 
-               pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
-               pxa2xx_configure_sockets(&sadev->dev);
-               ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
-                               pxa2xx_drv_pcmcia_add_one);
-       }
+       /* Set CF Socket 1 power to standby mode. */
+       lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
-       return ret;
+       pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+       pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+                                pxa2xx_drv_pcmcia_add_one);
 }
 
 MODULE_LICENSE("GPL");
index 1d78739c4c07699b44f1479165660c229eaa462b..019c395eb4bf8cd44b4127b0efeed797e48b0ab5 100644 (file)
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
 
 int pcmcia_neponset_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_assabet()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index 9f6ec87b9f9e13e7a1ff8f49338ec66849b17b57..48140ac73ed632613dfdb4a682abbe2583d57bc3 100644 (file)
@@ -144,19 +144,19 @@ static int
 sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
 {
        struct soc_pcmcia_timing timing;
-       unsigned int clock = clk_get_rate(skt->clk);
+       unsigned int clock = clk_get_rate(skt->clk) / 1000;
        unsigned long mecr = MECR;
        char *p = buf;
 
        soc_common_pcmcia_get_timing(skt, &timing);
 
-       p+=sprintf(p, "I/O      : %u (%u)\n", timing.io,
+       p+=sprintf(p, "I/O      : %uns (%uns)\n", timing.io,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+       p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "common   : %u (%u)\n", timing.mem,
+       p+=sprintf(p, "common   : %uns (%uns)\n", timing.mem,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
 
        return p - buf;
index eed5e9c05353c2fafb30b4b0cc8745472b44aff8..d5ca760c4eb294101521bddc82260ff90b81e5c7 100644 (file)
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
        stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
 
        if (skt->cs_state.flags & SS_IOCARD)
-               stat |= state.bvd1 ? SS_STSCHG : 0;
+               stat |= state.bvd1 ? 0 : SS_STSCHG;
        else {
                if (state.bvd1 == 0)
                        stat |= SS_BATDEAD;
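
For I/O cards the BVD1 pin is redefined as -STSCHG (status changed), an active-low signal, so the status bit must be reported when the line reads low; the old code had the sense inverted. As a truth table:

    /* SS_IOCARD set: pin is -STSCHG, active low
     *   state.bvd1 == 1  ->  no status change pending
     *   state.bvd1 == 0  ->  report SS_STSCHG
     */
    stat |= state.bvd1 ? 0 : SS_STSCHG;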
index 6ccb994bdfcbd160148c535f18ea656ea7ee13e6..f5e1008a223df7e5a1c16d45e9a33b59382e3276 100644 (file)
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
        return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
        struct arm_pmu *pmu;
 
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
                if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
                if (pmu->reset)
                        pmu->reset(pmu);
        }
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        return 0;
 }
 
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        if (!cpu_hw_events)
                return -ENOMEM;
 
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
 
        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        return 0;
 
 out_unregister:
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_del(&cpu_pmu->entry);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        free_percpu(cpu_hw_events);
        return err;
 }
@@ -869,9 +869,9 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
        cpu_pm_pmu_unregister(cpu_pmu);
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_del(&cpu_pmu->entry);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        free_percpu(cpu_pmu->hw_events);
 }
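
The mutex had to go because arm_perf_starting_cpu runs in the CPU hotplug "starting" phase, on the incoming CPU with interrupts disabled, where sleeping locks are forbidden. A sketch of the constraint, with hypothetical list items:

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    /* hotplug "starting" callback: runs with IRQs off, must not sleep */
    static int demo_starting_cpu(unsigned int cpu)
    {
            struct demo_item *it;

            spin_lock(&demo_lock);          /* a mutex could schedule here */
            list_for_each_entry(it, &demo_list, entry)
                    demo_reset(it);         /* hypothetical per-item hook */
            spin_unlock(&demo_lock);
            return 0;
    }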
 
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                        if (i > 0 && spi != using_spi) {
                                pr_err("PPI/SPI IRQ type mismatch for %s!\n",
                                        dn->name);
+                               of_node_put(dn);
                                kfree(irqs);
                                return -EINVAL;
                        }
@@ -967,11 +968,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
        /* If we didn't manage to parse anything, try the interrupt affinity */
        if (cpumask_weight(&pmu->supported_cpus) == 0) {
-               if (!using_spi) {
+               int irq = platform_get_irq(pdev, 0);
+
+               if (irq >= 0 && irq_is_percpu(irq)) {
                        /* If using PPIs, check the affinity of the partition */
-                       int ret, irq;
+                       int ret;
 
-                       irq = platform_get_irq(pdev, 0);
                        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
                        if (ret) {
                                kfree(irqs);
index 18d662610075b3e312bb6e5e8fccb51020a2f13d..8ffc44afdb75baaceab39801b811d3c3314604e6 100644 (file)
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy)
                rc = -ENODEV;
        };
 
-       return 0;
+       return rc;
 }
 
 static const struct phy_ops phy_ops = {
index 0a45bc6088aeb14e3da89b1240d95173019e20f9..8c7eb335622ee1781a6b83d155740153b00d4717 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/power_supply.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/usb/of.h>
 #include <linux/workqueue.h>
 
 #define REG_ISCR                       0x00
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg {
 struct sun4i_usb_phy_data {
        void __iomem *base;
        const struct sun4i_usb_phy_cfg *cfg;
+       enum usb_dr_mode dr_mode;
        struct mutex mutex;
        struct sun4i_usb_phy {
                struct phy *phy;
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data {
                bool regulator_on;
                int index;
        } phys[MAX_PHYS];
+       int first_phy;
        /* phy0 / otg related variables */
        struct extcon_dev *extcon;
        bool phy0_init;
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
 
-               if (data->id_det_gpio) {
-                       /* OTG mode, force ISCR and cable state updates */
-                       data->id_det = -1;
-                       data->vbus_det = -1;
-                       queue_delayed_work(system_wq, &data->detect, 0);
-               } else {
-                       /* Host only mode */
-                       sun4i_usb_phy0_set_id_detect(_phy, 0);
-                       sun4i_usb_phy0_set_vbus_detect(_phy, 1);
-               }
+               /* Force ISCR and cable state updates */
+               data->id_det = -1;
+               data->vbus_det = -1;
+               queue_delayed_work(system_wq, &data->detect, 0);
        }
 
        return 0;
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
        return 0;
 }
 
+static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
+{
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               return gpiod_get_value_cansleep(data->id_det_gpio);
+       case USB_DR_MODE_HOST:
+               return 0;
+       case USB_DR_MODE_PERIPHERAL:
+       default:
+               return 1;
+       }
+}
+
 static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
 {
        if (data->vbus_det_gpio)
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
        struct phy *phy0 = data->phys[0].phy;
        int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
 
-       id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+       if (phy0 == NULL)
+               return;
+
+       id_det = sun4i_usb_phy0_get_id_det(data);
        vbus_det = sun4i_usb_phy0_get_vbus_det(data);
 
        mutex_lock(&phy0->mutex);
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough for
                 * the musb-ip to end the current device session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(200);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough to
                 * the musb-ip to end the current host session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
                        mutex_lock(&phy0->mutex);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(1000);
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
 {
        struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
 
-       if (args->args[0] >= data->cfg->num_phys)
+       if (args->args[0] < data->first_phy ||
+           args->args[0] >= data->cfg->num_phys)
                return ERR_PTR(-ENODEV);
 
        return data->phys[args->args[0]].phy;
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        return -EPROBE_DEFER;
        }
 
-       /* vbus_det without id_det makes no sense, and is not supported */
-       if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
-               dev_err(dev, "usb0_id_det missing or invalid\n");
-               return -ENODEV;
-       }
-
-       if (data->id_det_gpio) {
+       data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               /* otg without id_det makes no sense, and is not supported */
+               if (!data->id_det_gpio) {
+                       dev_err(dev, "usb0_id_det missing or invalid\n");
+                       return -ENODEV;
+               }
+               /* fall through */
+       case USB_DR_MODE_HOST:
+       case USB_DR_MODE_PERIPHERAL:
                data->extcon = devm_extcon_dev_allocate(dev,
                                                        sun4i_usb_phy0_cable);
                if (IS_ERR(data->extcon))
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to register extcon: %d\n", ret);
                        return ret;
                }
+               break;
+       default:
+               dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
+               data->first_phy = 1;
        }
 
-       for (i = 0; i < data->cfg->num_phys; i++) {
+       for (i = data->first_phy; i < data->cfg->num_phys; i++) {
                struct sun4i_usb_phy *phy = data->phys + i;
                char name[16];
 
index ac4f31abefe33e45a4b1cdf5636c7c1ba05f716d..28fce4bce638e57a0c97f7c57ee781e2fe153240 100644 (file)
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
                }
 
                phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
-               if (IS_ERR(phy->clk)) {
+               if (IS_ERR(phy->hsic_clk)) {
                        dev_err(dev, "failed to get hsic_12M clock\n");
-                       return PTR_ERR(phy->clk);
+                       return PTR_ERR(phy->hsic_clk);
                }
 
                phy->reset = devm_reset_control_get(dev, "hsic");
index 5749a4eee746da45fdef1d4f39cabab607160408..0fe8fad25e4d1fb4c93f9716a2fa0d300f4e9290 100644 (file)
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                offset += range->npins;
        }
 
-       /* Mask and clear all interrupts */
-       chv_writel(0, pctrl->regs + CHV_INTMASK);
+       /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
        ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
-                                  handle_simple_irq, IRQ_TYPE_NONE);
+                                  handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
                goto fail;
index eb4990ff26ca581fa8040e2e028111107524c8f9..7fb765642ee78bc320a30c04bf3bf8d1a4724878 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/bitops.h>
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pinctrl/pinconf.h>
index 11623c6b0cb30270ea267068b495d3227dcf3d96..44e69c963f5da21ea63a9e558aca58aaebc3134c 100644 (file)
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
                return PTR_ERR(pc->pcdev);
        }
 
-       ret = meson_gpiolib_register(pc);
-       if (ret) {
-               pinctrl_unregister(pc->pcdev);
-               return ret;
-       }
-
-       return 0;
+       return meson_gpiolib_register(pc);
 }
 
 static struct platform_driver meson_pinctrl_driver = {
index 634b4d30eefb1e3c14c4ed6243ca3382fc4f5b66..b3e772390ab66397dccf9e8823c58a0e6b40d0a4 100644 (file)
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
 
        spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + offset * 4);
-       /*
-        * Suppose BIOS or Bootloader sets specific debounce for the
-        * GPIO. if not, set debounce to be  2.75ms and remove glitch.
-       */
-       if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
-               pin_reg |= 0xf;
-               pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
-               pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-               pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
-       }
-
        pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
        writel(pin_reg, gpio_dev->base + offset * 4);
        spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
 
        spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
-       /*
-               Suppose BIOS or Bootloader sets specific debounce for the
-               GPIO. if not, set debounce to be  2.75ms.
-       */
-       if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
-               pin_reg |= 0xf;
-               pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
-               pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
-       }
        pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
        pin_reg |= BIT(INTERRUPT_MASK_OFF);
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
index c6d410ef8de08ec9bdaf6ee9ce803f1879b288a1..55375b1b3cc81b20cf6fc83722c89cfaa9ecefc2 100644 (file)
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
                           PADS_FUNCTION_SELECT2, 12, 0x3),
        MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 14, 0x3),
-       MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+       MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 16, 0x3),
-       MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 18, 0x3),
-       MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 20, 0x3),
-       MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 22, 0x3),
-       MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 24, 0x3),
-       MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5,
+       MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
                           PADS_FUNCTION_SELECT2, 26, 0x3),
        PIN_GROUP(TCK, "tck"),
        PIN_GROUP(TRSTN, "trstn"),
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
 {
        struct pistachio_pinctrl *pctl;
        struct resource *res;
-       int ret;
 
        pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
        if (!pctl)
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
                return PTR_ERR(pctl->pctldev);
        }
 
-       ret = pistachio_gpio_register(pctl);
-       if (ret < 0) {
-               pinctrl_unregister(pctl->pctldev);
-               return ret;
-       }
-
-       return 0;
+       return pistachio_gpio_register(pctl);
 }
 
 static struct platform_driver pistachio_pinctrl_driver = {
index ce483b03a2631f18b25569cde3b60352f9eda53f..f9d661e5c14abb56c94493f29d0f028ba9b1b078 100644 (file)
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index 3040abe6f73a1ad1b40991821f26de1c1db711d5..3131cac2b76ffcc830948fbff94ed4ad57669c84 100644 (file)
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index f99b183d5296f7981c9cfbf3bb25d5f5c236e46f..374a8028fec772b406aef640fdc062fc5dd7d9b8 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Generic driver for the OLPC Embedded Controller.
  *
+ * Author: Andres Salomon <dilinger@queued.net>
+ *
  * Copyright (C) 2011-2012 One Laptop per Child Foundation.
  *
  * Licensed under the GPL v2 or later.
@@ -12,7 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/olpc-ec.h>
 #include <asm/olpc.h>
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void)
 {
        return platform_driver_register(&olpc_ec_plat_driver);
 }
-
 arch_initcall(olpc_ec_init_module);
-
-MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
-MODULE_LICENSE("GPL");
index d2bc092defd77732f780acb531931f165d440498..da2fe18162e1196b932eb42baeea1ad47f411ee3 100644 (file)
@@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
        /* BIOS error detected */
        { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
 
-       /* Unknown, defined in ACPI DSDT */
-       /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+       /* Battery was removed or inserted */
+       { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
 
        /* Wifi Catcher */
        { KE_KEY,    0xe011, { KEY_PROG2 } },
index 63b371d6ee55b46f03b0f6a9cfedc35f417ffc33..91ae58510d92698c63252a099a865c01a9b23c2c 100644 (file)
@@ -1,6 +1,8 @@
 /* Moorestown PMIC GPIO (access through IPC) driver
  * Copyright (c) 2008 - 2009, Intel Corporation.
  *
+ * Author: Alek Du <alek.du@intel.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -21,7 +23,6 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void)
 {
        return platform_driver_register(&platform_pmic_gpio_driver);
 }
-
 subsys_initcall(platform_pmic_gpio_init);
-
-MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
-MODULE_LICENSE("GPL v2");
index 9c65f134d4474d843ffa907051191e2d8c484a24..da7a75f824891200f9db4c9dd4d274c5125d3dd8 100644 (file)
@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
 }
 
 static inline void max17042_read_model_data(struct max17042_chip *chip,
-                                       u8 addr, u32 *data, int size)
+                                       u8 addr, u16 *data, int size)
 {
        struct regmap *map = chip->regmap;
        int i;
+       u32 tmp;
 
-       for (i = 0; i < size; i++)
-               regmap_read(map, addr + i, &data[i]);
+       for (i = 0; i < size; i++) {
+               regmap_read(map, addr + i, &tmp);
+               data[i] = (u16)tmp;
+       }
 }
 
 static inline int max17042_model_data_compare(struct max17042_chip *chip,
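
regmap_read() always stores a full unsigned int through its value pointer, so pointing it at a u16 array element would overwrite the neighbouring entry; hence the u32 temporary and the explicit narrowing above. The failure mode, sketched with a hypothetical buffer:

    unsigned int tmp;
    u16 data[8];
    int i;

    for (i = 0; i < 8; i++) {
            /* regmap_read() stores sizeof(unsigned int) bytes through its
             * value pointer; aiming it at &data[i] would clobber data[i+1] */
            regmap_read(map, addr + i, &tmp);
            data[i] = (u16)tmp;
    }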
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
 {
        int ret;
        int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-       u32 *temp_data;
+       u16 *temp_data;
 
        temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
        if (!temp_data)
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
        ret = max17042_model_data_compare(
                chip,
                chip->pdata->config_data->cell_char_tbl,
-               (u16 *)temp_data,
+               temp_data,
                table_size);
 
        max10742_lock_model(chip);
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
 {
        int i;
        int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-       u32 *temp_data;
+       u16 *temp_data;
        int ret = 0;
 
        temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
index 3bfac539334b5c75ed78f5c3541ffe476e962f1b..c74c3f67b8da01fad62328ca31f8c9b8bb09c794 100644 (file)
@@ -200,8 +200,8 @@ config REBOOT_MODE
 config SYSCON_REBOOT_MODE
        tristate "Generic SYSCON regmap reboot mode driver"
        depends on OF
+       depends on MFD_SYSCON
        select REBOOT_MODE
-       select MFD_SYSCON
        help
          Say y here will enable reboot mode driver. This will
          get reboot mode arguments and store it in SYSCON mapped
index 9ab7f562a83ba6538054e1e39a8b758489a4f8d7..f69387e12c1e545a3cedcb126247050401c8996a 100644 (file)
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
 
        if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
                pr_err("failed to find reboot-offset property\n");
+               iounmap(base);
                return -EINVAL;
        }
 
        err = register_restart_handler(&hisi_restart_nb);
-       if (err)
+       if (err) {
                dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
                        err);
+               iounmap(base);
+       }
 
        return err;
 }
index 73dfae41def8a659978eec9a738448f604cb1973..4c56e54af6ace4b11133d7a1944a54ca0725ddf6 100644 (file)
@@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
        if (!charger)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, charger);
        charger->tps = tps;
        charger->dev = &pdev->dev;
 
index 32f0f014a06735c5ea51cf4f5185c5fbc7bdae60..9d19b9a62011b376be541b247336d455952bb42b 100644 (file)
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
                } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
                           (ibw_start + ibw_size) > ib_win->rstart) {
                        /* Return error if address translation involved */
-                       if (direct && ib_win->xlat) {
+                       if (!direct || ib_win->xlat) {
                                ret = -EFAULT;
                                break;
                        }
index cecc15a880de6928fed5ee0d35588fbae03423dd..cebc296463ad17efe25fc6fa8b4bde593f1fdff3 100644 (file)
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
                                           long timeout)
 {
-       struct rio_channel *ch = NULL;
-       struct rio_channel *new_ch = NULL;
+       struct rio_channel *ch;
+       struct rio_channel *new_ch;
        struct conn_req *req;
        struct cm_peer *peer;
        int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        spin_unlock_bh(&ch->lock);
        riocm_put_channel(ch);
+       ch = NULL;
        kfree(req);
 
        down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
        if (!found) {
                /* If peer device object not found, simply ignore the request */
                err = -ENODEV;
-               goto err_nodev;
+               goto err_put_new_ch;
        }
 
        new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        *new_ch_id = new_ch->id;
        return new_ch;
+
+err_put_new_ch:
+       spin_lock_bh(&idr_lock);
+       idr_remove(&ch_idr, new_ch->id);
+       spin_unlock_bh(&idr_lock);
+       riocm_put_channel(new_ch);
+
 err_put:
-       riocm_put_channel(ch);
-err_nodev:
-       if (new_ch) {
-               spin_lock_bh(&idr_lock);
-               idr_remove(&ch_idr, new_ch->id);
-               spin_unlock_bh(&idr_lock);
-               riocm_put_channel(new_ch);
-       }
+       if (ch)
+               riocm_put_channel(ch);
        *new_ch_id = 0;
        return ERR_PTR(err);
 }
@@ -2245,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
 {
        struct rio_channel *ch;
        unsigned int i;
+       LIST_HEAD(list);
 
        riocm_debug(EXIT, ".");
 
+       /*
+        * If there are any channels left in the connected state, send a
+        * close notification to the connection partner. First build a
+        * list of the channels that need this notification, because
+        * riocm_send_close() must be called outside of spinlock-protected
+        * code.
+        */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
-               riocm_debug(EXIT, "close ch %d", ch->id);
-               if (ch->state == RIO_CM_CONNECTED)
-                       riocm_send_close(ch);
+               if (ch->state == RIO_CM_CONNECTED) {
+                       riocm_debug(EXIT, "close ch %d", ch->id);
+                       idr_remove(&ch_idr, ch->id);
+                       list_add(&ch->ch_node, &list);
+               }
        }
        spin_unlock_bh(&idr_lock);
 
+       list_for_each_entry(ch, &list, ch_node)
+               riocm_send_close(ch);
+
        return NOTIFY_DONE;
 }
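
The shutdown hunk above is a textbook instance of the collect-under-lock
pattern: entries that need a potentially sleeping operation are moved onto a
private list while the spinlock is held, and the operation runs after the
lock is dropped. A condensed sketch of that shape, reusing the names from
the hunk purely for illustration:

static int example_shutdown(void)
{
	struct rio_channel *ch;
	unsigned int i;
	LIST_HEAD(list);	/* private list, safe to walk unlocked later */

	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->state == RIO_CM_CONNECTED) {
			idr_remove(&ch_idr, ch->id);	/* no new lookups */
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	/* riocm_send_close() may sleep, so it runs outside the lock */
	list_for_each_entry(ch, &list, ch_node)
		riocm_send_close(ch);

	return NOTIFY_DONE;
}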
 
index b2daa6641417caecedbd43fde480ef8159cd3652..c9ff2619971166f0f7b853242fca65e0323c1381 100644 (file)
@@ -2,7 +2,7 @@
  * max14577.c - Regulator driver for the Maxim 14577/77836
  *
  * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
 }
 module_exit(max14577_regulator_exit);
 
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:max14577-regulator");
index de730fd3f8a5df6cf7786be8f8b64c52b3786820..cfbb9512e48623429899cbe0785332b0a1171734 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2013-2015 Samsung Electronics
  * Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
 
 MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
 MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_LICENSE("GPL");
index 5022fa8d10c6bc399970c26b2392fcea5dd54385..8ed46a9a55c8469e38c0b14f7c5c8c95fb4fe569 100644 (file)
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
 static const struct regulator_desc pma8084_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pma8084_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
 
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
 static const struct regulator_desc pm8841_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_boost = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+               REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
        },
        .n_linear_ranges = 1,
-       .n_voltages = 16,
+       .n_voltages = 31,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE( 750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
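
The corrected tables above follow the usual REGULATOR_LINEAR_RANGE(min_uV,
min_sel, max_sel, step_uV) semantics: a selector maps to
min_uV + (sel - min_sel) * step_uV, and n_voltages is the last selector plus
one (163 + 1 = 164 for the pldo). A small userspace check of that
arithmetic, assuming those semantics:

#include <stdio.h>

struct range { int min_uV, min_sel, max_sel, step_uV; };

static const struct range pldo[] = {	/* corrected pma8084_pldo table */
	{  750000,   0,  63, 12500 },
	{ 1550000,  64, 126, 25000 },
	{ 3100000, 127, 163, 50000 },
};

int main(void)
{
	/* prints 750000..1537500, 1550000..3100000, 3100000..4900000 uV */
	for (int i = 0; i < 3; i++) {
		const struct range *r = &pldo[i];
		printf("sel %3d..%3d: %7d..%7d uV\n",
		       r->min_sel, r->max_sel, r->min_uV,
		       r->min_uV + (r->max_sel - r->min_sel) * r->step_uV);
	}
	return 0;
}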
 
index 8973d34ce5ba0f786afbe228e201fa36befe9f37..fb1b56a714753f208c29511c9cc6cea0550daffd 100644 (file)
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
        u8 *sense = NULL;
        int expires;
 
+       cqr = (struct dasd_ccw_req *) intparm;
        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
+                       if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+                               device = (struct dasd_device *) cqr->startdev;
+                               cqr->status = DASD_CQR_CLEARED;
+                               dasd_device_clear_timer(device);
+                               wake_up(&dasd_flush_wq);
+                               dasd_schedule_device_bh(device);
+                               return;
+                       }
                        break;
                case -ETIMEDOUT:
                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
        }
 
        now = get_tod_clock();
-       cqr = (struct dasd_ccw_req *) intparm;
        /* check for conditions that should be handled immediately */
        if (!cqr ||
            !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
index fd2eff44009806c039ad18bd5d546d452c4d81fa..98bbec44bcd05c0c37da4d853c17e2c525767c3e 100644 (file)
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
                return PTR_ERR(cqr);
        }
 
+       cqr->lpm = lpum;
+retry:
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->block = NULL;
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
                        (prssdp + 1);
                memcpy(messages, message_buf,
                       sizeof(struct dasd_rssd_messages));
+       } else if (cqr->lpm) {
+               /*
+                * On z/VM we might not be able to do I/O on the requested
+                * path, but we can get the required information on any
+                * path, so retry with an open path mask.
+                */
+               cqr->lpm = 0;
+               goto retry;
        } else
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
                                "Reading messages failed with rc=%d\n"
index 7ada078ffdd04b09c81cc45ddfab4057f397158d..6a58bc8f46e2a20bd170fb458c53b2f77871e3f6 100644 (file)
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
        priv->state = DEV_STATE_NOT_OPER;
        priv->dev_id.devno = sch->schib.pmcw.dev;
        priv->dev_id.ssid = sch->schid.ssid;
-       priv->schid = sch->schid;
 
        INIT_WORK(&priv->todo_work, ccw_device_todo);
        INIT_LIST_HEAD(&priv->cmb_list);
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
        put_device(&old_sch->dev);
        /* Initialize new subchannel. */
        spin_lock_irq(sch->lock);
-       cdev->private->schid = sch->schid;
        cdev->ccwlock = sch->lock;
        if (!sch_is_pseudo_sch(sch))
                sch_set_cdev(sch, cdev);
index 15b56a15db151cf4cfdfbc1e251a57400fb0706c..9bc3512374c903980299c014947d86ba4cefcffa 100644 (file)
@@ -26,6 +26,7 @@
 static void
 ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
 {
+       struct subchannel *sch = to_subchannel(cdev->dev.parent);
        char dbf_text[15];
 
        if (!scsw_is_valid_cstat(&irb->scsw) ||
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
                      "received"
                      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
                      ": %02X sch_stat : %02X\n",
-                     cdev->private->dev_id.devno, cdev->private->schid.ssid,
-                     cdev->private->schid.sch_no,
+                     cdev->private->dev_id.devno, sch->schid.ssid,
+                     sch->schid.sch_no,
                      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
-       sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
+       sprintf(dbf_text, "chk%x", sch->schid.sch_no);
        CIO_TRACE_EVENT(0, dbf_text);
        CIO_HEX_EVENT(0, irb, sizeof(struct irb));
 }
index 8975060af96cb4640012fa0b427b14ce3893f1a0..220f49145b2f9bf48e29f92dc06f94ab7a9a0ef2 100644 (file)
@@ -120,7 +120,6 @@ struct ccw_device_private {
        int state;              /* device state */
        atomic_t onoff;
        struct ccw_dev_id dev_id;       /* device id */
-       struct subchannel_id schid;     /* subchannel number */
        struct ccw_request req;         /* internal I/O request */
        int iretry;
        u8 pgid_valid_mask;     /* mask of valid PGIDs */
index 4bb5262f7aee705e7bf3b74c5ee950632dfbfd5a..71bf9bded48519c72a5d540a6ee8c1f89efc7bf2 100644 (file)
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
        q->qdio_error = 0;
 }
 
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+       if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+               tasklet_schedule(&q->tasklet);
+               return 0;
+       }
+       return -EPERM;
+}
+
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
        qperf_inc(q, tasklet_inbound);
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
        if (!qdio_inbound_q_done(q)) {
                /* means poll time is not yet over */
                qperf_inc(q, tasklet_inbound_resched);
-               if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-                       tasklet_schedule(&q->tasklet);
+               if (!qdio_tasklet_schedule(q))
                        return;
-               }
        }
 
        qdio_stop_polling(q);
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
-               if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-                       tasklet_schedule(&q->tasklet);
+               qdio_tasklet_schedule(q);
        }
 }
 
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
         * is noticed and outbound_handler is called after some time.
         */
        if (qdio_outbound_q_done(q))
-               del_timer(&q->u.out.timer);
+               del_timer_sync(&q->u.out.timer);
        else
-               if (!timer_pending(&q->u.out.timer))
+               if (!timer_pending(&q->u.out.timer) &&
+                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
        return;
 
 sched:
-       if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-               return;
-       tasklet_schedule(&q->tasklet);
+       qdio_tasklet_schedule(q);
 }
 
 /* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
 {
        struct qdio_q *q = (struct qdio_q *)data;
 
-       if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-               return;
-       tasklet_schedule(&q->tasklet);
+       qdio_tasklet_schedule(q);
 }
 
 static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 
        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
-                       tasklet_schedule(&out->tasklet);
+                       qdio_tasklet_schedule(out);
 }
 
 static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched);
-               if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-                       tasklet_schedule(&q->tasklet);
+               if (!qdio_tasklet_schedule(q))
                        return;
-               }
        }
 
        qdio_stop_polling(q);
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
-               if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-                       tasklet_schedule(&q->tasklet);
+               qdio_tasklet_schedule(q);
        }
 }
 
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
        int i;
        struct qdio_q *q;
 
-       if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+       if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;
 
        for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
                        continue;
                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                        qdio_siga_sync_q(q);
-               tasklet_schedule(&q->tasklet);
+               qdio_tasklet_schedule(q);
        }
 }
 
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+       struct subchannel_id schid;
        int cstat, dstat;
 
        if (!intparm || !irq_ptr) {
-               DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+               ccw_device_get_schid(cdev, &schid);
+               DBF_ERROR("qint:%4x", schid.sch_no);
                return;
        }
 
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
 {
+       struct subchannel_id schid;
 
        if (!cdev || !cdev->private)
                return -EINVAL;
 
-       DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
-       return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+       ccw_device_get_schid(cdev, &schid);
+       DBF_EVENT("get ssqd:%4x", schid.sch_no);
+       return qdio_setup_get_ssqd(NULL, &schid, data);
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
                tasklet_kill(&q->tasklet);
 
        for_each_output_queue(irq_ptr, q, i) {
-               del_timer(&q->u.out.timer);
+               del_timer_sync(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
 }
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
 int qdio_shutdown(struct ccw_device *cdev, int how)
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+       struct subchannel_id schid;
        int rc;
-       unsigned long flags;
 
        if (!irq_ptr)
                return -ENODEV;
 
        WARN_ON_ONCE(irqs_disabled());
-       DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+       ccw_device_get_schid(cdev, &schid);
+       DBF_EVENT("qshutdown:%4x", schid.sch_no);
 
        mutex_lock(&irq_ptr->setup_mutex);
        /*
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
        qdio_shutdown_debug_entries(irq_ptr);
 
        /* cleanup subchannel */
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       spin_lock_irq(get_ccwdev_lock(cdev));
 
        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
        }
 
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       spin_lock_irq(get_ccwdev_lock(cdev));
 
 no_cleanup:
        qdio_shutdown_thinint(irq_ptr);
@@ -1211,7 +1216,7 @@ no_cleanup:
        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
 
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown);
 int qdio_free(struct ccw_device *cdev)
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+       struct subchannel_id schid;
 
        if (!irq_ptr)
                return -ENODEV;
 
-       DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+       ccw_device_get_schid(cdev, &schid);
+       DBF_EVENT("qfree:%4x", schid.sch_no);
        DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
        mutex_lock(&irq_ptr->setup_mutex);
 
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free);
  */
 int qdio_allocate(struct qdio_initialize *init_data)
 {
+       struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
 
-       DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+       ccw_device_get_schid(init_data->cdev, &schid);
+       DBF_EVENT("qallocate:%4x", schid.sch_no);
 
        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
  */
 int qdio_establish(struct qdio_initialize *init_data)
 {
-       struct qdio_irq *irq_ptr;
        struct ccw_device *cdev = init_data->cdev;
-       unsigned long saveflags;
+       struct subchannel_id schid;
+       struct qdio_irq *irq_ptr;
        int rc;
 
-       DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+       ccw_device_get_schid(cdev, &schid);
+       DBF_EVENT("qestablish:%4x", schid.sch_no);
 
        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;
 
-       if (cdev->private->state != DEV_STATE_ONLINE)
-               return -EINVAL;
-
        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);
 
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data)
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
 
-       spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+       spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);
 
        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
-       }
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
-       if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish);
  */
 int qdio_activate(struct ccw_device *cdev)
 {
+       struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
        int rc;
-       unsigned long saveflags;
 
-       DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+       ccw_device_get_schid(cdev, &schid);
+       DBF_EVENT("qactivate:%4x", schid.sch_no);
 
        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;
 
-       if (cdev->private->state != DEV_STATE_ONLINE)
-               return -EINVAL;
-
        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev)
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;
 
-       spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+       spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
 
        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
-       }
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
-       if (rc)
                goto out;
+       }
 
        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 
        /* in case of SIGA errors we must process the error immediately */
        if (used >= q->u.out.scan_threshold || rc)
-               tasklet_schedule(&q->tasklet);
+               qdio_tasklet_schedule(q);
        else
                /* free the SBALs in case of no further traffic */
-               if (!timer_pending(&q->u.out.timer))
+               if (!timer_pending(&q->u.out.timer) &&
+                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
                        mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
 }
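
The new qdio_tasklet_schedule() helper above centralises the activity check
that was previously open-coded at every call site, and tightens it from "not
STOPPED" to "must be ACTIVE". Its return value lets callers bail out early;
a hypothetical caller mirroring the inbound-processing hunks:

static void example_inbound_resched(struct qdio_q *q)
{
	if (!qdio_tasklet_schedule(q))
		return;		/* scheduled: the tasklet finishes the work */

	/* -EPERM: queue no longer active, complete the work inline */
	qdio_stop_polling(q);
}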
index bf40063de202ff8280ce7503f103ce3c5db349b9..6d4b68c483f3dffed55d4f8508564d6a2bf9bef2 100644 (file)
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 
 /* exports for OSN */
index 7dba6c8537a16c18e550f311361b3bdf6231bd38..20cf29613043a7f87a29c444ff2be88d87ae3a53 100644 (file)
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
                int e;
 
                e = 0;
-               while (buffer->element[e].addr) {
+               while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+                      buffer->element[e].addr) {
                        unsigned long phys_aob_addr;
 
                        phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
        return rc;
 }
 
+/* try to restore a device's features after recovery */
+int qeth_recover_features(struct net_device *dev)
+{
+       struct qeth_card *card = dev->ml_priv;
+       netdev_features_t recover = dev->features;
+
+       if (recover & NETIF_F_IP_CSUM) {
+               if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
+                       recover ^= NETIF_F_IP_CSUM;
+       }
+       if (recover & NETIF_F_RXCSUM) {
+               if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
+                       recover ^= NETIF_F_RXCSUM;
+       }
+       if (recover & NETIF_F_TSO) {
+               if (qeth_set_ipa_tso(card, 1))
+                       recover ^= NETIF_F_TSO;
+       }
+
+       if (recover == dev->features)
+               return 0;
+
+       dev_warn(&card->gdev->dev,
+                "Device recovery failed to restore all offload features\n");
+       dev->features = recover;
+       return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_recover_features);
+
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
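
qeth_recover_features() above starts from the full wanted feature mask and
XORs out each bit whose re-enable call failed, so dev->features ends up
reflecting what the recovered hardware actually accepted. A userspace
miniature of that idiom; the feature names and the failing bit are made up:

#include <stdio.h>

#define F_IP_CSUM  (1u << 0)
#define F_RXCSUM   (1u << 1)
#define F_TSO      (1u << 2)

/* returns nonzero on failure, like the qeth_set_ipa_* helpers */
static int try_enable(unsigned int bit) { return bit == F_TSO; }

int main(void)
{
	unsigned int want = F_IP_CSUM | F_RXCSUM | F_TSO;
	unsigned int got = want;

	for (unsigned int bit = F_IP_CSUM; bit <= F_TSO; bit <<= 1)
		if ((got & bit) && try_enable(bit))
			got ^= bit;	/* clear the bit that failed */

	printf("wanted %#x, kept %#x\n", want, got);	/* 0x7 -> 0x3 */
	return 0;
}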
index 7bc20c5188bc67f32a2f5ace493a5827d216c8e6..bb27058fa9f089bdb61a7f4727148b2d8a9bdeed 100644 (file)
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
                        card->dev->hw_features |= NETIF_F_RXCSUM;
                        card->dev->vlan_features |= NETIF_F_RXCSUM;
                }
-               /* Turn on SG per default */
-               card->dev->features |= NETIF_F_SG;
        }
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
                                  PAGE_SIZE;
-       card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
@@ -1246,6 +1243,9 @@ contin:
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
+               rtnl_lock();
+               qeth_recover_features(card->dev);
+               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index 72934666fedf0193bef37fcc5378127ed2a540c7..272d9e7419be31f5f19297839f04f8fce88f1eac 100644 (file)
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
        if (addr->in_progress)
                return -EINPROGRESS;
 
+       if (!qeth_card_hw_is_reachable(card)) {
+               addr->disp_flag = QETH_DISP_ADDR_DELETE;
+               return 0;
+       }
+
        rc = qeth_l3_deregister_addr_entry(card, addr);
 
        hash_del(&addr->hnode);
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                hash_add(card->ip_htable, &addr->hnode,
                                qeth_l3_ipaddr_hash(addr));
 
+               if (!qeth_card_hw_is_reachable(card)) {
+                       addr->disp_flag = QETH_DISP_ADDR_ADD;
+                       return 0;
+               }
+
                /* qeth_l3_register_addr_entry can go to sleep
                 * if we add an IPv4 addr: the SETIP IPA cmd
                 * starts ARP work for the IPv4 addr.
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
        int i;
        int rc;
 
-       QETH_CARD_TEXT(card, 4, "recoverip");
+       QETH_CARD_TEXT(card, 4, "recovrip");
 
        spin_lock_bh(&card->ip_lock);
 
        hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+               if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+                       qeth_l3_deregister_addr_entry(card, addr);
+                       hash_del(&addr->hnode);
+                       kfree(addr);
+               } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
                        if (addr->proto == QETH_PROT_IPV4) {
                                addr->in_progress = 1;
                                spin_unlock_bh(&card->ip_lock);
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 
                        if (!rc) {
                                addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
-                               if (addr->ref_counter < 1) {
+                               if (addr->ref_counter < 1)
                                        qeth_l3_delete_ip(card, addr);
-                                       kfree(addr);
-                               }
                        } else {
                                hash_del(&addr->hnode);
                                kfree(addr);
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 
        spin_lock_bh(&card->ip_lock);
 
-       if (!qeth_l3_ip_from_hash(card, ipaddr))
+       if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
                qeth_l3_add_ip(card, ipaddr);
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 
        spin_lock_bh(&card->ip_lock);
 
-       if (!qeth_l3_ip_from_hash(card, ipaddr))
+       if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
                qeth_l3_add_ip(card, ipaddr);
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
-                               card->dev->features = NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        netif_keep_dst(card->dev);
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
                                  PAGE_SIZE;
-       card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3269,6 +3279,7 @@ contin:
                else
                        dev_open(card->dev);
                qeth_l3_set_multicast_list(card->dev);
+               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index 65645b11fc19763295c52fa96fd49608c465ed78..0e00a5ce0f000f292ffe77584b031b97ba12f553 100644 (file)
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                addr->u.a6.pfxlen = 0;
                addr->type = QETH_IP_TYPE_NORMAL;
 
+               spin_lock_bh(&card->ip_lock);
                qeth_l3_delete_ip(card, addr);
+               spin_unlock_bh(&card->ip_lock);
                kfree(addr);
        }
 
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                addr->type = QETH_IP_TYPE_NORMAL;
        } else
                return -ENOMEM;
+
+       spin_lock_bh(&card->ip_lock);
        qeth_l3_add_ip(card, addr);
+       spin_unlock_bh(&card->ip_lock);
        kfree(addr);
 
        return count;
index 241891a57caf8e97637d3e6c2ce6baecd021589a..df40692a9011ceb2cb2481af2eaa58a9ff92136e 100644 (file)
@@ -6,4 +6,8 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
+s390-virtio-objs := virtio_ccw.o
+ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
+s390-virtio-objs += kvm_virtio.o
+endif
+obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
index 1d060fd293a3b8e8a4d4095b2ad84241913272d5..5e5c11f37b2420cbb406ff5591ad15fe615f5ed8 100644 (file)
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void)
        if (test_devices_support(total_memory_size) < 0)
                return -ENODEV;
 
+       pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
+
        rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
        if (rc)
                return rc;
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void)
 }
 
 /* code for early console output with virtio_console */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+static int early_put_chars(u32 vtermno, const char *buf, int count)
 {
        char scratch[17];
        unsigned int len = count;
index b381b3718a98f5d484b1769fc4ef727f5fd4c0bd..5648b715fed9c2d4e448c9f477953ecb83e94641 100644 (file)
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
        struct fib *fibptr;
        struct hw_fib * hw_fib = (struct hw_fib *)0;
        dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
-       unsigned size;
+       unsigned int size, osize;
        int retval;
 
        if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
         *      will not overrun the buffer when we copy the memory. Return
         *      an error if we would.
         */
-       size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+       osize = size = le16_to_cpu(kfib->header.Size) +
+               sizeof(struct aac_fibhdr);
        if (size < le16_to_cpu(kfib->header.SenderSize))
                size = le16_to_cpu(kfib->header.SenderSize);
        if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
                goto cleanup;
        }
 
+       /* Sanity check the second copy */
+       if ((osize != le16_to_cpu(kfib->header.Size) +
+               sizeof(struct aac_fibhdr))
+               || (size < le16_to_cpu(kfib->header.SenderSize))) {
+               retval = -EINVAL;
+               goto cleanup;
+       }
+
        if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
                aac_adapter_interrupt(dev);
                /*
index 83458f7a28248045bdca6f1b5603fac7ce6a84f2..6dc96c8dfe75c229e522db72b587bd1af5852686 100644 (file)
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
 
 /* Get sense key string or NULL if not available */
 const char *
-scsi_sense_key_string(unsigned char key) {
-       if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+       if (key < ARRAY_SIZE(snstext))
                return snstext[key];
        return NULL;
 }
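
The rewrite above replaces the hand-maintained 0xE bound with a limit
derived from the table itself, so the check can never drift out of sync with
the array. A runnable userspace version of the same fix, with a shortened
table for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const snstext[] = {
	"No Sense", "Recovered Error", "Not Ready", "Medium Error",
};

static const char *sense_key_string(unsigned char key)
{
	if (key < ARRAY_SIZE(snstext))
		return snstext[key];
	return NULL;	/* out of range: never reads past the table */
}

int main(void)
{
	printf("%s\n", sense_key_string(2));		/* "Not Ready" */
	printf("%p\n", (void *)sense_key_string(99));	/* (nil) */
	return 0;
}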
index a569c65f22b18fbc247c1ab8c0e5c361328ccacb..dcf36537a767c72d9a2e956115e09f7d5f82dde9 100644 (file)
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
        mutex_unlock(&fip->ctlr_mutex);
 
 drop:
-       kfree(skb);
+       kfree_skb(skb);
        return rc;
 }
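
The one-liner above matters because an sk_buff is not a plain allocation:
kfree_skb() drops the reference count, runs the destructor and releases the
data area, while a raw kfree() of the struct leaks all of that. Kernel-style
sketch, purely illustrative:

#include <linux/skbuff.h>

/* Always hand an skb back through the skb API, never plain kfree() */
static void example_drop_frame(struct sk_buff *skb)
{
	kfree_skb(skb);	/* decrements users, frees head and destructor state */
}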
 
index bf85974be8621e16a130ffd711e18697231da3da..17d04c702e1ba13a1642d36a1de98fb7831665eb 100644 (file)
@@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                __ipr_remove(pdev);
                return rc;
        }
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       ioa_cfg->scan_enabled = 1;
+       schedule_work(&ioa_cfg->work_q);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 
-       scsi_scan_host(ioa_cfg->host);
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                }
        }
 
-       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-       ioa_cfg->scan_enabled = 1;
-       schedule_work(&ioa_cfg->work_q);
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+       scsi_scan_host(ioa_cfg->host);
+
        return 0;
 }
 
index 2dab3dc2aa69bf44c4f92a3cd539314711964081..c1ed25adb17ec1f11b039854ffa62e590032f9d6 100644 (file)
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
        /* Find first memory bar */
        bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
        instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
-       if (pci_request_selected_regions(instance->pdev, instance->bar,
+       if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
                                         "megasas: LSI")) {
                dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
                return -EBUSY;
@@ -5339,7 +5339,7 @@ fail_ready_state:
        iounmap(instance->reg_set);
 
       fail_ioremap:
-       pci_release_selected_regions(instance->pdev, instance->bar);
+       pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 
        return -EINVAL;
 }
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
 
        iounmap(instance->reg_set);
 
-       pci_release_selected_regions(instance->pdev, instance->bar);
+       pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 
 /**
index ec837544f78479e9fad9b08070aca968b448706d..52d8bbf7feb5c50efe361aa2e57f30277f38a0df 100644 (file)
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance)
 
        iounmap(instance->reg_set);
 
-       pci_release_selected_regions(instance->pdev, instance->bar);
+       pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 
 /**
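
All three megasas hunks above fix the same type confusion: instance->bar
holds a BAR index (from find_first_bit()), but pci_request_selected_regions()
and pci_release_selected_regions() take a bitmask of BARs, as produced by
pci_select_bars(). Encoding the single BAR as 1 << bar keeps the two
representations in sync. A sketch of the corrected usage in a hypothetical
helper, not driver code:

#include <linux/bitops.h>
#include <linux/pci.h>

static int map_first_mem_bar(struct pci_dev *pdev, void __iomem **regs)
{
	unsigned long bar_list = pci_select_bars(pdev, IORESOURCE_MEM);
	int bar = find_first_bit(&bar_list, BITS_PER_LONG);

	/* 1 << bar: the helpers expect a bitmask of BARs, not an index */
	if (pci_request_selected_regions(pdev, 1 << bar, "example"))
		return -EBUSY;

	*regs = pci_iomap(pdev, bar, 0);
	if (!*regs) {
		pci_release_selected_regions(pdev, 1 << bar);
		return -ENOMEM;
	}
	return 0;
}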
index 751f13edece010177d162995e4f8b344722560dc..750f82c339d4d6c1d07a11d1bba7d14aa0a56b10 100644 (file)
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        } else
                ioc->msix96_vector = 0;
 
+       if (ioc->is_warpdrive) {
+               ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+                   &ioc->chip->ReplyPostHostIndex;
+
+               for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+                       ioc->reply_post_host_index[i] =
+                       (resource_size_t __iomem *)
+                       ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+                       * 4)));
+       }
+
        list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
                pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
                    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        if (r)
                goto out_free_resources;
 
-       if (ioc->is_warpdrive) {
-               ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
-                   &ioc->chip->ReplyPostHostIndex;
-
-               for (i = 1; i < ioc->cpu_msix_table_sz; i++)
-                       ioc->reply_post_host_index[i] =
-                       (resource_size_t __iomem *)
-                       ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
-                       * 4)));
-       }
-
        pci_set_drvdata(ioc->pdev, ioc->shost);
        r = _base_get_ioc_facts(ioc, CAN_SLEEP);
        if (r)
index eaccd651ccda0d239af91ebfb6dfdbd97ac340e3..2464569253350b61fef59ed0583e8b4cd1a25944 100644 (file)
@@ -246,6 +246,10 @@ static struct {
        {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 3f0ff072184b648ad383831413cc80213e5f88fc..60b651bfaa01e91456a1c294704cb2193305aec8 100644 (file)
@@ -340,22 +340,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
        return 0;
 }
 
-/**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
-       struct Scsi_Host *shost = sdev->host;
-
-       return shost->transportt->host_attrs.ac.class ==
-               &sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
 /**
  * sas_remove_children  -  tear down a device's SAS data structures
  * @dev:       device belonging to the sas object
index 53ef1cb6418e33f0ce9542468359d2ad0297444d..8c9a35c91705e42fcbc07e3721d0522f96d496dc 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (is_sas_attached(sdev))
+       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
        if (!edev)
                return;
 
+       enclosure_unregister(edev);
+
        ses_dev = edev->scratch;
        edev->scratch = NULL;
 
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
        kfree(edev->component[0].scratch);
 
        put_device(&edev->edev);
-       enclosure_unregister(edev);
 }
 
 static void ses_intf_remove(struct device *cdev,
index e3da1a2fdb6664597cc7e639e5a8d61da5a13fd1..2a9da2e0ea6baf7448be18120191ec7a4041aaa4 100644 (file)
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
        scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
        {}
 };
index 823cbc92d1e75447b1487158ac097e1de3d50470..7a37090dabbea55308b555d03f19dd56c4c3c15c 100644 (file)
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
                clk_disable_unprepare(spfi->sys_clk);
        }
 
-       spi_master_put(master);
-
        return 0;
 }
 
index 0be89e052428fe7185f56409cd9ac8282d29b036..899d7a8f0889eaf39388ad458b28a1abbeeba671 100644 (file)
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        mtk_spi_reset(mdata);
-       spi_master_put(master);
 
        return 0;
 }
index f3df522db93bac9fec31dc5f27ebcdddda57c87a..58d2d48e16a530869528288d50f3a6581c3ea311 100644 (file)
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
                return PTR_ERR(ssp->clk);
 
        memset(&pi, 0, sizeof(pi));
+       pi.fwnode = dev->dev.fwnode;
        pi.parent = &dev->dev;
        pi.name = "pxa2xx-spi";
        pi.id = ssp->port_id;
index c338ef1136f6c6052b72b9394f74ef89b58273a5..7f1555621f8ec262459a0ebd65ffb680b24fbae8 100644 (file)
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
 
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       spi_master_put(master);
 
        return 0;
 }
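
This hunk and the neighbouring img-spfi and mtk drivers each drop a
spi_master_put() from the remove path. That reads as a refcount balance fix:
when a master is registered with the devm-managed helper, the device core
drops the reference automatically on driver detach, so an extra put would
underflow the count. A sketch of a remove() for a devm-registered master,
assuming that registration style; all names are illustrative:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv {
	struct clk *clk;
};

static int example_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct example_priv *priv = spi_master_get_devdata(master);

	clk_disable_unprepare(priv->clk);
	/* no spi_master_put(): devm_spi_register_master() owns the ref */
	return 0;
}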
index 0f83ad1d5a5858dd6ff808773fe359479a54b796..1de3a772eb7d23a8a90b9b77704033ac4945e364 100644 (file)
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+               /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+               if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+                       continue;
                if (brps <= 32) /* max of brdv is 32 */
                        break;
        }
index 51ad42fad567913339ab8d9deaf531b779865a87..200ca228d8851980d5e755eed5a82c6df4c6dc9b 100644 (file)
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       unsigned long ms = 1;
+       unsigned long long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
 
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 
                        if (ret > 0) {
                                ret = 0;
-                               ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+                               ms = 8LL * 1000LL * xfer->len;
+                               do_div(ms, xfer->speed_hz);
                                ms += ms + 100; /* some tolerance */
 
+                               if (ms > UINT_MAX)
+                                       ms = UINT_MAX;
+
                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
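
The timeout hunk above exists because xfer->len * 8 * 1000 / xfer->speed_hz
is evaluated in 32 bits: a large transfer on a slow bus wraps before the
division, producing a timeout that is far too short. The fix promotes the
product to 64 bits with do_div() and clamps to UINT_MAX before
msecs_to_jiffies(). A userspace demonstration of the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t len = 16u << 20;	/* 16 MiB transfer */
	uint32_t hz = 10000;		/* 10 kHz bus */

	uint32_t bad = len * 8 * 1000 / hz;	/* product wraps at 2^32 */
	uint64_t ok  = 8ULL * 1000ULL * len / hz;

	/* prints "32-bit: 107374 ms, 64-bit: 13421772 ms" */
	printf("32-bit: %u ms, 64-bit: %llu ms\n",
	       bad, (unsigned long long)ok);
	return 0;
}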
index 4c5a85595a85b7d6339804d20a80a42b0aa5eabf..961a94bf156c01716beebf4af2d4a709bcb4016e 100644 (file)
@@ -135,10 +135,16 @@ static void sync_print_sync_file(struct seq_file *s,
        int i;
 
        seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-                  sync_status_str(atomic_read(&sync_file->status)));
+                  sync_status_str(!fence_is_signaled(sync_file->fence)));
 
-       for (i = 0; i < sync_file->num_fences; ++i)
-               sync_print_fence(s, sync_file->cbs[i].fence, true);
+       if (fence_is_array(sync_file->fence)) {
+               struct fence_array *array = to_fence_array(sync_file->fence);
+
+               for (i = 0; i < array->num_fences; ++i)
+                       sync_print_fence(s, array->fences[i], true);
+       } else {
+               sync_print_fence(s, sync_file->fence, true);
+       }
 }
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
index d7dd1e55e347be5f3037d33ba81e343c18e13521..9f525ff7290c69b764503a232fcd9d7a7de338b3 100644 (file)
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
                break;
        case CMDF_ROUND_DOWN:
                divisor = ns / PCI1760_PWM_TIMEBASE;
+               break;
        default:
                return -EINVAL;
        }
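
The added break above matters: without it, CMDF_ROUND_DOWN computed its
divisor and then fell through into default, returning -EINVAL and discarding
the result. A tiny runnable reproduction of that fall-through shape, with
made-up modes and values:

#include <stdio.h>

static int pick_divisor(int mode)
{
	int divisor;

	switch (mode) {
	case 0:
		divisor = 10;
		break;
	case 1:
		divisor = 20;
		break;		/* this is the break that was missing:    */
	default:		/* without it, mode 1 fell through here   */
		return -1;	/* and the computed divisor was discarded */
	}
	return divisor;
}

int main(void)
{
	printf("%d %d %d\n", pick_divisor(0), pick_divisor(1),
	       pick_divisor(2));	/* 10 20 -1 */
	return 0;
}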
index 4ab186669f0c27d503a3e33b6e3bdd5fb0e12c7d..ec5b9a23494d4b28538f29e2efddcb199485c502 100644 (file)
 
 #define N_CHANS 8
 
-enum waveform_state_bits {
-       WAVEFORM_AI_RUNNING,
-       WAVEFORM_AO_RUNNING
-};
-
 /* Data unique to this driver */
 struct waveform_private {
        struct timer_list ai_timer;     /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
        unsigned int wf_amplitude;      /* waveform amplitude in microvolts */
        unsigned int wf_period;         /* waveform period in microseconds */
        unsigned int wf_current;        /* current time in waveform period */
-       unsigned long state_bits;
        unsigned int ai_scan_period;    /* AI scan period in usec */
        unsigned int ai_convert_period; /* AI conversion period in usec */
        struct timer_list ao_timer;     /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
        unsigned int nsamples;
        unsigned int time_increment;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
-               return;
-
        now = ktime_to_us(ktime_get());
        nsamples = comedi_nsamples_left(s, UINT_MAX);
 
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
         */
        devpriv->ai_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ai_timer);
        return 0;
 }
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
 {
        struct waveform_private *devpriv = dev->private;
 
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ai_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ai_timer);
+       } else {
+               del_timer_sync(&devpriv->ai_timer);
+       }
        return 0;
 }
 
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
        u64 scans_since;
        unsigned int scans_avail = 0;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
-               return;
-
        /* determine number of scan periods since last time */
        now = ktime_to_us(ktime_get());
        scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
        devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
        devpriv->ao_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ao_timer);
 
        return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        s->async->inttrig = NULL;
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ao_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ao_timer);
+       } else {
+               del_timer_sync(&devpriv->ao_timer);
+       }
        return 0;
 }
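
Both cancel hunks above replace the old flag-plus-del_timer() dance with a
split: del_timer_sync() waits for a running handler to finish, which would
self-deadlock if the cancel were invoked from the timer handler itself, so
in_softirq() selects the non-blocking variant in that context. The shape as
a standalone, illustrative helper:

#include <linux/interrupt.h>
#include <linux/timer.h>

static void example_cancel_timer(struct timer_list *timer)
{
	if (in_softirq()) {
		/* assume we are the timer callback: cannot wait on ourselves */
		del_timer(timer);
	} else {
		/* safe to wait until any running callback completes */
		del_timer_sync(timer);
	}
}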
 
index 65daef0c00d5209c61e2a59d20338ed2afe7c198..0f4eb954aa80fcbef0f7e4f5a9be4a4e46dd03f6 100644 (file)
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
        const struct daq200_boardtype *board;
        int i;
 
-       if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+       if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
                return NULL;
 
        for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
index 904f637797b6300de12acc98404d9b9d18ca3a43..8bbd938143408a70b1b364f606f4df6768ab6f00 100644 (file)
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
        s = &dev->subdevices[0];
        s->type         = COMEDI_SUBD_AI;
        s->subdev_flags = SDF_READABLE |
-                         (it->options[2] == 1) ? SDF_DIFF :
-                         (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND;
+                         ((it->options[2] == 1) ? SDF_DIFF :
+                          (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
        s->n_chan       = (it->options[2] == 1) ? 8 : 16;
        s->maxdata      = 0x0fff;
        s->range_table  = board->is_pgh ? &dt2811_pgh_ai_ranges
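
The extra parentheses above fix an operator-precedence bug: | binds tighter
than ?:, so FLAG | (cond) ? A : B parses as (FLAG | cond) ? A : B and the
flag is swallowed by the condition. A runnable demonstration with made-up
flag values:

#include <stdio.h>

#define SDF_READABLE 0x01
#define SDF_DIFF     0x10
#define SDF_GROUND   0x20

int main(void)
{
	int opt = 0;	/* not 1, so we want READABLE | GROUND */

	int buggy = SDF_READABLE | (opt == 1) ? SDF_DIFF : SDF_GROUND;
	int fixed = SDF_READABLE | ((opt == 1) ? SDF_DIFF : SDF_GROUND);

	printf("buggy=%#x fixed=%#x\n", buggy, fixed);	/* 0x10 vs 0x21 */
	return 0;
}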
index 8dabb19519a5ac6475e13dcf30c058842312c11a..0f97d7b611d720b7fd774ab102adfd3a607494fa 100644 (file)
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
        int i;
        static const int timeout = 1000;
 
-       if (trig_num != cmd->start_arg)
+       /*
+        * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+        * For backwards compatibility, also allow trig_num == 0 when
+        * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+        * in that case, the internal trigger is being used as a pre-trigger
+        * before the external trigger.
+        */
+       if (!(trig_num == cmd->start_arg ||
+             (trig_num == 0 && cmd->start_src != TRIG_INT)))
                return -EINVAL;
 
        /*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
                s->maxdata      = (devpriv->is_m_series) ? 0xffffffff
                                                         : 0x00ffffff;
                s->insn_read    = ni_tio_insn_read;
-               s->insn_write   = ni_tio_insn_read;
+               s->insn_write   = ni_tio_insn_write;
                s->insn_config  = ni_tio_insn_config;
 #ifdef PCIDMA
                if (dev->irq && devpriv->mite) {
index 170ac980abcb84f1c4079b99187d6a18262ca1a4..24c348d2f5bb03f7fd7b3f1c8e8ec5c8d4b6e54b 100644 (file)
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
        mutex_lock(&indio_dev->mlock);
        switch ((u32)this_attr->address) {
        case AD5933_OUT_RANGE:
+               ret = -EINVAL;
                for (i = 0; i < 4; i++)
                        if (val == st->range_avail[i]) {
                                st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
                                ret = ad5933_cmd(st, 0);
                                break;
                        }
-               ret = -EINVAL;
                break;
        case AD5933_IN_PGA_GAIN:
                if (sysfs_streq(buf, "1")) {
index 3664bfd0178b34b09c1791cfd1190f6b9c035ea2..2c4dc69731e8586e9cd0ae7f2afe8675715479e7 100644 (file)
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
        struct inode *inode = NULL;
        __u64 bits = 0;
        int rc = 0;
+       struct dentry *alias;
 
        /* NB 1 request reference will be taken away by ll_intent_lock()
         * when I return
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
                 */
        }
 
-       /* Only hash *de if it is unhashed (new dentry).
-        * Atoimc_open may passing hashed dentries for open.
-        */
-       if (d_unhashed(*de)) {
-               struct dentry *alias;
-
-               alias = ll_splice_alias(inode, *de);
-               if (IS_ERR(alias)) {
-                       rc = PTR_ERR(alias);
-                       goto out;
-               }
-               *de = alias;
-       } else if (!it_disposition(it, DISP_LOOKUP_NEG)  &&
-                  !it_disposition(it, DISP_OPEN_CREATE)) {
-               /* With DISP_OPEN_CREATE dentry will be
-                * instantiated in ll_create_it.
-                */
-               LASSERT(!d_inode(*de));
-               d_instantiate(*de, inode);
+       alias = ll_splice_alias(inode, *de);
+       if (IS_ERR(alias)) {
+               rc = PTR_ERR(alias);
+               goto out;
        }
+       *de = alias;
 
        if (!it_disposition(it, DISP_LOOKUP_NEG)) {
                /* we have lookup look - unhide dentry */
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
               dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
               *opened);
 
+       /* Only negative dentries enter here */
+       LASSERT(!d_inode(dentry));
+
+       if (!d_in_lookup(dentry)) {
+               /* A valid negative dentry just passed revalidation, so
+                * there's little point in trying to open it server-side,
+                * even though there's a minuscule chance it might succeed.
+                * Either way it's a valid race to just return -ENOENT here.
+                */
+               if (!(open_flags & O_CREAT))
+                       return -ENOENT;
+
+               /* Otherwise we just unhash it to be rehashed afresh via
+                * lookup if necessary
+                */
+               d_drop(dentry);
+       }
+
        it = kzalloc(sizeof(*it), GFP_NOFS);
        if (!it)
                return -ENOMEM;
index a10d4f82b95434e565cfddf7e822b9e78bfc25f3..13224694a8aef5e8bf5f48dc57cb40e9b70ae6b0 100644 (file)
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
 
 Other TODOs:
 
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
 - Add a flag to inhibit passing CEC RC messages to the rc subsystem.
   Applications should be able to choose this when calling S_LOG_ADDRS.
 - If the reply field of cec_msg is set then when the reply arrives it
index b2393bbacb261551f16ecd2efd44cbfe18220fdd..946986f3ac0d7538ed920ad4a519534a1c2624d5 100644 (file)
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
        u64 ts = ktime_get_ns();
        struct cec_fh *fh;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list)
                cec_queue_event_fh(fh, ev, ts);
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /*
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
        u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
                                      CEC_MODE_MONITOR_ALL;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list) {
                if (fh->mode_follower >= monitor_mode)
                        cec_queue_msg_fh(fh, msg);
        }
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /*
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
 {
        struct cec_fh *fh;
 
-       mutex_lock(&adap->devnode.fhs_lock);
+       mutex_lock(&adap->devnode.lock);
        list_for_each_entry(fh, &adap->devnode.fhs, list) {
                if (fh->mode_follower == CEC_MODE_FOLLOWER)
                        cec_queue_msg_fh(fh, msg);
        }
-       mutex_unlock(&adap->devnode.fhs_lock);
+       mutex_unlock(&adap->devnode.lock);
 }
 
 /* Notify userspace of an adapter state change. */
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
        if (!valid_la || msg->len <= 1)
                return;
 
+       if (adap->log_addrs.log_addr_mask == 0)
+               return;
+
        /*
         * Process the message on the protocol level. If is_reply is true,
         * then cec_receive_notify() won't pass on the reply to the listener(s)
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
                        dprintk(1, "could not claim LA %d\n", i);
        }
 
+       if (adap->log_addrs.log_addr_mask == 0 &&
+           !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+               goto unconfigure;
+
 configured:
        if (adap->log_addrs.log_addr_mask == 0) {
                /* Fall back to unregistered */
                las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
                las->log_addr_mask = 1 << las->log_addr[0];
+               for (i = 1; i < las->num_log_addrs; i++)
+                       las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
        adap->is_configured = true;
        adap->is_configuring = false;
@@ -1070,6 +1079,8 @@ configured:
                        cec_report_features(adap, i);
                cec_report_phys_addr(adap, i);
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
        mutex_unlock(&adap->lock);
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        u8 init_laddr = cec_msg_initiator(msg);
        u8 devtype = cec_log_addr2dev(adap, dest_laddr);
        int la_idx = cec_log_addr2idx(adap, dest_laddr);
-       bool is_directed = la_idx >= 0;
        bool from_unregistered = init_laddr == 0xf;
        struct cec_msg tx_cec_msg = { };
 
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
                 * Unprocessed messages are aborted if userspace isn't doing
                 * any processing either.
                 */
-               if (is_directed && !is_reply && !adap->follower_cnt &&
+               if (!is_broadcast && !is_reply && !adap->follower_cnt &&
                    !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
                        return cec_feature_abort(adap, msg);
                break;
index 7be7615a0fdf0e865b3d0433d731f8d1089efe0d..e274e2f223986bc4ed075447c9a17186b5de0dae 100644 (file)
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
                return -ENOTTY;
        if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
                return -EFAULT;
-       log_addrs.flags = 0;
+       log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
        mutex_lock(&adap->lock);
        if (!adap->is_configuring &&
            (!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        void __user *parg = (void __user *)arg;
 
        if (!devnode->registered)
-               return -EIO;
+               return -ENODEV;
 
        switch (cmd) {
        case CEC_ADAP_G_CAPS:
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
 
        filp->private_data = fh;
 
-       mutex_lock(&devnode->fhs_lock);
+       mutex_lock(&devnode->lock);
        /* Queue up initial state events */
        ev_state.state_change.phys_addr = adap->phys_addr;
        ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
        cec_queue_event_fh(fh, &ev_state, 0);
 
        list_add(&fh->list, &devnode->fhs);
-       mutex_unlock(&devnode->fhs_lock);
+       mutex_unlock(&devnode->lock);
 
        return 0;
 }
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
                cec_monitor_all_cnt_dec(adap);
        mutex_unlock(&adap->lock);
 
-       mutex_lock(&devnode->fhs_lock);
+       mutex_lock(&devnode->lock);
        list_del(&fh->list);
-       mutex_unlock(&devnode->fhs_lock);
+       mutex_unlock(&devnode->lock);
 
        /* Unhook pending transmits from this filehandle. */
        mutex_lock(&adap->lock);
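Replacing log_addrs.flags = 0 with a mask whitelists user-supplied flags: known bits pass through, unknown bits are silently dropped, leaving room for future flags. From userspace, opting in to the unregistered fallback then looks roughly like this sketch (most mandatory fields of struct cec_log_addrs are omitted; cec_fd is assumed to be an open CEC device node):

#include <linux/cec.h>
#include <sys/ioctl.h>

static int enable_unreg_fallback(int cec_fd)
{
        struct cec_log_addrs laddrs = {};

        laddrs.num_log_addrs = 1;
        laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
        laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
        /* cec_version, osd_name, primary_device_type etc. omitted;
         * a real caller must fill them in */
        return ioctl(cec_fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}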
index 112a5fae12f5dd1c11f592c1e3a087d3dc45e84c..3b1e4d2b190d1d01aa44e525bff57bd9ac905047 100644 (file)
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
 {
        /*
         * Check if the cec device is available. This needs to be done with
-        * the cec_devnode_lock held to prevent an open/unregister race:
+        * the devnode->lock held to prevent an open/unregister race:
         * without the lock, the device could be unregistered and freed between
         * the devnode->registered check and get_device() calls, leading to
         * a crash.
         */
-       mutex_lock(&cec_devnode_lock);
+       mutex_lock(&devnode->lock);
        /*
         * return ENXIO if the cec device has been removed
         * already or if it is not registered anymore.
         */
        if (!devnode->registered) {
-               mutex_unlock(&cec_devnode_lock);
+               mutex_unlock(&devnode->lock);
                return -ENXIO;
        }
        /* and increase the device refcount */
        get_device(&devnode->dev);
-       mutex_unlock(&cec_devnode_lock);
+       mutex_unlock(&devnode->lock);
        return 0;
 }
 
 void cec_put_device(struct cec_devnode *devnode)
 {
-       mutex_lock(&cec_devnode_lock);
        put_device(&devnode->dev);
-       mutex_unlock(&cec_devnode_lock);
 }
 
 /* Called when the last user of the cec device exits. */
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
        struct cec_devnode *devnode = to_cec_devnode(cd);
 
        mutex_lock(&cec_devnode_lock);
-
        /* Mark device node number as free */
        clear_bit(devnode->minor, cec_devnode_nums);
-
        mutex_unlock(&cec_devnode_lock);
+
        cec_delete_adapter(to_cec_adapter(devnode));
 }
 
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
 
        /* Initialization */
        INIT_LIST_HEAD(&devnode->fhs);
-       mutex_init(&devnode->fhs_lock);
+       mutex_init(&devnode->lock);
 
        /* Part 1: Find a free minor number */
        mutex_lock(&cec_devnode_lock);
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
 cdev_del:
        cdev_del(&devnode->cdev);
 clr_bit:
+       mutex_lock(&cec_devnode_lock);
        clear_bit(devnode->minor, cec_devnode_nums);
+       mutex_unlock(&cec_devnode_lock);
        return ret;
 }
 
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
 {
        struct cec_fh *fh;
 
+       mutex_lock(&devnode->lock);
+
        /* Check if devnode was never registered or already unregistered */
-       if (!devnode->registered || devnode->unregistered)
+       if (!devnode->registered || devnode->unregistered) {
+               mutex_unlock(&devnode->lock);
                return;
+       }
 
-       mutex_lock(&devnode->fhs_lock);
        list_for_each_entry(fh, &devnode->fhs, list)
                wake_up_interruptible(&fh->wait);
-       mutex_unlock(&devnode->fhs_lock);
 
        devnode->registered = false;
        devnode->unregistered = true;
+       mutex_unlock(&devnode->lock);
+
        device_del(&devnode->dev);
        cdev_del(&devnode->cdev);
        put_device(&devnode->dev);
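Taken together these hunks retire the dedicated fhs_lock in favour of a single devnode->lock that covers everything racing with open(): the registered check, the fh list, and the unregister transition. Condensed, using the field names from the hunks above:

        /* open(): check-and-insert is one critical section */
        mutex_lock(&devnode->lock);
        if (!devnode->registered) {
                mutex_unlock(&devnode->lock);
                return -ENXIO;          /* lost the race: node is gone */
        }
        list_add(&fh->list, &devnode->fhs);
        mutex_unlock(&devnode->lock);

        /* unregister(): flip the flags under the same lock */
        mutex_lock(&devnode->lock);
        devnode->registered = false;
        devnode->unregistered = true;
        mutex_unlock(&devnode->lock);

With both sides on one mutex, a file handle can never be added after the node is marked unregistered.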
index 94f8590492dcd1d8b771956d1720f4a7b1191a00..ed8bd95ad6d06a9f357cfd4054e3919fcd88876b 100644 (file)
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
                                  0, 0, 0, 0);
                break;
-       case MSGCODE_TRANSMIT_FAILED_LINE:
-               cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
-                                 1, 0, 0, 0);
-               break;
        case MSGCODE_TRANSMIT_FAILED_ACK:
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
                                  0, 1, 0, 0);
                break;
+       case MSGCODE_TRANSMIT_FAILED_LINE:
        case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
        case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
                cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
                        schedule_work(&pulse8->work);
                        break;
+               case MSGCODE_HIGH_ERROR:
+               case MSGCODE_LOW_ERROR:
+               case MSGCODE_RECEIVE_FAILED:
                case MSGCODE_TIMEOUT_ERROR:
                        break;
                case MSGCODE_COMMAND_ACCEPTED:
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
        int err;
 
        cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
-       cmd[1] = 3;
+       cmd[1] = signal_free_time;
        err = pulse8_send_and_wait(pulse8, cmd, 2,
                                   MSGCODE_COMMAND_ACCEPTED, 1);
        cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
index 0b1760cba6e35a77fc391f44f9cfc7c263b9e254..78f524fcd2142dd486d20aba5fe75f461cc5f466 100644 (file)
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
                if (!hif_workqueue) {
                        netdev_err(vif->ndev, "Failed to create workqueue\n");
                        result = -ENOMEM;
-                       goto _fail_mq_;
+                       goto _fail_;
                }
 
                setup_timer(&periodic_rssi, GetPeriodicRSSI,
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
        clients_count++;
 
-_fail_mq_:
        destroy_workqueue(hif_workqueue);
 _fail_:
        return result;
index 3a66255f14fc77a7376eedee063452c0b45c06be..32215110d597b8889023d7a2d7408c0ad94bcdf7 100644 (file)
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
                        mutex_unlock(&wl->hif_cs);
                }
                if (&wl->txq_event)
-                       wait_for_completion(&wl->txq_event);
+                       complete(&wl->txq_event);
 
                wlan_deinitialize_threads(dev);
                deinit_irq(dev);
index 9092600a1794ea97e10c6c834e465836c76c64b5..2c2e8aca8305d3f77f15b9d637b544dbd7c4ac83 100644 (file)
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
        struct wilc_priv *priv;
        struct wilc_vif *vif;
        u32 i = 0;
-       u32 associatedsta = 0;
+       u32 associatedsta = ~0;
        u32 inactive_time = 0;
        priv = wiphy_priv(wiphy);
        vif = netdev_priv(dev);
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
                        }
                }
 
-               if (associatedsta == -1) {
+               if (associatedsta == ~0) {
                        netdev_err(dev, "sta required is not associated\n");
                        return -ENOENT;
                }
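The underlying bug: associatedsta is a u32 that was initialized to 0, which is itself a valid station index, yet tested against -1, so the "not associated" branch was effectively dead code. Initializing to ~0 and comparing against ~0 makes the sentinel explicit. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t idx = 0;       /* old init: 0 is also a valid index */

        printf("%d\n", idx == (uint32_t)-1);    /* 0: sentinel never hit */

        idx = ~0u;              /* explicit all-ones sentinel */
        printf("%d\n", idx == ~0u);             /* 1 */
        return 0;
}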
index 1b4ff0f4c7168f578253d248507cb57fc02245c2..ed5dd0e8865746fe928f08459218779b97227103 100644 (file)
@@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name)
        if (!ccdev)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&ccdev->lock);
        ccdev->dev = dev;
        ccdev->clk = devm_clk_get(dev, clock_name);
        if (IS_ERR(ccdev->clk))
index 3788ed74c9abe59eaed8e8ce4621cac7ecaffe35..a32b41783b7778b7d32cd8fc44026fb8476ace0d 100644 (file)
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
+
 static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
 };
 
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+       .get_max_state          = cpufreq_get_max_state,
+       .get_cur_state          = cpufreq_get_cur_state,
+       .set_cur_state          = cpufreq_set_cur_state,
+       .get_requested_power    = cpufreq_get_requested_power,
+       .state2power            = cpufreq_state2power,
+       .power2state            = cpufreq_power2state,
+};
+
 /* Notifier for cpufreq policy change */
 static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np,
        struct cpumask temp_mask;
        unsigned int freq, i, num_cpus;
        int ret;
+       struct thermal_cooling_device_ops *cooling_ops;
 
        cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
        policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np,
        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
        if (capacitance) {
-               cpufreq_cooling_ops.get_requested_power =
-                       cpufreq_get_requested_power;
-               cpufreq_cooling_ops.state2power = cpufreq_state2power;
-               cpufreq_cooling_ops.power2state = cpufreq_power2state;
                cpufreq_dev->plat_get_static_power = plat_static_func;
 
                ret = build_dyn_power_table(cpufreq_dev, capacitance);
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np,
                        cool_dev = ERR_PTR(ret);
                        goto free_table;
                }
+
+               cooling_ops = &cpufreq_power_cooling_ops;
+       } else {
+               cooling_ops = &cpufreq_cooling_ops;
        }
 
        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np,
                 cpufreq_dev->id);
 
        cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-                                                     &cpufreq_cooling_ops);
+                                                     cooling_ops);
        if (IS_ERR(cool_dev))
                goto remove_idr;
 
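The structural fix: instead of mutating the shared cpufreq_cooling_ops at registration time (racy when power-aware and plain devices register concurrently, and it permanently leaves the power callbacks installed), both variants are defined up front and registration just picks a pointer. The pattern in miniature (hypothetical names; const added for illustration):

struct cooler_ops {
        int (*get_max_state)(void *priv);
        int (*get_requested_power)(void *priv); /* power-aware only */
};

static int max_state(void *priv) { return 4; }
static int req_power(void *priv) { return 100; }

static const struct cooler_ops basic_ops = {
        .get_max_state = max_state,
};

static const struct cooler_ops power_ops = {
        .get_max_state          = max_state,
        .get_requested_power    = req_power,
};

static const struct cooler_ops *pick_ops(int power_aware)
{
        return power_aware ? &power_ops : &basic_ops;   /* no shared writes */
}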
index 34fe36504a552cdaf112a6982483655f25dc2238..68bd1b56911850ee41dd7c508c7cad7f7c278ddf 100644 (file)
@@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
                instance->target = get_target_state(tz, cdev, percentage,
                                                    cur_trip_level);
 
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false;
+               mutex_unlock(&instance->cdev->lock);
                thermal_cdev_update(cdev);
        }
        return 0;
index fc52016d4e85be590e15f4e1976b8378d2c456d2..bb118a152cbbde3c570f90fb5355bc04afe54beb 100644 (file)
@@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                dev_dbg(&instance->cdev->device, "target=%d\n",
                                        (int)instance->target);
 
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false; /* cdev needs update */
+               mutex_unlock(&instance->cdev->lock);
        }
 
        mutex_unlock(&tz->lock);
index c5547bd711dbe2b4d681e54213e25e992afb51af..e473548b5d289d7dda47bdd8c423209fc1ab245f 100644 (file)
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 
 static int imx_thermal_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-               of_match_device(of_imx_thermal_match, &pdev->dev);
        struct imx_thermal_data *data;
        struct regmap *map;
        int measure_freq;
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
        }
        data->tempmon = map;
 
-       data->socdata = of_id->data;
+       data->socdata = of_device_get_match_data(&pdev->dev);
 
        /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
        if (data->socdata->version == TEMPMON_IMX6SX) {
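of_device_get_match_data() folds the of_match_device() lookup and the ->data dereference into one call, and it returns NULL instead of crashing when no match entry exists. A hedged probe-side sketch (the NULL check is added for illustration, not part of the hunk):

#include <linux/of_device.h>

        data->socdata = of_device_get_match_data(&pdev->dev);
        if (!data->socdata)
                return -ENODEV; /* defensive: no matching OF entry */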
index a578cd257db4b57ef64751a2d85a5078355d5d1b..1891f34ab7fcd746405fb6f7b7696c36c54133f2 100644 (file)
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = {
        .remove = int3406_thermal_remove,
        .driver = {
                   .name = "int3406 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = int3406_thermal_match,
                   },
 };
index 6a6ec1c95a7a2d04c4afd6b68af03d3335627f55..9b4815e81b0df01cf2160d752499b670c4a2d731 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/thermal.h>
+#include <linux/pm.h>
 
 /* Intel PCH thermal Device IDs */
 #define PCH_THERMAL_DID_WPT    0x9CA4 /* Wildcat Point */
@@ -65,6 +66,7 @@ struct pch_thermal_device {
        unsigned long crt_temp;
        int hot_trip_id;
        unsigned long hot_temp;
+       bool bios_enabled;
 };
 
 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
@@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
        *nr_trips = 0;
 
        /* Check if BIOS has already enabled thermal sensor */
-       if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+       if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) {
+               ptd->bios_enabled = true;
                goto read_trips;
+       }
 
        tsel = readb(ptd->hw_base + WPT_TSEL);
        /*
@@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
        return 0;
 }
 
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+       u8 tsel;
+
+       if (ptd->bios_enabled)
+               return 0;
+
+       tsel = readb(ptd->hw_base + WPT_TSEL);
+
+       writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+       return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+       u8 tsel;
+
+       if (ptd->bios_enabled)
+               return 0;
+
+       tsel = readb(ptd->hw_base + WPT_TSEL);
+
+       writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+       return 0;
+}
+
 struct pch_dev_ops {
        int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
        int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+       int (*suspend)(struct pch_thermal_device *ptd);
+       int (*resume)(struct pch_thermal_device *ptd);
 };
 
 
@@ -140,6 +174,8 @@ struct pch_dev_ops {
 static const struct pch_dev_ops pch_dev_ops_wpt = {
        .hw_init = pch_wpt_init,
        .get_temp = pch_wpt_get_temp,
+       .suspend = pch_wpt_suspend,
+       .resume = pch_wpt_resume,
 };
 
 static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
@@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static int intel_pch_thermal_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+       return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+       return ptd->ops->resume(ptd);
+}
+
 static struct pci_device_id intel_pch_thermal_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
@@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = {
 };
 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
 
+static const struct dev_pm_ops intel_pch_pm_ops = {
+       .suspend = intel_pch_thermal_suspend,
+       .resume = intel_pch_thermal_resume,
+};
+
 static struct pci_driver intel_pch_thermal_driver = {
        .name           = "intel_pch_thermal",
        .id_table       = intel_pch_thermal_id,
        .probe          = intel_pch_thermal_probe,
        .remove         = intel_pch_thermal_remove,
+       .driver.pm      = &intel_pch_pm_ops,
 };
 
 module_pci_driver(intel_pch_thermal_driver);
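Since the handwritten dev_pm_ops fills only .suspend and .resume, an equivalent spelling could use the sleep-ops helper macro, which wires the same callbacks into the hibernation slots as well; a sketch of the alternative, not what the hunk does:

#include <linux/pm.h>

static SIMPLE_DEV_PM_OPS(intel_pch_pm_ops, intel_pch_thermal_suspend,
                         intel_pch_thermal_resume);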
index 015ce2eb6eb7ba0a254e2918c8d9710011283fda..0e4dc0afcfd244d510b003575249c4c3ce1d16bd 100644 (file)
@@ -388,7 +388,7 @@ static int clamp_thread(void *arg)
                int sleeptime;
                unsigned long target_jiffies;
                unsigned int guard;
-               unsigned int compensation = 0;
+               unsigned int compensated_ratio;
                int interval; /* jiffies to sleep for each attempt */
                unsigned int duration_jiffies = msecs_to_jiffies(duration);
                unsigned int window_size_now;
@@ -409,8 +409,11 @@ static int clamp_thread(void *arg)
                 * c-states, thus we need to compensate the injected idle ratio
                 * to achieve the actual target reported by the HW.
                 */
-               compensation = get_compensation(target_ratio);
-               interval = duration_jiffies*100/(target_ratio+compensation);
+               compensated_ratio = target_ratio +
+                       get_compensation(target_ratio);
+               if (compensated_ratio <= 0)
+                       compensated_ratio = 1;
+               interval = duration_jiffies * 100 / compensated_ratio;
 
                /* align idle time */
                target_jiffies = roundup(jiffies, interval);
@@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
                goto exit_set;
        } else  if (set_target_ratio > 0 && new_target_ratio == 0) {
                pr_info("Stop forced idle injection\n");
-               set_target_ratio = 0;
                end_power_clamp();
+               set_target_ratio = 0;
        } else  /* adjust currently running */ {
                set_target_ratio = new_target_ratio;
                /* make new set_target_ratio visible to other cpus */
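The arithmetic shows why the clamp is needed: interval = duration_jiffies * 100 / compensated_ratio divides by the target ratio plus its (possibly negative) compensation, which can sum to zero. Worked numbers:

        unsigned int duration_jiffies = 24, compensated_ratio = 30;
        unsigned int interval = duration_jiffies * 100 / compensated_ratio; /* 80 */

Note that compensated_ratio is unsigned, so the "<= 0" test in the hunk effectively guards only the == 0 case, which is exactly the division hazard.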
index 2f1a863a8e15bc834e007fea264d262d180a04cf..b4d3116cfdafe81767b2b1c91fcab4a034f29041 100644 (file)
@@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
                        continue;
 
                instance->target = 0;
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false;
+               mutex_unlock(&instance->cdev->lock);
                thermal_cdev_update(instance->cdev);
        }
 }
index 71a339271fa5fe9d796d86be9077ebbc13d4f0dc..5f817923f374f5f08412bda03876c831da68b697 100644 (file)
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(priv->zone)) {
                        dev_err(dev, "can't register thermal zone\n");
                        ret = PTR_ERR(priv->zone);
+                       priv->zone = NULL;
                        goto error_unregister;
                }
 
index ea9366ad3e6bb285e52e368691a0d495cbb3429f..bcef2e7c4ec96f1cfc662019ccf0440d52d26328 100644 (file)
@@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                        update_passive_instance(tz, trip_type, -1);
 
                instance->initialized = true;
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false; /* cdev needs update */
+               mutex_unlock(&instance->cdev->lock);
        }
 
        mutex_unlock(&tz->lock);
index 5133cd1e10b7ae99d838823af2eef272aa00ab76..e2fc6161dded9650c300cf829a381c0a2d14b38c 100644 (file)
@@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
                return ret;
 
        instance->target = state;
+       mutex_lock(&cdev->lock);
        cdev->updated = false;
+       mutex_unlock(&cdev->lock);
        thermal_cdev_update(cdev);
 
        return 0;
@@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
        struct thermal_instance *instance;
        unsigned long target = 0;
 
+       mutex_lock(&cdev->lock);
        /* cooling device is updated */
-       if (cdev->updated)
+       if (cdev->updated) {
+               mutex_unlock(&cdev->lock);
                return;
+       }
 
-       mutex_lock(&cdev->lock);
        /* Make sure cdev enters the deepest cooling state */
        list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
                dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
                if (instance->target > target)
                        target = instance->target;
        }
-       mutex_unlock(&cdev->lock);
        cdev->ops->set_cur_state(cdev, target);
        cdev->updated = true;
+       mutex_unlock(&cdev->lock);
        trace_cdev_update(cdev, target);
        dev_dbg(&cdev->device, "set to state %lu\n", target);
 }
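Across the governors and thermal_core above, the change is one widened critical section: the updated flag is now only read or written under cdev->lock, and thermal_cdev_update() holds the lock across the target computation, the set_cur_state() call, and the flag write, so a concurrent "updated = false" cannot interleave. Condensed from the hunks:

        mutex_lock(&cdev->lock);
        if (cdev->updated) {                    /* read under the lock */
                mutex_unlock(&cdev->lock);
                return;
        }
        /* ... compute the deepest target across instances ... */
        cdev->ops->set_cur_state(cdev, target);
        cdev->updated = true;                   /* write under the same lock */
        mutex_unlock(&cdev->lock);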
index 06fd2ed9ef9d13bf0ab09f727020f873152b8da9..c41c7742903ab43b2132241574376b85849666b9 100644 (file)
@@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        return result;
 }
+EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);
 
 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 {
@@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        hwmon_device_unregister(hwmon->device);
        kfree(hwmon);
 }
+EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
index 9c15344b657acebb240e8555af47e572314acd5c..a8c20413dbda9711e1828a064f8a940900442e31 100644 (file)
@@ -648,6 +648,12 @@ static struct pci_device_id nhi_ids[] = {
                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
+       {
+               .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+               .vendor = PCI_VENDOR_ID_INTEL,
+               .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+               .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+       },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
index 1e116f53d6dddb254c465ebd5362d2c6162066a1..9840fdecb73b5226f36ae0b61c4761f23d4dfbff 100644 (file)
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 
        if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
            sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
                tb_sw_warn(sw, "unsupported switch device id %#x\n",
                           sw->config.device_id);
 
index 122e0e4029fee9e42f4b1ba37f0439728f0d54a4..1a16feac9a36070715bbe26974661b823e3c08ab 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
 
-#include "../serial_mctrl_gpio.h"
-
 struct uart_8250_dma {
        int (*tx_dma)(struct uart_8250_port *p);
        int (*rx_dma)(struct uart_8250_port *p);
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p);
 
 static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
 {
-       int mctrl_gpio = 0;
-
        serial_out(up, UART_MCR, value);
-
-       if (value & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (value & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio_set(up->gpios, mctrl_gpio);
 }
 
 static inline int serial8250_in_MCR(struct uart_8250_port *up)
 {
-       int mctrl, mctrl_gpio = 0;
-
-       mctrl = serial_in(up, UART_MCR);
-
-       /* save current MCR values */
-       if (mctrl & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (mctrl & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
-
-       if (mctrl_gpio & TIOCM_RTS)
-               mctrl |= UART_MCR_RTS;
-       else
-               mctrl &= ~UART_MCR_RTS;
-
-       if (mctrl_gpio & TIOCM_DTR)
-               mctrl |= UART_MCR_DTR;
-       else
-               mctrl &= ~UART_MCR_DTR;
-
-       return mctrl;
+       return serial_in(up, UART_MCR);
 }
 
 #if defined(__alpha__) && !defined(CONFIG_PCI)
index 13ad5c3d2e681893aeaa0d675d17c9f04f06b967..dcf43f66404f12d2bd290a30adfe494dd056162e 100644 (file)
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
        uart = serial8250_find_match_or_unused(&up->port);
        if (uart && uart->port.type != PORT_8250_CIR) {
-               struct mctrl_gpios *gpios;
-
                if (uart->port.dev)
                        uart_remove_one_port(&serial8250_reg, &uart->port);
 
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->port.flags & UPF_FIXED_TYPE)
                        uart->port.type = up->port.type;
 
-               gpios = mctrl_gpio_init(&uart->port, 0);
-               if (IS_ERR(gpios)) {
-                       if (PTR_ERR(gpios) != -ENOSYS)
-                               return PTR_ERR(gpios);
-               } else
-                       uart->gpios = gpios;
-
                serial8250_set_defaults(uart);
 
                /* Possibly override default I/O functions.  */
index 737b4b3957b0bc2470cde5574eed1e5f7230c90a..0facc789fe7d4133e1c5e32d0a471db53bb3baa5 100644 (file)
@@ -31,7 +31,7 @@
 #define IO_ADDR2 0x60
 #define LDN 0x7
 
-#define IRQ_MODE       0x70
+#define FINTEK_IRQ_MODE        0x70
 #define IRQ_SHARE      BIT(4)
 #define IRQ_MODE_MASK  (BIT(6) | BIT(5))
 #define IRQ_LEVEL_LOW  0
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
        outb(LDN, pdata->base_port + ADDR_PORT);
        outb(pdata->index, pdata->base_port + DATA_PORT);
 
-       outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+       outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
        tmp = inb(pdata->base_port + DATA_PORT);
 
        tmp &= ~IRQ_MODE_MASK;
index 339de9cd086612c60d17ef3c581b59df34a0022a..20c5db2f426406a50fc41ef90f2e268deac18f72 100644 (file)
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p,
        unsigned long w = BIT(24) - 1;
        unsigned long mul, div;
 
+       /* Gracefully handle the B0 case: fall back to B9600 */
+       fuart = fuart ? fuart : 9600 * 16;
+
        if (mid->board->freq < fuart) {
                /* Find prescaler value that satisfies Fuart < Fref */
                if (mid->board->freq > baud)
index e14982f36a04dff107b11f1f5e75b2b7a587390a..61ad6c3b20a02b82e6aa95b46fc6d467edc86334 100644 (file)
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                               UART_GPIO_RTS))) {
-               /*
-                * Turn off autoRTS if RTS is lowered and restore autoRTS
-                * setting if RTS is raised
-                */
-               lcr = serial_in(up, UART_LCR);
-               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-               if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
-                       priv->efr |= UART_EFR_RTS;
-               else
-                       priv->efr &= ~UART_EFR_RTS;
-               serial_out(up, UART_EFR, priv->efr);
-               serial_out(up, UART_LCR, lcr);
-       }
+       /*
+        * Turn off autoRTS if RTS is lowered and restore autoRTS setting
+        * if RTS is raised
+        */
+       lcr = serial_in(up, UART_LCR);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+               priv->efr |= UART_EFR_RTS;
+       else
+               priv->efr &= ~UART_EFR_RTS;
+       serial_out(up, UART_EFR, priv->efr);
+       serial_out(up, UART_LCR, lcr);
 }
 
 /*
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port,
        priv->efr = 0;
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
-       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
-               && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                                       UART_GPIO_RTS))) {
+       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 20ebaea5c414d6d70f371aa9d367cd40327fa909..bc51b32b2774e961627d7778c43d8ddf591249b4 100644 (file)
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954       0x7954
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958       0x7958
 
+#define PCI_VENDOR_ID_ACCESIO                  0x494f
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB    0x1051
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S     0x1053
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB    0x105C
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S     0x105E
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB  0x1091
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2   0x1093
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB  0x1099
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4   0x109B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB   0x10D1
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM    0x10D3
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB   0x10DA
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM    0x10DC
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1   0x1108
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2   0x1110
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2   0x1111
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4   0x1118
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4   0x1119
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S      0x1152
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S      0x115A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2    0x1190
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2   0x1191
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4    0x1198
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4   0x1199
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM     0x11D0
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4    0x105A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4    0x105B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8    0x106A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8    0x106B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4    0x1098
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8    0x10A9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM     0x10D9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM     0x10E9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM     0x11D8
+
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588        0x1588
@@ -5112,6 +5149,108 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0,
                0, pbn_pericom_PI7C9X7958 },
+       /*
+        * ACCES I/O Products quad
+        */
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
        /*
         * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
         */
index 7481b95c6d846770b10f77a64e8231c7b2ad4e8e..bdfa659b9606a93360fd6e978a6b9c89d9a3e251 100644 (file)
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_disable_ms(up->gpios);
-
        up->ier &= ~UART_IER_MSI;
        serial_port_out(port, UART_IER, up->ier);
 }
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_enable_ms(up->gpios);
-
        up->ier |= UART_IER_MSI;
 
        serial8250_rpm_get(up);
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
                ret |= TIOCM_DSR;
        if (status & UART_MSR_CTS)
                ret |= TIOCM_CTS;
-
-       return mctrl_gpio_get(up->gpios, &ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
 
index c9ec839a5ddf447a6b7b3fe3158ad4104e3024c2..7c6f7afca5ddeed7b25fa6272246fc539bf31b04 100644 (file)
@@ -6,7 +6,6 @@
 config SERIAL_8250
        tristate "8250/16550 and compatible serial support"
        select SERIAL_CORE
-       select SERIAL_MCTRL_GPIO if GPIOLIB
        ---help---
          This selects whether you want to include the driver for the standard
          serial ports.  The standard answer is Y.  People who might say N
index 065f5d97aa6708ebb3239d7778bb185d19412e5b..b93356834bb5660bb451f712c7ef6f13018bc5d3 100644 (file)
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
        int retval;
        struct ci_hw_ep *hwep;
 
+       /*
+        * Unexpected USB controller behavior, caused by bad signal integrity
+        * or ground reference problems, can lead to isr_setup_status_phase
+        * being called with ci->status equal to NULL.
+        * If this situation occurs, you should review your USB hardware design.
+        */
+       if (WARN_ON_ONCE(!ci->status))
+               return -EPIPE;
+
        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
        ci->status->context = ci;
        ci->status->complete = isr_setup_status_complete;
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
 {
        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
 
-       /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
-       if (ci_otg_is_fsm_mode(ci))
+       /*
+        * The Data+ pullup is controlled by the OTG state machine in OTG
+        * fsm mode; don't touch Data+ in host mode for dual-role configs.
+        */
+       if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
                return 0;
 
        pm_runtime_get_sync(&ci->gadget.dev);
index 71912301ef7f804fb9698d7915f7136041617906..0f3f62e81e5b20b50140ab482df70e818de860a5 100644 (file)
@@ -1354,7 +1354,6 @@ made_compressed_probe:
        spin_lock_init(&acm->write_lock);
        spin_lock_init(&acm->read_lock);
        mutex_init(&acm->mutex);
-       acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
        acm->is_int_ep = usb_endpoint_xfer_int(epread);
        if (acm->is_int_ep)
                acm->bInterval = epread->bInterval;
@@ -1394,14 +1393,14 @@ made_compressed_probe:
                urb->transfer_dma = rb->dma;
                if (acm->is_int_ep) {
                        usb_fill_int_urb(urb, acm->dev,
-                                        acm->rx_endpoint,
+                                        usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
                                         rb->base,
                                         acm->readsize,
                                         acm_read_bulk_callback, rb,
                                         acm->bInterval);
                } else {
                        usb_fill_bulk_urb(urb, acm->dev,
-                                         acm->rx_endpoint,
+                                         usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
                                          rb->base,
                                          acm->readsize,
                                          acm_read_bulk_callback, rb);
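Dropping the cached acm->rx_endpoint removes a type mismatch: one pipe value, built with usb_rcvbulkpipe(), was being reused even when the read endpoint was interrupt type. Building the pipe at fill time keeps the pipe type and the URB type in agreement; roughly:

        unsigned int pipe = usb_endpoint_xfer_int(epread)
                ? usb_rcvintpipe(usb_dev, epread->bEndpointAddress)
                : usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);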
index 05ce308d5d2afc10d91225e97e227396ace41cb0..1f1eabfd846280ada598b776504d7d01e5b69886 100644 (file)
@@ -96,7 +96,6 @@ struct acm {
        struct acm_rb read_buffers[ACM_NR];
        struct acm_wb *putbuffer;                       /* for acm_tty_put_char() */
        int rx_buflimit;
-       int rx_endpoint;
        spinlock_t read_lock;
        int write_used;                                 /* number of non-empty write buffers */
        int transmitting;
index 31ccdccd7a04fda36003cdc3ba58b82e25773fa9..a2d90aca779fa16fb07e1327f33a41129c719e63 100644 (file)
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                                                        ep, buffer, size);
 }
 
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+       [USB_ENDPOINT_XFER_CONTROL] = 8,
+       [USB_ENDPOINT_XFER_ISOC] = 0,
+       [USB_ENDPOINT_XFER_BULK] = 0,
+       [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+       [USB_ENDPOINT_XFER_CONTROL] = 64,
+       [USB_ENDPOINT_XFER_ISOC] = 1023,
+       [USB_ENDPOINT_XFER_BULK] = 64,
+       [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+       [USB_ENDPOINT_XFER_CONTROL] = 64,
+       [USB_ENDPOINT_XFER_ISOC] = 1024,
+       [USB_ENDPOINT_XFER_BULK] = 512,
+       [USB_ENDPOINT_XFER_INT] = 1024,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+       [USB_ENDPOINT_XFER_CONTROL] = 512,
+       [USB_ENDPOINT_XFER_ISOC] = 1024,
+       [USB_ENDPOINT_XFER_BULK] = 1024,
+       [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
     int asnum, struct usb_host_interface *ifp, int num_ep,
     unsigned char *buffer, int size)
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        struct usb_endpoint_descriptor *d;
        struct usb_host_endpoint *endpoint;
        int n, i, j, retval;
+       unsigned int maxp;
+       const unsigned short *maxpacket_maxes;
 
        d = (struct usb_endpoint_descriptor *) buffer;
        buffer += d->bLength;
@@ -213,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        memcpy(&endpoint->desc, d, n);
        INIT_LIST_HEAD(&endpoint->urb_list);
 
-       /* Fix up bInterval values outside the legal range. Use 32 ms if no
-        * proper value can be guessed. */
+       /*
+        * Fix up bInterval values outside the legal range.
+        * Use 10 or 8 ms if no proper value can be guessed.
+        */
        i = 0;          /* i = min, j = max, n = default */
        j = 255;
        if (usb_endpoint_xfer_int(d)) {
@@ -223,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:
                case USB_SPEED_HIGH:
-                       /* Many device manufacturers are using full-speed
+                       /*
+                        * Many device manufacturers are using full-speed
                         * bInterval values in high-speed interrupt endpoint
-                        * descriptors. Try to fix those and fall back to a
-                        * 32 ms default value otherwise. */
+                        * descriptors. Try to fix those and fall back to an
+                        * 8-ms default value otherwise.
+                        */
                        n = fls(d->bInterval*8);
                        if (n == 0)
-                               n = 9;  /* 32 ms = 2^(9-1) uframes */
+                               n = 7;  /* 8 ms = 2^(7-1) uframes */
                        j = 16;
 
                        /*
@@ -244,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        }
                        break;
                default:                /* USB_SPEED_FULL or _LOW */
-                       /* For low-speed, 10 ms is the official minimum.
+                       /*
+                        * For low-speed, 10 ms is the official minimum.
                         * But some "overclocked" devices might want faster
-                        * polling so we'll allow it. */
-                       n = 32;
+                        * polling so we'll allow it.
+                        */
+                       n = 10;
                        break;
                }
        } else if (usb_endpoint_xfer_isoc(d)) {
@@ -255,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                j = 16;
                switch (to_usb_device(ddev)->speed) {
                case USB_SPEED_HIGH:
-                       n = 9;          /* 32 ms = 2^(9-1) uframes */
+                       n = 7;          /* 8 ms = 2^(7-1) uframes */
                        break;
                default:                /* USB_SPEED_FULL */
-                       n = 6;          /* 32 ms = 2^(6-1) frames */
+                       n = 4;          /* 8 ms = 2^(4-1) frames */
                        break;
                }
        }
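The new defaults follow from the exponential bInterval encoding: the period is 2^(bInterval-1) units, where a unit is a 125 µs microframe at high speed and a 1 ms frame at full speed, so both defaults come out at 8 ms:

        unsigned int hs_uframes = 1 << (7 - 1); /* 64 uframes * 125 us = 8 ms */
        unsigned int fs_frames  = 1 << (4 - 1); /*  8 frames  *   1 ms = 8 ms */

By the same arithmetic the old defaults (n = 9 and n = 6) were 32 ms.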
@@ -286,6 +319,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
        }
 
+       /* Validate the wMaxPacketSize field */
+       maxp = usb_endpoint_maxp(&endpoint->desc);
+
+       /* Find the highest legal maxpacket size for this endpoint */
+       i = 0;          /* additional transactions per microframe */
+       switch (to_usb_device(ddev)->speed) {
+       case USB_SPEED_LOW:
+               maxpacket_maxes = low_speed_maxpacket_maxes;
+               break;
+       case USB_SPEED_FULL:
+               maxpacket_maxes = full_speed_maxpacket_maxes;
+               break;
+       case USB_SPEED_HIGH:
+               /* Bits 12..11 are allowed only for HS periodic endpoints */
+               if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+                       i = maxp & (BIT(12) | BIT(11));
+                       maxp &= ~i;
+               }
+               /* fallthrough */
+       default:
+               maxpacket_maxes = high_speed_maxpacket_maxes;
+               break;
+       case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
+               maxpacket_maxes = super_speed_maxpacket_maxes;
+               break;
+       }
+       j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+       if (maxp > j) {
+               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+                   cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+               maxp = j;
+               endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+       }
+
        /*
         * Some buggy high speed devices have bulk endpoints using
         * maxpacket sizes other than 512.  High speed HCDs may not
@@ -293,9 +362,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
         */
        if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
                        && usb_endpoint_xfer_bulk(d)) {
-               unsigned maxp;
-
-               maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
                if (maxp != 512)
                        dev_warn(ddev, "config %d interface %d altsetting %d "
                                "bulk endpoint 0x%X has invalid maxpacket %d\n",
index e9f5043a2167c2be13fb3cd5b5aab93efa7d9c23..09c8d9ca61aea24d7b3d6669baacd0b43b7f04f6 100644 (file)
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
                goto error_decrease_mem;
        }
 
-       mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+       mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+                       &dma_handle);
        if (!mem) {
                ret = -ENOMEM;
                goto error_free_usbm;
@@ -1708,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        as->urb->start_frame = uurb->start_frame;
        as->urb->number_of_packets = number_of_packets;
        as->urb->stream_id = stream_id;
-       if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
-                       ps->dev->speed == USB_SPEED_HIGH)
-               as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
-       else
-               as->urb->interval = ep->desc.bInterval;
+
+       if (ep->desc.bInterval) {
+               if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+                               ps->dev->speed == USB_SPEED_HIGH ||
+                               ps->dev->speed >= USB_SPEED_SUPER)
+                       as->urb->interval = 1 <<
+                                       min(15, ep->desc.bInterval - 1);
+               else
+                       as->urb->interval = ep->desc.bInterval;
+       }
+
        as->urb->context = as;
        as->urb->complete = async_completed;
        for (totlen = u = 0; u < number_of_packets; u++) {
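
The interval computation above follows the USB convention that bInterval is an exponent for isochronous transfers and for high/super speed devices. A hedged stand-alone sketch of that mapping (the helper name is invented for illustration):

/* Hypothetical helper: for exponential encodings the interval is
 * 2^(bInterval - 1) (micro)frames, with the shift clamped to 15 as in
 * the hunk above; otherwise bInterval is the interval itself. A zero
 * bInterval leaves the URB's interval untouched. */
static unsigned int urb_interval(unsigned int bInterval, int exponential)
{
        if (!bInterval)
                return 0;
        if (exponential)
                return 1u << (bInterval - 1 < 15 ? bInterval - 1 : 15);
        return bInterval;
}

For example, urb_interval(4, 1) == 8, i.e. eight microframes for a high-speed interrupt endpoint.
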
@@ -2582,7 +2589,9 @@ static unsigned int usbdev_poll(struct file *file,
        if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
                mask |= POLLOUT | POLLWRNORM;
        if (!connected(ps))
-               mask |= POLLERR | POLLHUP;
+               mask |= POLLHUP;
+       if (list_empty(&ps->list))
+               mask |= POLLERR;
        return mask;
 }
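
The split above lets userspace tell "device disconnected, completions still pending" (POLLHUP alone) apart from "nothing left to reap" (POLLERR). A small userspace sketch, assuming fd is an already-open usbfs device node:

#include <poll.h>
#include <stdio.h>

/* fd is assumed to be an open usbfs device node; setup is hypothetical. */
static void check_state(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };

        if (poll(&pfd, 1, 0) < 0)
                return;
        if (pfd.revents & POLLERR)
                printf("fd dead: no completions left to reap\n");
        else if (pfd.revents & POLLHUP)
                printf("device gone, but completed URBs still reapable\n");
}
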
 
index bee13517676f9b2f00e3cd4e3fd4185f5aaa13c9..1d5fc32d06d007a6f64526a531da8ae69e8e6950 100644 (file)
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
        /* Continue a partial initialization */
        if (type == HUB_INIT2 || type == HUB_INIT3) {
-               device_lock(hub->intfdev);
+               device_lock(&hdev->dev);
 
                /* Was the hub disconnected while we were waiting? */
-               if (hub->disconnected) {
-                       device_unlock(hub->intfdev);
-                       kref_put(&hub->kref, hub_release);
-                       return;
-               }
+               if (hub->disconnected)
+                       goto disconnected;
                if (type == HUB_INIT2)
                        goto init2;
                goto init3;
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                        queue_delayed_work(system_power_efficient_wq,
                                        &hub->init_work,
                                        msecs_to_jiffies(delay));
-                       device_unlock(hub->intfdev);
+                       device_unlock(&hdev->dev);
                        return;         /* Continues at init3: below */
                } else {
                        msleep(delay);
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
        /* Scan all ports that need attention */
        kick_hub_wq(hub);
 
-       /* Allow autosuspend if it was suppressed */
-       if (type <= HUB_INIT3)
+       if (type == HUB_INIT2 || type == HUB_INIT3) {
+               /* Allow autosuspend if it was suppressed */
+ disconnected:
                usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
-       if (type == HUB_INIT2 || type == HUB_INIT3)
-               device_unlock(hub->intfdev);
+               device_unlock(&hdev->dev);
+       }
 
        kref_put(&hub->kref, hub_release);
 }
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
        struct usb_device *hdev = hub->hdev;
        int i;
 
-       cancel_delayed_work_sync(&hub->init_work);
-
        /* hub_wq and related activity won't re-trigger */
        hub->quiescing = 1;
 
index 9fae0291cd69f8c3314f547eeb9d0557af517e49..d64551243789a2a1cd2ea045df62d8407a8fbde1 100644 (file)
@@ -868,6 +868,7 @@ struct dwc2_hsotg {
        void *priv;
        int     irq;
        struct clk *clk;
+       struct reset_control *reset;
 
        unsigned int queuing_high_bandwidth:1;
        unsigned int srp_success:1;
index fc6f5251de5d1e891db129e77f782766ea4b6ef9..530959a8a6d13952b18ab1f87f6e6d47f7aaeb49 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
 
 #include <linux/usb/of.h>
 
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
 {
        int i, ret;
 
+       hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+       if (IS_ERR(hsotg->reset)) {
+               ret = PTR_ERR(hsotg->reset);
+               switch (ret) {
+               case -ENOENT:
+               case -ENOTSUPP:
+                       hsotg->reset = NULL;
+                       break;
+               default:
+                       dev_err(hsotg->dev, "error getting reset control %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       if (hsotg->reset)
+               reset_control_deassert(hsotg->reset);
+
        /* Set default UTMI width */
        hsotg->phyif = GUSBCFG_PHYIF16;
 
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
        if (hsotg->ll_hw_enabled)
                dwc2_lowlevel_hw_disable(hsotg);
 
+       if (hsotg->reset)
+               reset_control_assert(hsotg->reset);
+
        return 0;
 }
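
For context, the reset handling added above follows the usual optional-resource pattern of that era: -ENOENT/-ENOTSUPP mean "no reset line described, carry on", anything else is fatal. A minimal sketch, assuming 4.8-era devm_reset_control_get_optional() semantics:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/reset.h>

static int get_optional_reset(struct device *dev,
                              struct reset_control **out)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_optional(dev, "dwc2");
        if (IS_ERR(rst)) {
                int ret = PTR_ERR(rst);

                /* No reset line described: fine, just skip it. */
                if (ret == -ENOENT || ret == -ENOTSUPP) {
                        *out = NULL;
                        return 0;
                }
                return ret;     /* real error (e.g. -EPROBE_DEFER) */
        }
        *out = rst;
        reset_control_deassert(rst);    /* bring the block out of reset */
        return 0;
}
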
 
index 946643157b78988a5c45655d9afe55a91d960b3d..35d092456bec1da828ea41476e421d47809380a5 100644 (file)
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev)
        }
 
        pm_runtime_mark_last_busy(dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
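
The one-line fix above restores the get/put balance that runtime PM requires. A generic sketch of that discipline (not the dwc3 code itself):

#include <linux/pm_runtime.h>

/* Sketch only: every successful runtime-PM get must be paired with a
 * put once the device has been marked busy, or the device can never
 * autosuspend again. */
static int do_work(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... access the hardware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put(dev);    /* the put the hunk above adds */
        return 0;
}
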
index 22dfc3dd6a13037e81561bba94b7c65256787f7f..33ab2a203c1bdc4a9d42d7bed9869ad83d2b4cb8 100644 (file)
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
        int ret;
 
        ret = sprintf(str, "ep%d%s: ", epnum >> 1,
-                       (epnum & 1) ? "in" : "in");
+                       (epnum & 1) ? "in" : "out");
        if (ret < 0)
                return "UNKNOWN";
 
index 974335377d9f185b282295c4aa4c688d0798f9bc..e56d59b19a0ecacaf632cccdf7680ac5114075b7 100644 (file)
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
        if (!simple->clks)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, simple);
        simple->dev = dev;
 
        for (i = 0; i < simple->num_clocks; i++) {
index 45f5a232d9fb658b76845e19bb3ef4fa0a0aabf6..6df0f5dad9a4cdecd4c1841c6a277de167e1960e 100644 (file)
@@ -37,6 +37,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT                        0x0aaa
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
 static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
@@ -241,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
        return -EBUSY;
 }
 
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+       struct platform_device *dwc3 = dev_get_drvdata(dev);
+
+       return pm_runtime_get(&dwc3->dev);
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
 static int dwc3_pci_pm_dummy(struct device *dev)
 {
        /*
@@ -253,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev)
         */
        return 0;
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
-       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
                NULL)
 };
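
The guard shuffle above keeps runtime callbacks under CONFIG_PM and system-sleep callbacks under CONFIG_PM_SLEEP. An illustrative skeleton of that layout, with invented foo_* names:

#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }
#endif

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { return 0; }
#endif

/* The SET_*_PM_OPS macros compile away when their option is off, so
 * each callback group needs the matching #ifdef, no more, no less. */
static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_suspend)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
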
 
index 8f8c2157910e6b848561f75f192b7a45dd4225d1..122e64df2f4dc173123ddbfe7ef934d8630851a2 100644 (file)
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
        if (!req->request.no_interrupt && !chain)
                trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
 
-       if (last)
+       if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
                trb->ctrl |= DWC3_TRB_CTRL_LST;
 
        if (chain)
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 {
-       unsigned long           timeout;
+       int                     retries;
 
        int                     ret;
        u32                     reg;
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
        }
 
        /* poll until Link State changes to ON */
-       timeout = jiffies + msecs_to_jiffies(100);
+       retries = 20000;
 
-       while (!time_after(jiffies, timeout)) {
+       while (retries--) {
                reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 
                /* in HS, means ON */
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
 
 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
                struct dwc3_request *req, struct dwc3_trb *trb,
-               const struct dwc3_event_depevt *event, int status)
+               const struct dwc3_event_depevt *event, int status,
+               int chain)
 {
        unsigned int            count;
        unsigned int            s_pkt = 0;
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
        dep->queued_requests--;
        trace_dwc3_complete_trb(dep, trb);
 
+       /*
+        * If we're in the middle of series of chained TRBs and we
+        * receive a short transfer along the way, DWC3 will skip
+        * through all TRBs including the last TRB in the chain (the
+        * one where the CHN bit is zero). DWC3 will also avoid
+        * clearing the HWO bit, and SW has to do it manually.
+        *
+        * We're going to do that here to avoid problems of HW trying
+        * to use bogus TRBs for transfers.
+        */
+       if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+               trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
        if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
-               /*
-                * We continue despite the error. There is not much we
-                * can do. If we don't clean it up we loop forever. If
-                * we skip the TRB then it gets overwritten after a
-                * while since we use them in a ring buffer. A BUG()
-                * would help. Lets hope that if this occurs, someone
-                * fixes the root cause instead of looking away :)
-                */
-               dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
-                               dep->name, trb);
+               return 1;
+
        count = trb->size & DWC3_TRB_SIZE_MASK;
 
        if (dep->direction) {
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
                        s_pkt = 1;
        }
 
-       /*
-        * We assume here we will always receive the entire data block
-        * which we should receive. Meaning, if we program RX to
-        * receive 4K but we receive only 2K, we assume that's all we
-        * should receive and we simply bounce the request back to the
-        * gadget driver for further processing.
-        */
-       req->request.actual += req->request.length - count;
-       if (s_pkt)
+       if (s_pkt && !chain)
                return 1;
        if ((event->status & DEPEVT_STATUS_LST) &&
                        (trb->ctrl & (DWC3_TRB_CTRL_LST |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
        struct dwc3_trb         *trb;
        unsigned int            slot;
        unsigned int            i;
+       int                     count = 0;
        int                     ret;
 
        do {
+               int chain;
+
                req = next_request(&dep->started_list);
                if (WARN_ON_ONCE(!req))
                        return 1;
 
+               chain = req->request.num_mapped_sgs > 0;
                i = 0;
                do {
                        slot = req->first_trb_index + i;
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
                                slot++;
                        slot %= DWC3_TRB_NUM;
                        trb = &dep->trb_pool[slot];
+                       count += trb->size & DWC3_TRB_SIZE_MASK;
 
                        ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
-                                       event, status);
+                                       event, status, chain);
                        if (ret)
                                break;
                } while (++i < req->request.num_mapped_sgs);
 
+               /*
+                * We assume here we will always receive the entire data block
+                * which we should receive. Meaning, if we program RX to
+                * receive 4K but we receive only 2K, we assume that's all we
+                * should receive and we simply bounce the request back to the
+                * gadget driver for further processing.
+                */
+               req->request.actual += req->request.length - count;
                dwc3_gadget_giveback(dep, req, status);
 
                if (ret)
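
The HWO handling added above can be read in isolation: on a short transfer mid-chain the controller skips the remaining chained TRBs without clearing their owner bits, so software must do it before the ring slot is reused. A tiny sketch (bit positions illustrative):

#include <stdint.h>

#define TRB_CTRL_HWO    (1u << 0)       /* hardware owns the TRB */
#define TRB_CTRL_CHN    (1u << 4)       /* TRB is chained to the next */

struct trb { uint32_t size, ctrl; };

/* Sketch of the rule the hunk encodes: reclaim a skipped chained TRB
 * by clearing HWO ourselves, so the HW never reuses a bogus entry. */
static void reclaim_chained_trb(struct trb *trb, int chain)
{
        if (chain && (trb->ctrl & TRB_CTRL_HWO))
                trb->ctrl &= ~TRB_CTRL_HWO;
}
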
index eb648485a58c571001c4e31b7c79aadaaee04445..5ebe6af7976ec4189363651dfe505cf6ec79fb12 100644 (file)
@@ -1913,6 +1913,8 @@ unknown:
                        break;
 
                case USB_RECIP_ENDPOINT:
+                       if (!cdev->config)
+                               break;
                        endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
                        list_for_each_entry(f, &cdev->config->functions, list) {
                                if (test_bit(endp, f->endpoints))
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
 
        cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
        if (!cdev->os_desc_req) {
-               ret = PTR_ERR(cdev->os_desc_req);
+               ret = -ENOMEM;
                goto end;
        }
 
        /* OS feature descriptor length <= 4kB */
        cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
        if (!cdev->os_desc_req->buf) {
-               ret = PTR_ERR(cdev->os_desc_req->buf);
+               ret = -ENOMEM;
                kfree(cdev->os_desc_req);
                goto end;
        }
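
Both hunks above fix the same class of bug: usb_ep_alloc_request() and kmalloc() return NULL on failure, not an ERR_PTR, and PTR_ERR(NULL) is 0, i.e. "success". A sketch of the correct shape:

#include <linux/errno.h>
#include <linux/slab.h>

/* Sketch only: allocators that return NULL on failure must not be run
 * through PTR_ERR() -- the caller would see 0 and a NULL buffer. */
static int alloc_buf(void **out, size_t len)
{
        void *buf = kmalloc(len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;         /* not PTR_ERR(buf), which is 0 */
        *out = buf;
        return 0;
}
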
index 70cf3477f951a11931032c2c73d59ace5b7a1163..f9237fe2be0565857a23c637fd7d878d85fe049a 100644 (file)
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item)
 {
        struct gadget_info *gi = to_gadget_info(item);
 
+       mutex_lock(&gi->lock);
        unregister_gadget(gi);
+       mutex_unlock(&gi->lock);
 }
 EXPORT_SYMBOL_GPL(unregister_gadget_item);
 
index d58bfc32be9ec7a20f52de5beb53b784a9475829..007ec6e4a5d42cf391060b3e99b36b8bf3ff2848 100644 (file)
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
 {
        struct sk_buff  *skb2 = NULL;
        struct usb_ep   *in = port->in_ep;
-       int             padlen = 0;
-       u16             len = skb->len;
+       int             headroom, tailroom, padlen = 0;
+       u16             len;
 
-       int headroom = skb_headroom(skb);
-       int tailroom = skb_tailroom(skb);
+       if (!skb)
+               return NULL;
+
+       len = skb->len;
+       headroom = skb_headroom(skb);
+       tailroom = skb_tailroom(skb);
 
        /* When ((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
         * stick two bytes of zero-length EEM packet on the end.
index c8005823b1905389710d2839249766939eff10b1..16562e46112179b419a353a6508b6629c7d10d7e 100644 (file)
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
 {
        struct sk_buff *skb2;
 
+       if (!skb)
+               return NULL;
+
        skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
        rndis_add_hdr(skb2);
 
index 943c21aafd3b573affe0e61d608b20ec74dc79ee..ab6ac1b74ac0f59e4a09ec340a66a954934836b1 100644 (file)
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
 {
        rndis_reset_cmplt_type *resp;
        rndis_resp_t *r;
+       u8 *xbuf;
+       u32 length;
+
+       /* drain the response queue */
+       while ((xbuf = rndis_get_next_response(params, &length)))
+               rndis_free_response(params, xbuf);
 
        r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
        if (!r)
index a3f7e7c55ebb18bd5045e27628c81075e484af37..5f562c1ec795718900e5e33de6d6e051eb2bb2ae 100644 (file)
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                        /* Multi frame CDC protocols may store the frame for
                         * later which is not a dropped frame.
                         */
-                       if (dev->port_usb->supports_multi_frame)
+                       if (dev->port_usb &&
+                                       dev->port_usb->supports_multi_frame)
                                goto multiframe;
                        goto drop;
                }
index 6ded6345cd09bde695c2cb83fb9a79a6a88de9af..e0cd1e4c88927cfc29e12f050ec2a4eb8555136b 100644 (file)
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
 */
 {
        struct list_head        *pool = &port->write_pool;
-       struct usb_ep           *in = port->port_usb->in;
+       struct usb_ep           *in;
        int                     status = 0;
        bool                    do_tty_wake = false;
 
+       if (!port->port_usb)
+               return status;
+
+       in = port->port_usb->in;
+
        while (!port->write_busy && !list_empty(pool)) {
                struct usb_request      *req;
                int                     len;
index 66753ba7a42eb803e64964c6a4a470dad6391242..31125a4a2658938cdc67ef4b24ee9bfbfeeadf44 100644 (file)
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
        if (!data) {
                kfree(*class_array);
                *class_array = NULL;
-               ret = PTR_ERR(data);
+               ret = -ENOMEM;
                goto unlock;
        }
        cl_arr = *class_array;
index aa3707bdebb4ad7fc2d56f50bbe80ab7093cf878..16104b5ebdcb73962edc506c5c5cb3e4c7b78108 100644 (file)
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
         */
        spin_lock_irq(&epdata->dev->lock);
        value = -ENODEV;
-       if (unlikely(epdata->ep))
+       if (unlikely(epdata->ep == NULL))
                goto fail;
 
        req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
        }
        if (is_sync_kiocb(iocb)) {
                value = ep_io(epdata, buf, len);
-               if (value >= 0 && copy_to_iter(buf, value, to))
+               if (value >= 0 && (copy_to_iter(buf, value, to) != value))
                        value = -EFAULT;
        } else {
                struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
index ff8685ea721936e3a67f63c0d44d0ec0ae4d943b..40c04bb25f2f9f2eee7c4b326e8585e6de3e4fb9 100644 (file)
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
                return;
 
        if (req->num_mapped_sgs) {
-               dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+               dma_unmap_sg(dev, req->sg, req->num_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
                req->num_mapped_sgs = 0;
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
                        if (ret != -EPROBE_DEFER)
                                list_del(&driver->pending);
                        if (ret)
-                               goto err4;
+                               goto err5;
                        break;
                }
        }
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 
        return 0;
 
+err5:
+       device_del(&udc->dev);
+
 err4:
        list_del(&udc->list);
        mutex_unlock(&udc_lock);
index 93d28cb00b76f15de7f281f465a1c791aa7edbbe..8bb011ea78f7df483f23a0325fc082f36fb8b900 100644 (file)
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
 
        tmp = in_be16(&udc->usb_param->frame_n);
        if (tmp & 0x8000)
-               tmp = tmp & 0x07ff;
-       else
-               tmp = -EINVAL;
-
-       return (int)tmp;
+               return tmp & 0x07ff;
+       return -EINVAL;
 }
 
 static int fsl_qe_start(struct usb_gadget *gadget,
@@ -2053,7 +2050,7 @@ static void setup_received_handle(struct qe_udc *udc,
                        struct qe_ep *ep;
 
                        if (wValue != 0 || wLength != 0
-                               || pipe > USB_MAX_ENDPOINTS)
+                               || pipe >= USB_MAX_ENDPOINTS)
                                break;
                        ep = &udc->eps[pipe];
 
index 93a3bec81df78006f1ba3871aece717fe69e05b3..fb8fc34827aba12f5132b3232f0ffca3c24e89a8 100644 (file)
 
 /* DRD_CON */
 #define DRD_CON_PERI_CON       BIT(24)
+#define DRD_CON_VBOUT          BIT(0)
 
 /* USB_INT_ENA_1 and USB_INT_STA_1 */
 #define USB_INT_1_B3_PLLWKUP   BIT(31)
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
 {
        /* FIXME: How to change host / peripheral mode as well? */
        usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+       usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
 
        usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
        usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
index a962b89b65a665df737e8ebd4f48646064805334..1e5f529d51a21f4a17db61cf0f8b7f48d8234386 100644 (file)
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
        int     port = HCS_N_PORTS(ehci->hcs_params);
 
        while (port--) {
-               ehci_writel(ehci, PORT_RWC_BITS,
-                               &ehci->regs->port_status[port]);
                spin_unlock_irq(&ehci->lock);
                ehci_port_power(ehci, port, false);
                spin_lock_irq(&ehci->lock);
+               ehci_writel(ehci, PORT_RWC_BITS,
+                               &ehci->regs->port_status[port]);
        }
 }
 
index c369c29e496d735e77f3be32226c479e6871e846..2f7690092a7ffb6e67ca893d9d491f48da6be8d3 100644 (file)
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
        if (pin_number > 7)
                return;
 
-       mask = 1u << pin_number;
+       mask = 1u << (pin_number % 4);
        idx = pin_number / 4;
 
        if (value)
index d61fcc48099ed68f0a6084eef673ae33371cab95..730b9fd266852db5812e98456c9ff8299aa40ae6 100644 (file)
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 
        ret = 0;
        virt_dev = xhci->devs[slot_id];
+       if (!virt_dev)
+               return -ENODEV;
+
        cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
        if (!cmd) {
                xhci_dbg(xhci, "Couldn't allocate command structure.\n");
index 4fd041bec332c154604d6e1c262a70f791604fae..d7b0f97abbad608200cbfb5b59d0faacfa1b8b43 100644 (file)
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
                usb_remove_hcd(xhci->shared_hcd);
                usb_put_hcd(xhci->shared_hcd);
        }
-       usb_hcd_pci_remove(dev);
 
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                pci_set_power_state(dev, PCI_D3hot);
+
+       usb_hcd_pci_remove(dev);
 }
 
 #ifdef CONFIG_PM
index 918e0c739b795ec3577ae1853708abf6249ac9e5..797137e26549b566474d99e1d3e6efe277aa4648 100644 (file)
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
+       if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return;
+       }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but another timer marked "
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Calling usb_hc_died()");
-       usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+       usb_hc_died(xhci_to_hcd(xhci));
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
 }
@@ -1334,12 +1338,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 
        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
 
-       if (cmd->command_trb != xhci->cmd_ring->dequeue) {
-               xhci_err(xhci,
-                        "Command completion event does not match command\n");
-               return;
-       }
-
        del_timer(&xhci->cmd_timer);
 
        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1351,6 +1349,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                xhci_handle_stopped_cmd_ring(xhci, cmd);
                return;
        }
+
+       if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+               xhci_err(xhci,
+                        "Command completion event does not match command\n");
+               return;
+       }
+
        /*
         * Host aborted the command ring, check if the current command was
         * supposed to be aborted, otherwise continue normally.
@@ -3243,7 +3248,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        send_addr = addr;
 
        /* Queue the TRBs, even if they are zero-length */
-       for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
+       for (enqd_len = 0; first_trb || enqd_len < full_len;
+                       enqd_len += trb_buff_len) {
                field = TRB_TYPE(TRB_NORMAL);
 
                /* TRB buffer should not cross 64KB boundaries */
index 52c27cab78c3e44d1927c7bcacebc7fbcf1f7c61..9b5b3b2281cae47afb0b88042a5c956a826498f6 100644 (file)
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
 {
        char data[30 *3 + 4];
        char *d = data;
-       int m = (sizeof(data) - 1) / 3;
+       int m = (sizeof(data) - 1) / 3 - 1;
        int bytes_read = 0;
        int retry_on_empty = 10;
        int retry_on_timeout = 5;
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) {
                        int i = 0;
                        char data[30 *3 + 4];
                        char *d = data;
-                       int m = (sizeof(data) - 1) / 3;
+                       int m = (sizeof(data) - 1) / 3 - 1;
                        int l = 0;
                        struct u132_target *target = &ftdi->target[ed];
                        struct u132_command *command = &ftdi->command[
@@ -1876,7 +1876,7 @@ more:{
                if (packet_bytes > 2) {
                        char diag[30 *3 + 4];
                        char *d = diag;
-                       int m = (sizeof(diag) - 1) / 3;
+                       int m = (sizeof(diag) - 1) / 3 - 1;
                        char *b = ftdi->bulk_in_buffer;
                        int bytes_read = 0;
                        diag[0] = 0;
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi)
                        if (packet_bytes > 2) {
                                char diag[30 *3 + 4];
                                char *d = diag;
-                               int m = (sizeof(diag) - 1) / 3;
+                               int m = (sizeof(diag) - 1) / 3 - 1;
                                char *b = ftdi->bulk_in_buffer;
                                int bytes_read = 0;
                                unsigned char c = 0;
@@ -2155,7 +2155,7 @@ more:{
                if (packet_bytes > 2) {
                        char diag[30 *3 + 4];
                        char *d = diag;
-                       int m = (sizeof(diag) - 1) / 3;
+                       int m = (sizeof(diag) - 1) / 3 - 1;
                        char *b = ftdi->bulk_in_buffer;
                        int bytes_read = 0;
                        diag[0] = 0;
index 6b978f04b8d7217d72373ffe6e586a908e464471..5c8210dc6fd9cf9c0b39003d92b59a1440ddb0c2 100644 (file)
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req)
 {
        struct usb_sg_request   *req = (struct usb_sg_request *) _req;
 
-       req->status = -ETIMEDOUT;
        usb_sg_cancel(req);
 }
 
@@ -616,8 +615,10 @@ static int perform_sglist(
                mod_timer(&sg_timer, jiffies +
                                msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
                usb_sg_wait(req);
-               del_timer_sync(&sg_timer);
-               retval = req->status;
+               if (!del_timer_sync(&sg_timer))
+                       retval = -ETIMEDOUT;
+               else
+                       retval = req->status;
 
                /* FIXME check resulting data pattern */
 
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
        ktime_get_ts64(&start);
 
        retval = usbtest_do_ioctl(intf, param_32);
-       if (retval)
+       if (retval < 0)
                goto free_mutex;
 
        ktime_get_ts64(&end);
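
The timeout rework above leans on del_timer_sync()'s return value: 0 means the timer already fired (the timeout case), nonzero means a pending timer was cancelled. A minimal sketch of the idiom:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

/* Sketch only: with this idiom the timeout handler no longer needs to
 * write a status field of its own -- the cancel result says it all. */
static int wait_with_timeout(struct timer_list *t, int io_status)
{
        mod_timer(t, jiffies + msecs_to_jiffies(5000));
        /* ... wait for the I/O to finish ... */
        if (!del_timer_sync(t))
                return -ETIMEDOUT;      /* handler ran: the wait timed out */
        return io_status;               /* timer cancelled: real status */
}
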
index 886526b5fcddbfcae620b2587f1c1a1125a4933c..73cfa13fc0dc6aef1cbb283ba8462faea7540a29 100644 (file)
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
 config USB_MUSB_TUSB6010
        tristate "TUSB6010"
        depends on HAS_IOMEM
-       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
        depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
 
 config USB_MUSB_OMAP2PLUS
index 192248f974ec58c88851441e7569d82a4c867f14..fe08e776fec3e874aaa06abf6c7f301f019d2672 100644 (file)
@@ -290,6 +290,7 @@ int musb_hub_control(
        u32             temp;
        int             retval = 0;
        unsigned long   flags;
+       bool            start_musb = false;
 
        spin_lock_irqsave(&musb->lock, flags);
 
@@ -390,7 +391,7 @@ int musb_hub_control(
                         * logic relating to VBUS power-up.
                         */
                        if (!hcd->self.is_b_host && musb_has_gadget(musb))
-                               musb_start(musb);
+                               start_musb = true;
                        break;
                case USB_PORT_FEAT_RESET:
                        musb_port_reset(musb, true);
@@ -451,5 +452,9 @@ error:
                retval = -EPIPE;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (start_musb)
+               musb_start(musb);
+
        return retval;
 }
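
The start_musb flag above is the usual defer-past-the-spinlock trick: decide under the lock, act after it, since musb_start() must not run in atomic context. A pthread-based sketch of the shape:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch only: the callback stands in for musb_start(), which may
 * sleep and therefore cannot be called with the spinlock held. */
static void hub_op(int want_start, void (*start)(void))
{
        int start_later = 0;

        pthread_mutex_lock(&lock);
        if (want_start)
                start_later = 1;        /* defer: can't sleep under the lock */
        pthread_mutex_unlock(&lock);

        if (start_later)
                start();
}
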
index 980c9dee09eb3b64ed3b0676090b2d86d2928845..427efb5eebae9b386c2437d0da82477462cd184f 100644 (file)
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
 int usb_gen_phy_init(struct usb_phy *phy)
 {
        struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
+       int ret;
 
        if (!IS_ERR(nop->vcc)) {
                if (regulator_enable(nop->vcc))
                        dev_err(phy->dev, "Failed to enable power\n");
        }
 
-       if (!IS_ERR(nop->clk))
-               clk_prepare_enable(nop->clk);
+       if (!IS_ERR(nop->clk)) {
+               ret = clk_prepare_enable(nop->clk);
+               if (ret)
+                       return ret;
+       }
 
        nop_reset(nop);
 
index 6f6d2a7fd5a079149c6587c709b473582b3e5458..6523af4f8f93fc48ab8d9b26b409719a61486e24 100644 (file)
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev)
                 (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
                 otg_dev->vbus);
 
+       platform_set_drvdata(pdev, otg_dev);
+
        return 0;
 }
 
index 8fbbc2d32371a973f4f3bd166c302cf0ece29267..ac67bab9124cc043e0c858a8858fc4adb7e5878d 100644 (file)
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
        if (gpio > 0)
                dparam->enable_gpio = gpio;
 
-       if (dparam->type == USBHS_TYPE_RCAR_GEN2)
+       if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
+           dparam->type == USBHS_TYPE_RCAR_GEN3)
                dparam->has_usb_dmac = 1;
 
        return info;
index 280ed5ff021bdb038f920562a0ccbed78e6ccff0..857e78337324b6488a77d8310da1a32c615e9349 100644 (file)
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
 
        /* use PIO if packet is less than pio_dma_border or pipe is DCP */
        if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
-           usbhs_pipe_is_dcp(pipe))
+           usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
                goto usbhsf_pio_prepare_push;
 
        /* check data length if this driver don't use USB-DMAC */
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
 
        /* use PIO if packet is less than pio_dma_border or pipe is DCP */
        if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
-           usbhs_pipe_is_dcp(pipe))
+           usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
                goto usbhsf_pio_prepare_pop;
 
        fifo = usbhsf_get_dma_fifo(priv, pkt);
index d4be5d5948960cd32353d3007be4373d914be114..28965ef4f824a0117e94fc8c271b347858d0a5e6 100644 (file)
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
        if (usbhs_mod_is_host(priv))
                usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
 
-       usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+       /*
+        * The driver should not clear the xxxSTS after the "call irq
+        * callback functions" section below, because each "if" statement
+        * there may invoke a callback; clearing here first avoids side effects.
+        */
+       if (irq_state.intsts0 & BRDY)
+               usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
        usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
-       usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+       if (irq_state.intsts0 & BEMP)
+               usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
 
        /*
         * call irq callback functions
index 50f3363cc382b8eaebfa6d1083f8f44c8c734b1d..c4c64740a3e72ebdfd0f74a190e2d44f4ec10b47 100644 (file)
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
                 * use dmaengine if possible.
                 * It will use pio handler if impossible.
                 */
-               if (usb_endpoint_dir_in(desc))
+               if (usb_endpoint_dir_in(desc)) {
                        pipe->handler = &usbhs_fifo_dma_push_handler;
-               else
+               } else {
                        pipe->handler = &usbhs_fifo_dma_pop_handler;
+                       usbhs_xxxsts_clear(priv, BRDYSTS,
+                                          usbhs_pipe_number(pipe));
+               }
 
                ret = 0;
        }
@@ -1073,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
        dev_info(dev, "%stransceiver found\n",
-                gpriv->transceiver ? "" : "no ");
+                !IS_ERR(gpriv->transceiver) ? "" : "no ");
 
        /*
         * CAUTION
index 00820809139a0228e512e077aa20502d326737f7..b2d767e743fc2258c8b13e84401e5f34b60efcec 100644 (file)
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
        { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
        { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
        { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+       { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
        { }                                     /* Terminating entry */
 };
 
index c5d6c1e73e8e0450d46dc7140637146404f262bb..f87a938cf00571eb69edbd8d625f58041384d5fa 100644 (file)
 #define FTDI_4N_GALAXY_DE_2_PID        0xF3C1
 #define FTDI_4N_GALAXY_DE_3_PID        0xF3C2
 
+/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID      0xf440
+#define FTDI_IVIUM_XSTAT_PID   0xf441
+
 /*
  * Linx Technologies product ids
  */
 #define INTREPID_VALUECAN_PID  0x0601
 #define INTREPID_NEOVI_PID     0x0701
 
+/*
+ * WICED USB UART
+ */
+#define WICED_VID              0x0A5C
+#define WICED_USB20706V2_PID   0x6422
+
 /*
  * Definitions for ID TECH (www.idt-net.com) devices
  */
index 5608af4a369d47f1f63402a22173e2971a51fdaf..de9992b492b08ba447fb1f594e0a4f12f1d3c9a7 100644 (file)
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        if (urb->transfer_buffer == NULL) {
                urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
-                                              GFP_KERNEL);
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
index ed378fb232e7cf7b703deca78e14c75143511100..57426d703a098dd2d45d67de11acc0ccb5b2f3a5 100644 (file)
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        }
 
        if (urb->transfer_buffer == NULL) {
-               urb->transfer_buffer =
-                   kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+               urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
index 8e07536c233a0c90b6a69ebac97cf9c38fe61416..9894e341c6ac93953a119c224729881a00682ef9 100644 (file)
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4            0x1206
+#define TELIT_PRODUCT_LE920A4_1207             0x1207
+#define TELIT_PRODUCT_LE920A4_1208             0x1208
+#define TELIT_PRODUCT_LE920A4_1211             0x1211
+#define TELIT_PRODUCT_LE920A4_1212             0x1212
+#define TELIT_PRODUCT_LE920A4_1213             0x1213
+#define TELIT_PRODUCT_LE920A4_1214             0x1214
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID                          0x19d2
@@ -519,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
 #define VIATELECOM_VENDOR_ID                   0x15eb
 #define VIATELECOM_PRODUCT_CDS7                        0x0001
 
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID                    0x22de
+#define WETELECOM_PRODUCT_WMD200               0x6801
+#define WETELECOM_PRODUCT_6802                 0x6802
+#define WETELECOM_PRODUCT_WMD300               0x6803
+
 struct option_blacklist_info {
        /* bitmask of interface numbers blacklisted for send_setup */
        const unsigned long sendsetup;
@@ -628,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
        .reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1),
+};
+
 static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
        .sendsetup = BIT(2),
        .reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1203,6 +1220,16 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
                .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+               .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+               .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1966,9 +1993,13 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index a204782ae530e1de8f7982f869a94f9b80ce9dad..e98b6e57b703dbd84c3b618fe56dc50eff3fd7ec 100644 (file)
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
 /* Infineon Flashloader driver */
 #define FLASHLOADER_IDS()              \
        { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
-       { USB_DEVICE(0x8087, 0x0716) }
+       { USB_DEVICE(0x8087, 0x0716) }, \
+       { USB_DEVICE(0x8087, 0x0801) }
 DEVICE(flashloader, FLASHLOADER_IDS);
 
 /* Google Serial USB SubClass */
index b1b9bac4401611e0afa61322714e5f2b3ead8a68..d213cf44a7e45ef8ae692bf4a9e63d31088cae8b 100644 (file)
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
 
        rc = usb_register(udriver);
        if (rc)
-               return rc;
+               goto failed_usb_register;
 
        for (sd = serial_drivers; *sd; ++sd) {
                (*sd)->usb_driver = udriver;
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
        while (sd-- > serial_drivers)
                usb_serial_deregister(*sd);
        usb_deregister(udriver);
+failed_usb_register:
+       kfree(udriver);
        return rc;
 }
 EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
index 15ecfc9c5f6c59e14cdf4477e401700df47e19ff..152b43822ef1912c980292f7927286e6df76656c 100644 (file)
@@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
 }
 
 static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
-                                          uint32_t flags, void *data)
+                                          unsigned int count, uint32_t flags,
+                                          void *data)
 {
-       int32_t fd = *(int32_t *)data;
-
-       if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
-               return -EINVAL;
-
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
-               if (*ctx)
-                       eventfd_signal(*ctx, 1);
-               return 0;
+               if (*ctx) {
+                       if (count) {
+                               eventfd_signal(*ctx, 1);
+                       } else {
+                               eventfd_ctx_put(*ctx);
+                               *ctx = NULL;
+                       }
+                       return 0;
+               }
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
-               uint8_t trigger = *(uint8_t *)data;
+               uint8_t trigger;
+
+               if (!count)
+                       return -EINVAL;
+
+               trigger = *(uint8_t *)data;
                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
-               return 0;
-       }
 
-       /* Handle SET_DATA_EVENTFD */
-       if (fd == -1) {
-               if (*ctx)
-                       eventfd_ctx_put(*ctx);
-               *ctx = NULL;
                return 0;
-       } else if (fd >= 0) {
-               struct eventfd_ctx *efdctx;
-               efdctx = eventfd_ctx_fdget(fd);
-               if (IS_ERR(efdctx))
-                       return PTR_ERR(efdctx);
-               if (*ctx)
-                       eventfd_ctx_put(*ctx);
-               *ctx = efdctx;
+       } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+               int32_t fd;
+
+               if (!count)
+                       return -EINVAL;
+
+               fd = *(int32_t *)data;
+               if (fd == -1) {
+                       if (*ctx)
+                               eventfd_ctx_put(*ctx);
+                       *ctx = NULL;
+               } else if (fd >= 0) {
+                       struct eventfd_ctx *efdctx;
+
+                       efdctx = eventfd_ctx_fdget(fd);
+                       if (IS_ERR(efdctx))
+                               return PTR_ERR(efdctx);
+
+                       if (*ctx)
+                               eventfd_ctx_put(*ctx);
+
+                       *ctx = efdctx;
+               }
                return 0;
-       } else
-               return -EINVAL;
+       }
+
+       return -EINVAL;
 }
 
 static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
 {
-       if (index != VFIO_PCI_ERR_IRQ_INDEX)
+       if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;
 
-       /*
-        * We should sanitize start & count, but that wasn't caught
-        * originally, so this IRQ index must forever ignore them :-(
-        */
-
-       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+                                              count, flags, data);
 }
 
 static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
 {
-       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;
 
-       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+                                              count, flags, data);
 }
 
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
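
From userspace, the reworked path above corresponds to VFIO_DEVICE_SET_IRQS with start = 0 and count <= 1. A hedged sketch of arming the error-IRQ eventfd (assumed setup: device_fd is an open VFIO device fd, efd an eventfd; per the new code, count = 0 with DATA_NONE is the teardown variant):

#include <linux/vfio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_err_eventfd(int device_fd, int efd)
{
        char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
        struct vfio_irq_set *set = (struct vfio_irq_set *)buf;

        memset(buf, 0, sizeof(buf));
        set->argsz = sizeof(buf);
        set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        set->index = VFIO_PCI_ERR_IRQ_INDEX;
        set->start = 0;
        set->count = 1;
        memcpy(set->data, &efd, sizeof(int32_t));

        return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
}
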
index 9d6320e8ff3e07f55f9229ea9138624a09b6b488..6e29d053843d0b7f29a1aed672ac88ef8150b9d7 100644 (file)
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
-       struct iovec *tvc_resp_iov;
+       struct iovec tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);
 
-               iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+               iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp_iov = &vq->iov[out];
+               cmd->tvc_resp_iov = vq->iov[out];
                cmd->tvc_in_iovs = in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
index 388eec4e1a90d3856c9f52910003808f65fa6e3f..97fb2f8fa93041a5a6854e580907854e283d80a8 100644 (file)
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
 {
        void *priv = NULL;
        long err;
-       struct vhost_memory *memory;
+       struct vhost_umem *umem;
 
        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
-       memory = vhost_dev_reset_owner_prepare();
-       if (!memory) {
+       umem = vhost_dev_reset_owner_prepare();
+       if (!umem) {
                err = -ENOMEM;
                goto done;
        }
        vhost_test_stop(n, &priv);
        vhost_test_flush(n);
-       vhost_dev_reset_owner(&n->dev, memory);
+       vhost_dev_reset_owner(&n->dev, umem);
 done:
        mutex_unlock(&n->dev.mutex);
        return err;
index 0ddf3a2dbfc490a58d150039a57136460e9a1e08..e3b30ea9ece5945c935791798ab27ed8f6c3dd11 100644 (file)
@@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 
        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
+               u32 len;
+
                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
@@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                        continue;
                }
 
+               len = pkt->len;
+
                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);
 
-               vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+               vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
                added = true;
        }
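
The fix above is the classic snapshot-before-handoff rule: once the packet may be freed by the consumer, nothing of it may be read again. A generic sketch:

#include <stdint.h>

struct pkt { uint32_t len; /* ... */ };

/* Sketch only: consume() models virtio_transport_recv_pkt()/free_pkt(),
 * either of which may free p. */
static uint32_t deliver(struct pkt *p, void (*consume)(struct pkt *))
{
        uint32_t len = p->len;  /* snapshot before consume() may free p */

        consume(p);
        return len;             /* safe: p is never touched after handoff */
}
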
 
index 114a0c88afb8bfad71dc7b4612ccf9ed66f59d54..ed9c9eeedfe5f83fd1b7b3ca475280643d8758f1 100644 (file)
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
  * making all of the arch DMA ops work on the vring device itself
  * is a mess.  For now, we use the parent device for DMA ops.
  */
-struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 {
        return vq->vq.vdev->dev.parent;
 }
@@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
+               if (indirect)
+                       kfree(desc);
                END_USE(vq);
                return -ENOSPC;
        }
@@ -426,6 +428,7 @@ unmap_release:
        if (indirect)
                kfree(desc);
 
+       END_USE(vq);
        return -EIO;
 }
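
Both virtio_ring hunks above are error-path symmetry fixes: whatever the function allocated or entered must be released on every exit. An illustrative sketch with plain errno values:

#include <errno.h>
#include <stdlib.h>

/* Sketch only: indirect_desc stands in for the indirect descriptor
 * table; on success its ownership passes to the ring until detach. */
static int queue_op(void *indirect_desc, int ring_full, int map_failed)
{
        if (ring_full) {
                free(indirect_desc);    /* the -ENOSPC path now frees it */
                return -ENOSPC;
        }
        if (map_failed) {
                free(indirect_desc);
                /* END_USE(vq) also belongs here -- the second hunk above */
                return -EIO;
        }
        return 0;
}
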
 
index 7487971f9f788b12a637216c29cc853bb933c1ca..c1010f018bd857985b5bf0ada1f5286f0f68b2bf 100644 (file)
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
-       } else {
+       } else if (msg_type == XS_TRANSACTION_END) {
                list_for_each_entry(trans, &u->transactions, list)
                        if (trans->handle.id == u->u.msg.tx_id)
                                break;
index 4b0eff6da6740552043679764f0ebc76a47917a0..85737e96ab8b5a3d242f50dbf59a8f81c332e4dd 100644 (file)
@@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
        case 1:
                _debug("extract FID count");
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("FID count: %u", call->count);
@@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("extract FID array");
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       call->count * 3 * 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                _debug("unmarshall FID array");
                call->request = kcalloc(call->count,
@@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
        case 3:
                _debug("extract CB count");
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                tmp = ntohl(call->tmp);
                _debug("CB count: %u", tmp);
@@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("extract CB array");
                ret = afs_extract_data(call, skb, last, call->request,
                                       call->count * 3 * 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                _debug("unmarshall CB array");
                cb = call->request;
@@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                call->unmarshall++;
 
        case 5:
-               _debug("trailer");
-               if (skb->len != 0)
-                       return -EBADMSG;
+               ret = afs_data_complete(call, skb, last);
+               if (ret < 0)
+                       return ret;
 
                /* Record that the message was unmarshalled successfully so
                 * that the call destructor can know do the callback breaking
@@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                break;
        }
 
-       if (!last)
-               return 0;
 
        call->state = AFS_CALL_REPLYING;
 
@@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
 {
        struct afs_server *server;
        struct in_addr addr;
+       int ret;
 
        _enter(",{%u},%d", skb->len, last);
 
-       if (skb->len > 0)
-               return -EBADMSG;
-       if (!last)
-               return 0;
+       ret = afs_data_complete(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
@@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
 
        _enter(",{%u},%d", skb->len, last);
 
+       /* There are some arguments that we ignore */
+       afs_data_consumed(call, skb);
        if (!last)
-               return 0;
+               return -EAGAIN;
 
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
@@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work)
 static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
                                bool last)
 {
+       int ret;
+
        _enter(",{%u},%d", skb->len, last);
 
-       if (skb->len > 0)
-               return -EBADMSG;
-       if (!last)
-               return 0;
+       ret = afs_data_complete(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
@@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       if (skb->len > 0)
-               return -EBADMSG;
-       if (!last)
-               return 0;
+       ret = afs_data_complete(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        switch (call->unmarshall) {
        case 0:
@@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
                break;
        }
 
-       if (!last)
-               return 0;
+       ret = afs_data_complete(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        call->state = AFS_CALL_REPLYING;
 
@@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
 static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
                                                 struct sk_buff *skb, bool last)
 {
+       int ret;
+
        _enter(",{%u},%d", skb->len, last);
 
-       if (skb->len > 0)
-               return -EBADMSG;
-       if (!last)
-               return 0;
+       ret = afs_data_complete(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
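
The hunks above all make the same substitution in the cache-manager delivery routines: the old four-way switch on afs_extract_data()'s result collapses to a single `if (ret < 0)`, which only works because the extractor now returns -EAGAIN itself when more data is needed and the dispatcher decides whether that is fatal. A minimal user-space model of the convention; extract() and deliver() are invented stand-ins, only the return-code contract is taken from the patch:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for afs_extract_data(): 0 = extracted in full,
     * -EAGAIN = more data must arrive first, other negative = error. */
    static int extract(int have, int want)
    {
            if (have < 0)
                    return -EBADMSG;
            if (have < want)
                    return -EAGAIN;
            return 0;
    }

    static int deliver(int have)
    {
            int ret = extract(have, 4);

            /* One test replaces the old four-way switch: -EAGAIN simply
             * propagates, like any other negative result. */
            if (ret < 0)
                    return ret;
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n", deliver(8), deliver(2), deliver(-1));
            return 0;
    }
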
index c2e930ec288899cde31e96150dc828d8ec495b60..9312b92e54bedb3310e725a1d10249f2dee21337 100644 (file)
@@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call,
 {
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter(",,%u", last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
        case 1:
                _debug("extract data length (MSW)");
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("DATA length MSW: %u", call->count);
@@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
        case 2:
                _debug("extract data length");
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("DATA length: %u", call->count);
@@ -375,11 +367,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
                        ret = afs_extract_data(call, skb, last, buffer,
                                               call->count);
                        kunmap_atomic(buffer);
-                       switch (ret) {
-                       case 0:         break;
-                       case -EAGAIN:   return 0;
-                       default:        return ret;
-                       }
+                       if (ret < 0)
+                               return ret;
                }
 
                call->offset = 0;
@@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
        case 4:
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       (21 + 3 + 6) * 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                bp = call->buffer;
                xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
@@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
                call->unmarshall++;
 
        case 5:
-               _debug("trailer");
-               if (skb->len != 0)
-                       return -EBADMSG;
+               ret = afs_data_complete(call, skb, last);
+               if (ret < 0)
+                       return ret;
                break;
        }
 
-       if (!last)
-               return 0;
-
        if (call->count < PAGE_SIZE) {
                _debug("clear");
                page = call->reply3;
@@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
 {
        _enter(",{%u},%d", skb->len, last);
 
-       if (skb->len > 0)
-               return -EBADMSG; /* shouldn't be any reply data */
-       return 0;
+       /* shouldn't be any reply data */
+       return afs_data_complete(call, skb, last);
 }
 
 /*
@@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call,
 {
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call,
 {
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call,
 {
        struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call,
 {
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call,
 {
        struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call,
 {
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter(",,%u", last);
 
-       afs_transfer_reply(call, skb);
-       if (!last) {
-               _leave(" = 0 [more]");
-               return 0;
-       }
-
-       if (call->reply_size != call->reply_max) {
-               _leave(" = -EBADMSG [%u != %u]",
-                      call->reply_size, call->reply_max);
-               return -EBADMSG;
-       }
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
@@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call,
        afs_dataversion_t *store_version;
        struct afs_vnode *vnode = call->reply;
        const __be32 *bp;
+       int ret;
 
        _enter(",,%u", last);
 
-       afs_transfer_reply(call, skb);
-       if (!last) {
-               _leave(" = 0 [more]");
-               return 0;
-       }
-
-       if (call->reply_size != call->reply_max) {
-               _leave(" = -EBADMSG [%u != %u]",
-                      call->reply_size, call->reply_max);
-               return -EBADMSG;
-       }
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        store_version = NULL;
@@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                _debug("extract status");
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       12 * 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                bp = call->buffer;
                xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2);
@@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                /* extract the volume name length */
        case 2:
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("volname length: %u", call->count);
@@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                if (call->count > 0) {
                        ret = afs_extract_data(call, skb, last, call->reply3,
                                               call->count);
-                       switch (ret) {
-                       case 0:         break;
-                       case -EAGAIN:   return 0;
-                       default:        return ret;
-                       }
+                       if (ret < 0)
+                               return ret;
                }
 
                p = call->reply3;
@@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
        case 4:
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       call->count);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->offset = 0;
                call->unmarshall++;
@@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                /* extract the offline message length */
        case 5:
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("offline msg length: %u", call->count);
@@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                if (call->count > 0) {
                        ret = afs_extract_data(call, skb, last, call->reply3,
                                               call->count);
-                       switch (ret) {
-                       case 0:         break;
-                       case -EAGAIN:   return 0;
-                       default:        return ret;
-                       }
+                       if (ret < 0)
+                               return ret;
                }
 
                p = call->reply3;
@@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
        case 7:
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       call->count);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->offset = 0;
                call->unmarshall++;
@@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                /* extract the message of the day length */
        case 8:
                ret = afs_extract_data(call, skb, last, &call->tmp, 4);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->count = ntohl(call->tmp);
                _debug("motd length: %u", call->count);
@@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
                if (call->count > 0) {
                        ret = afs_extract_data(call, skb, last, call->reply3,
                                               call->count);
-                       switch (ret) {
-                       case 0:         break;
-                       case -EAGAIN:   return 0;
-                       default:        return ret;
-                       }
+                       if (ret < 0)
+                               return ret;
                }
 
                p = call->reply3;
@@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
        case 10:
                ret = afs_extract_data(call, skb, last, call->buffer,
                                       call->count);
-               switch (ret) {
-               case 0:         break;
-               case -EAGAIN:   return 0;
-               default:        return ret;
-               }
+               if (ret < 0)
+                       return ret;
 
                call->offset = 0;
                call->unmarshall++;
        no_motd_padding:
 
        case 11:
-               _debug("trailer %d", skb->len);
-               if (skb->len != 0)
-                       return -EBADMSG;
+               ret = afs_data_complete(call, skb, last);
+               if (ret < 0)
+                       return ret;
                break;
        }
 
-       if (!last)
-               return 0;
-
        _leave(" = 0 [done]");
        return 0;
 }
@@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call,
                                    struct sk_buff *skb, bool last)
 {
        const __be32 *bp;
+       int ret;
 
        _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
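
The fileserver delivery routines above share one shape: a switch on call->unmarshall whose cases deliberately fall through, so a function that bailed out with -EAGAIN re-enters at the phase where it stopped, with call->offset carrying partial progress. A self-contained model of that resumable state machine (all names invented):

    #include <errno.h>

    struct call { int phase, offset; };

    /* Pretend extractor: consumes up to *avail, reports -EAGAIN if short. */
    static int extract(struct call *c, int *avail, int want)
    {
            int take = *avail < want - c->offset ? *avail : want - c->offset;

            c->offset += take;
            *avail -= take;
            if (c->offset < want)
                    return -EAGAIN;
            c->offset = 0;
            return 0;
    }

    static int deliver(struct call *c, int avail)
    {
            int ret;

            switch (c->phase) {
            case 0:
                    c->phase = 1;           /* fall through */
            case 1:                         /* fixed header */
                    ret = extract(c, &avail, 4);
                    if (ret < 0)
                            return ret;
                    c->phase = 2;           /* fall through */
            case 2:                         /* payload */
                    ret = extract(c, &avail, 12);
                    if (ret < 0)
                            return ret;
                    c->phase = 3;
            }
            return 0;
    }

    int main(void)
    {
            struct call c = { 0, 0 };
            int ret = deliver(&c, 10);      /* first packet: stops mid-payload */

            if (ret == -EAGAIN)
                    ret = deliver(&c, 6);   /* second packet completes it */
            return ret;
    }
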
index 71d5982312f3d11dd6e3dd23079e5c6bef7c23a6..df976b2a7f40fbccf708e572372e41098b31f43f 100644 (file)
@@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *);
  */
 extern int afs_open_socket(void);
 extern void afs_close_socket(void);
+extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
 extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
                         const struct afs_wait_mode *);
 extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
                                            size_t, size_t);
 extern void afs_flat_call_destructor(struct afs_call *);
-extern void afs_transfer_reply(struct afs_call *, struct sk_buff *);
+extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
 extern void afs_send_empty_reply(struct afs_call *);
 extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
 extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
                            size_t);
 
+static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
+                                   bool last)
+{
+       if (skb->len > 0)
+               return -EBADMSG;
+       afs_data_consumed(call, skb);
+       if (!last)
+               return -EAGAIN;
+       return 0;
+}
+
 /*
  * security.c
  */
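
The new afs_data_complete() inline above is the whole trailer protocol in one place. A user-space restatement of its contract, for reference (data_complete() is an invented stand-in):

    #include <errno.h>

    /* Model of afs_data_complete()'s contract:
     *   trailing data present -> -EBADMSG (junk after the message)
     *   not the last packet   -> -EAGAIN  (packet consumed; wait for more)
     *   otherwise             -> 0        (message fully received)
     */
    static int data_complete(unsigned int len, int last)
    {
            if (len > 0)
                    return -EBADMSG;
            /* afs_data_consumed() is called here in the real helper */
            if (!last)
                    return -EAGAIN;
            return 0;
    }
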
index 4832de84d52cba73f96466ae4e73ffc2906566bd..14d04c848465a68f2b107c87b22b90f69062e9c5 100644 (file)
@@ -150,10 +150,9 @@ void afs_close_socket(void)
 }
 
 /*
- * note that the data in a socket buffer is now delivered and that the buffer
- * should be freed
+ * Note that the data in a socket buffer is now consumed.
  */
-static void afs_data_delivered(struct sk_buff *skb)
+void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
 {
        if (!skb) {
                _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
@@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb)
        } else {
                _debug("DLVR %p{%u} [%d]",
                       skb, skb->mark, atomic_read(&afs_outstanding_skbs));
-               if (atomic_dec_return(&afs_outstanding_skbs) == -1)
-                       BUG();
-               rxrpc_kernel_data_delivered(skb);
+               rxrpc_kernel_data_consumed(call->rxcall, skb);
        }
 }
 
@@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call)
                        last = rxrpc_kernel_is_data_last(skb);
                        ret = call->type->deliver(call, skb, last);
                        switch (ret) {
+                       case -EAGAIN:
+                               if (last) {
+                                       _debug("short data");
+                                       goto unmarshal_error;
+                               }
+                               break;
                        case 0:
-                               if (last &&
-                                   call->state == AFS_CALL_AWAIT_REPLY)
+                               ASSERT(last);
+                               if (call->state == AFS_CALL_AWAIT_REPLY)
                                        call->state = AFS_CALL_COMPLETE;
                                break;
                        case -ENOTCONN:
@@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call)
                                abort_code = RX_INVALID_OPERATION;
                                goto do_abort;
                        default:
+                       unmarshal_error:
                                abort_code = RXGEN_CC_UNMARSHAL;
                                if (call->state != AFS_CALL_AWAIT_REPLY)
                                        abort_code = RXGEN_SS_UNMARSHAL;
@@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call)
                                call->state = AFS_CALL_ERROR;
                                break;
                        }
-                       afs_data_delivered(skb);
-                       skb = NULL;
-                       continue;
+                       break;
                case RXRPC_SKB_MARK_FINAL_ACK:
                        _debug("Rcv ACK");
                        call->state = AFS_CALL_COMPLETE;
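
The dispatcher hunks above give -EAGAIN a precise meaning: on a non-final packet it means "wait for more", on the final packet it means the message ended short and the call is aborted with an unmarshalling error. Modeled in isolation (dispatch() is invented):

    #include <errno.h>

    /* Model of the dispatcher's new handling of a delivery result. */
    static int dispatch(int ret, int last)
    {
            switch (ret) {
            case -EAGAIN:
                    if (last)
                            return -EBADMSG; /* short data: unmarshal abort */
                    return 0;                /* wait for the next packet */
            case 0:
                    return 0;                /* complete; only legal when last */
            default:
                    return ret;              /* protocol error: abort the call */
            }
    }
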
@@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call)
 }
 
 /*
- * empty a socket buffer into a flat reply buffer
+ * Empty a socket buffer into a flat reply buffer.
  */
-void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
+int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
 {
        size_t len = skb->len;
 
-       if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
-               BUG();
-       call->reply_size += len;
+       if (len > call->reply_max - call->reply_size) {
+               _leave(" = -EBADMSG [%zu > %u]",
+                      len, call->reply_max - call->reply_size);
+               return -EBADMSG;
+       }
+
+       if (len > 0) {
+               if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
+                                 len) < 0)
+                       BUG();
+               call->reply_size += len;
+       }
+
+       afs_data_consumed(call, skb);
+       if (!last)
+               return -EAGAIN;
+
+       if (call->reply_size != call->reply_max) {
+               _leave(" = -EBADMSG [%u != %u]",
+                      call->reply_size, call->reply_max);
+               return -EBADMSG;
+       }
+       return 0;
 }
 
 /*
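
This rewrite of afs_transfer_reply() replaces the copy-then-BUG() behaviour with a bounds check, folds in the completeness test its callers used to repeat, and adopts the same 0 / -EAGAIN / -EBADMSG contract as the extractors. A stand-alone model (struct reply and transfer_reply() are invented; the logic mirrors the hunk):

    #include <errno.h>
    #include <string.h>

    struct reply { char buf[64]; unsigned int size, max; };

    /* Bounds-check before copying, consume the packet, demand more data
     * unless this was the last one, then verify the reply is complete. */
    static int transfer_reply(struct reply *r, const char *data, size_t len,
                              int last)
    {
            if (len > r->max - r->size)
                    return -EBADMSG;        /* reply overruns the flat buffer */
            memcpy(r->buf + r->size, data, len);
            r->size += len;
            if (!last)
                    return -EAGAIN;         /* packet consumed; expect more */
            if (r->size != r->max)
                    return -EBADMSG;        /* final packet but reply is short */
            return 0;
    }
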
@@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
 }
 
 /*
- * grab the operation ID from an incoming cache manager call
+ * Grab the operation ID from an incoming cache manager call.  The socket
+ * buffer is discarded on error or if we don't yet have sufficient data.
  */
 static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
                                bool last)
@@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
        call->offset += len;
 
        if (call->offset < 4) {
-               if (last) {
-                       _leave(" = -EBADMSG [op ID short]");
-                       return -EBADMSG;
-               }
-               _leave(" = 0 [incomplete]");
-               return 0;
+               afs_data_consumed(call, skb);
+               _leave(" = -EAGAIN");
+               return -EAGAIN;
        }
 
        call->state = AFS_CALL_AWAIT_REQUEST;
@@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 }
 
 /*
- * extract a piece of data from the received data socket buffers
+ * Extract a piece of data from the received data socket buffers.
  */
 int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
                     bool last, void *buf, size_t count)
@@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
        call->offset += len;
 
        if (call->offset < count) {
-               if (last) {
-                       _leave(" = -EBADMSG [%d < %zu]", call->offset, count);
-                       return -EBADMSG;
-               }
+               afs_data_consumed(call, skb);
                _leave(" = -EAGAIN");
                return -EAGAIN;
        }
index 340afd0cd18290e319bcc1d9695c887b39284025..f94d1abdc3ebc47e5750098fddc4098fb9f3df2f 100644 (file)
@@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
        struct afs_cache_vlocation *entry;
        __be32 *bp;
        u32 tmp;
-       int loop;
+       int loop, ret;
 
        _enter(",,%u", last);
 
-       afs_transfer_reply(call, skb);
-       if (!last)
-               return 0;
-
-       if (call->reply_size != call->reply_max)
-               return -EBADMSG;
+       ret = afs_transfer_reply(call, skb, last);
+       if (ret < 0)
+               return ret;
 
        /* unmarshall the reply once we've received all of it */
        entry = call->reply;
index fb8e45b88cd4ecf0aa4b3c485051d7f437ba7bd8..4fe81d1c60f962b53392a6b4b0d047f129509c87 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
        static const struct dentry_operations ops = {
                .d_dname        = simple_dname,
        };
-       return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
+       struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+                                          AIO_RING_MAGIC);
+
+       if (!IS_ERR(root))
+               root->d_sb->s_iflags |= SB_I_NOEXEC;
+       return root;
 }
 
 /* aio_setup
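
mount_pseudo() reports failure as an ERR_PTR()-encoded pointer, never NULL, so its result must be tested with IS_ERR() before dereferencing; the block_dev.c hunk further down fixes the inverse mistake of using a plain truth test. A small user-space model of the idiom (the macros mimic the kernel's, everything else is invented):

    #include <errno.h>

    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct dentry { unsigned long flags; };

    static struct dentry *mount_pseudo_model(int fail)
    {
            static struct dentry root;

            return fail ? ERR_PTR(-ENOMEM) : &root;
    }

    static struct dentry *mount_model(void)
    {
            struct dentry *root = mount_pseudo_model(0);

            if (!IS_ERR(root))              /* not: if (root) */
                    root->flags |= 1;       /* SB_I_NOEXEC stand-in */
            return root;
    }

    int main(void)
    {
            return IS_ERR(mount_model()) ? 1 : 0;
    }
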
index b493909e74920be68574b849b426805fd37541d3..d8e6d421c27fb74eed07369e83278183bb394f82 100644 (file)
@@ -417,6 +417,7 @@ static struct dentry *should_expire(struct dentry *dentry,
        }
        return NULL;
 }
+
 /*
  * Find an eligible tree to time-out
  * A tree is eligible if :-
@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
        struct dentry *root = sb->s_root;
        struct dentry *dentry;
        struct dentry *expired;
+       struct dentry *found;
        struct autofs_info *ino;
 
        if (!root)
@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 
        dentry = NULL;
        while ((dentry = get_next_positive_subdir(dentry, root))) {
+               int flags = how;
+
                spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(dentry);
-               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
-                       expired = NULL;
-               else
-                       expired = should_expire(dentry, mnt, timeout, how);
-               if (!expired) {
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
                        spin_unlock(&sbi->fs_lock);
                        continue;
                }
+               spin_unlock(&sbi->fs_lock);
+
+               expired = should_expire(dentry, mnt, timeout, flags);
+               if (!expired)
+                       continue;
+
+               spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(expired);
                ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
-               spin_lock(&sbi->fs_lock);
-               if (should_expire(expired, mnt, timeout, how)) {
-                       if (expired != dentry)
-                               dput(dentry);
-                       goto found;
-               }
 
+               /* Make sure a reference is not taken on found if
+                * things have changed.
+                */
+               flags &= ~AUTOFS_EXP_LEAVES;
+               found = should_expire(expired, mnt, timeout, flags);
+               if (!found || found != expired)
+                       /* Something has changed, continue */
+                       goto next;
+
+               if (expired != dentry)
+                       dput(dentry);
+
+               spin_lock(&sbi->fs_lock);
+               goto found;
+next:
+               spin_lock(&sbi->fs_lock);
                ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+               spin_unlock(&sbi->fs_lock);
                if (expired != dentry)
                        dput(expired);
-               spin_unlock(&sbi->fs_lock);
        }
        return NULL;
 
@@ -483,6 +500,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status;
+       int state;
 
        /* Block on any pending expire */
        if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
@@ -490,8 +508,19 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        if (rcu_walk)
                return -ECHILD;
 
+retry:
        spin_lock(&sbi->fs_lock);
-       if (ino->flags & AUTOFS_INF_EXPIRING) {
+       state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+       if (state == AUTOFS_INF_WANT_EXPIRE) {
+               spin_unlock(&sbi->fs_lock);
+               /*
+                * Possibly being selected for expire, wait until
+                * it's selected or not.
+                */
+               schedule_timeout_uninterruptible(HZ/10);
+               goto retry;
+       }
+       if (state & AUTOFS_INF_EXPIRING) {
                spin_unlock(&sbi->fs_lock);
 
                pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
index 7f6aff3f72ebacb3ba6f71136abeaa2047da62ab..e5495f37c6ed50cc7b391b24227c637880532afc 100644 (file)
@@ -853,6 +853,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                current->flags |= PF_RANDOMIZE;
 
        setup_new_exec(bprm);
+       install_exec_creds(bprm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
@@ -1044,7 +1045,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
                goto out;
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-       install_exec_creds(bprm);
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0)
index c3cdde87cc8c678542bab9c39f12477058986156..08ae99343d92f91586b49fbf56367906855415ff 100644 (file)
@@ -249,7 +249,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
                 * thaw_bdev drops it.
                 */
                sb = get_super(bdev);
-               drop_super(sb);
+               if (sb)
+                       drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
@@ -646,7 +647,7 @@ static struct dentry *bd_mount(struct file_system_type *fs_type,
 {
        struct dentry *dent;
        dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
-       if (dent)
+       if (!IS_ERR(dent))
                dent->d_sb->s_iflags |= SB_I_CGROUPWB;
        return dent;
 }
index 2b88439c2ee864ffcec7a1e43f6e620a729bdbff..455a6b2fd53957709ecfdf9aeff5386a4e60e2d0 100644 (file)
@@ -589,6 +589,7 @@ static void __merge_refs(struct list_head *head, int mode)
 
                        list_del(&ref2->list);
                        kmem_cache_free(btrfs_prelim_ref_cache, ref2);
+                       cond_resched();
                }
 
        }
index 2fe8f89091a3097aa8e55bcee4a20c7b43ab1ca2..33fe0355110552f93b1560476101de1fb4a86ae7 100644 (file)
@@ -427,6 +427,7 @@ struct btrfs_space_info {
        struct list_head ro_bgs;
        struct list_head priority_tickets;
        struct list_head tickets;
+       u64 tickets_id;
 
        struct rw_semaphore groups_sem;
        /* for block groups in our same type */
@@ -1028,6 +1029,7 @@ struct btrfs_fs_info {
        struct btrfs_workqueue *qgroup_rescan_workers;
        struct completion qgroup_rescan_completion;
        struct btrfs_work qgroup_rescan_work;
+       bool qgroup_rescan_running;     /* protected by qgroup_rescan_lock */
 
        /* filesystem state */
        unsigned long fs_state;
@@ -1079,6 +1081,8 @@ struct btrfs_fs_info {
        struct list_head pinned_chunks;
 
        int creating_free_space_tree;
+       /* Used to record internally whether fs has been frozen */
+       int fs_frozen;
 };
 
 struct btrfs_subvolume_writers {
@@ -2578,7 +2582,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   u64 root_objectid, u64 owner, u64 offset,
                                   struct btrfs_key *ins);
-int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
                         u64 min_alloc_size, u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
index b6d210e7a993fd67634b3523aa3e61a1121d31bd..ac02e041464bc87876cfd206a79f7e07de9846b7 100644 (file)
@@ -541,7 +541,6 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_qgroup_extent_record *qexisting;
        int count_mod = 1;
        int must_insert_reserved = 0;
 
@@ -606,10 +605,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
 
-               qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
-                                                            delayed_refs,
-                                                            qrecord);
-               if (qexisting)
+               if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+                                       delayed_refs, qrecord))
                        kfree(qrecord);
        }
 
@@ -862,33 +859,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes)
-{
-       struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_delayed_ref_head *ref_head;
-       int ret = 0;
-
-       if (!fs_info->quota_enabled || !is_fstree(ref_root))
-               return 0;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-
-       spin_lock(&delayed_refs->lock);
-       ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
-       if (!ref_head) {
-               ret = -ENOENT;
-               goto out;
-       }
-       WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
-       ref_head->qgroup_ref_root = ref_root;
-       ref_head->qgroup_reserved = num_bytes;
-out:
-       spin_unlock(&delayed_refs->lock);
-       return ret;
-}
-
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 5fca9534a2712b0b4dec9e9b15a1e024f272bb2f..43f3629760e90f186730842b0b1c609f799ae256 100644 (file)
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes);
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 59febfb8d04a3094fc8dfa6644a18d5a9aac6d42..54bc8c7c6bcd387ef48ffc9b69e9771de6e2ca2c 100644 (file)
@@ -559,8 +559,29 @@ static noinline int check_leaf(struct btrfs_root *root,
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;
 
-       if (nritems == 0)
+       if (nritems == 0) {
+               struct btrfs_root *check_root;
+
+               key.objectid = btrfs_header_owner(leaf);
+               key.type = BTRFS_ROOT_ITEM_KEY;
+               key.offset = (u64)-1;
+
+               check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+               /*
+                * The only reason we also check NULL here is that during
+                * open_ctree() some roots has not yet been set up.
+                */
+               if (!IS_ERR_OR_NULL(check_root)) {
+                       /* if leaf is the root, then it's fine */
+                       if (leaf->start !=
+                           btrfs_root_bytenr(&check_root->root_item)) {
+                               CORRUPT("non-root leaf's nritems is 0",
+                                       leaf, root, 0);
+                               return -EIO;
+                       }
+               }
                return 0;
+       }
 
        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
@@ -612,6 +633,19 @@ static noinline int check_leaf(struct btrfs_root *root,
        return 0;
 }
 
+static int check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+       unsigned long nr = btrfs_header_nritems(node);
+
+       if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+               btrfs_crit(root->fs_info,
+                          "corrupt node: block %llu root %llu nritems %lu",
+                          node->start, root->objectid, nr);
+               return -EIO;
+       }
+       return 0;
+}
+
 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
@@ -682,6 +716,9 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                ret = -EIO;
        }
 
+       if (found_level > 0 && check_node(root, eb))
+               ret = -EIO;
+
        if (!ret)
                set_extent_buffer_uptodate(eb);
 err:
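
check_leaf() now tolerates nritems == 0 only when the leaf is the root of its owning tree, and check_node() rejects item counts outside (0, BTRFS_NODEPTRS_PER_BLOCK]. The empty-leaf rule reduces to one comparison; modeled:

    #include <errno.h>
    #include <stdint.h>

    /* Model of the new rule: a leaf with zero items is only legal when it
     * is the root of its owning tree (matched by block number). */
    static int check_empty_leaf(uint64_t leaf_start, uint64_t root_bytenr)
    {
            return leaf_start == root_bytenr ? 0 : -EIO;
    }
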
@@ -1618,8 +1655,8 @@ fail:
        return ret;
 }
 
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
-                                              u64 root_id)
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+                                       u64 root_id)
 {
        struct btrfs_root *root;
 
@@ -2298,6 +2335,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
        fs_info->qgroup_ulist = NULL;
+       fs_info->qgroup_rescan_running = false;
        mutex_init(&fs_info->qgroup_rescan_lock);
 }
 
@@ -2624,6 +2662,7 @@ int open_ctree(struct super_block *sb,
        atomic_set(&fs_info->qgroup_op_seq, 0);
        atomic_set(&fs_info->reada_works_cnt, 0);
        atomic64_set(&fs_info->tree_mod_seq, 0);
+       fs_info->fs_frozen = 0;
        fs_info->sb = sb;
        fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
        fs_info->metadata_ratio = 0;
@@ -3739,8 +3778,15 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
-       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                btrfs_free_log(NULL, root);
+               if (root->reloc_root) {
+                       free_extent_buffer(root->reloc_root->node);
+                       free_extent_buffer(root->reloc_root->commit_root);
+                       btrfs_put_fs_root(root->reloc_root);
+                       root->reloc_root = NULL;
+               }
+       }
 
        if (root->free_ino_pinned)
                __btrfs_remove_free_space_cache(root->free_ino_pinned);
@@ -3851,7 +3897,7 @@ void close_ctree(struct btrfs_root *root)
        smp_mb();
 
        /* wait for the qgroup rescan worker to stop */
-       btrfs_qgroup_wait_for_completion(fs_info);
+       btrfs_qgroup_wait_for_completion(fs_info, false);
 
        /* wait for the uuid_scan task to finish */
        down(&fs_info->uuid_tree_rescan_sem);
index b3207a0e09f7966703e1d130e250f05f610b2a3f..f19a982f5a4f122ca7d42aa6278a6614e061a2b1 100644 (file)
@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
                                      struct btrfs_key *location);
 int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+                                       u64 root_id);
 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root);
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
index 61b494e8e604e9e80f1c46c581ede724f56ea050..665da8f66ff18ae5260f892b60a4bafb4fe323fa 100644 (file)
@@ -60,21 +60,6 @@ enum {
        CHUNK_ALLOC_FORCE = 2,
 };
 
-/*
- * Control how reservations are dealt with.
- *
- * RESERVE_FREE - freeing a reservation.
- * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
- *   ENOSPC accounting
- * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
- *   bytes_may_use as the ENOSPC accounting is done elsewhere
- */
-enum {
-       RESERVE_FREE = 0,
-       RESERVE_ALLOC = 1,
-       RESERVE_ALLOC_NO_ACCOUNT = 2,
-};
-
 static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
@@ -104,9 +89,10 @@ static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve,
-                                      int delalloc);
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                   u64 ram_bytes, u64 num_bytes, int delalloc);
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                    u64 num_bytes, int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
 int btrfs_pin_extent(struct btrfs_root *root,
@@ -3501,7 +3487,6 @@ again:
                dcs = BTRFS_DC_SETUP;
        else if (ret == -ENOSPC)
                set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
-       btrfs_free_reserved_data_space(inode, 0, num_pages);
 
 out_put:
        iput(inode);
@@ -4286,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
        if (ret < 0)
                return ret;
 
-       /*
-        * Use new btrfs_qgroup_reserve_data to reserve precious data space
-        *
-        * TODO: Find a good method to avoid reserve data space for NOCOW
-        * range, but don't impact performance on quota disable case.
-        */
+       /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, start, len);
+       if (ret)
+               btrfs_free_reserved_data_space_noquota(inode, start, len);
        return ret;
 }
 
@@ -4472,6 +4454,15 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
        }
 }
 
+/*
+ * If force is CHUNK_ALLOC_FORCE:
+ *    - return 1 if it successfully allocates a chunk,
+ *    - return errors including -ENOSPC otherwise.
+ * If force is NOT CHUNK_ALLOC_FORCE:
+ *    - return 0 if it doesn't need to allocate a new chunk,
+ *    - return 1 if it successfully allocates a chunk,
+ *    - return errors including -ENOSPC otherwise.
+ */
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags, int force)
 {
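
The new comment pins down do_chunk_alloc()'s three-valued result, and the flush_space() hunk below is the matching caller repair: under CHUNK_ALLOC_NO_FORCE both 1 (allocated) and 0 (nothing needed) count as success, and -ENOSPC is tolerated. The caller-side handling in isolation:

    #include <errno.h>

    /* Caller-side handling applied by the flush_space() hunk below. */
    static int handle_alloc_result(int ret)
    {
            if (ret > 0 || ret == -ENOSPC)
                    ret = 0;        /* allocated, or no chunk needed/possible */
            return ret;             /* only unexpected errors remain */
    }
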
@@ -4882,7 +4873,7 @@ static int flush_space(struct btrfs_root *root,
                                     btrfs_get_alloc_profile(root, 0),
                                     CHUNK_ALLOC_NO_FORCE);
                btrfs_end_transaction(trans, root);
-               if (ret == -ENOSPC)
+               if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
                break;
        case COMMIT_TRANS:
@@ -4907,11 +4898,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        u64 expected;
        u64 to_reclaim = 0;
 
-       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-       if (can_overcommit(root, space_info, to_reclaim,
-                          BTRFS_RESERVE_FLUSH_ALL))
-               return 0;
-
        list_for_each_entry(ticket, &space_info->tickets, list)
                to_reclaim += ticket->bytes;
        list_for_each_entry(ticket, &space_info->priority_tickets, list)
@@ -4919,6 +4905,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        if (to_reclaim)
                return to_reclaim;
 
+       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
+       if (can_overcommit(root, space_info, to_reclaim,
+                          BTRFS_RESERVE_FLUSH_ALL))
+               return 0;
+
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;
@@ -4972,12 +4963,12 @@ static void wake_all_tickets(struct list_head *head)
  */
 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
 {
-       struct reserve_ticket *last_ticket = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        int flush_state;
        int commit_cycles = 0;
+       u64 last_tickets_id;
 
        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
@@ -4990,8 +4981,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                spin_unlock(&space_info->lock);
                return;
        }
-       last_ticket = list_first_entry(&space_info->tickets,
-                                      struct reserve_ticket, list);
+       last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);
 
        flush_state = FLUSH_DELAYED_ITEMS_NR;
@@ -5011,10 +5001,10 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                                                              space_info);
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);
-               if (last_ticket == ticket) {
+               if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
-                       last_ticket = ticket;
+                       last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
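
Comparing a saved ticket pointer against the head of the list was ABA-prone: a ticket can be freed and a new one allocated at the same address, masking progress. The tickets_id counter, bumped each time a ticket is satisfied (see the space_info->tickets_id++ hunks below), makes the progress check exact. Modeled:

    #include <stdint.h>

    struct space_info { uint64_t tickets_id; };

    /* Returns 1 if at least one ticket was satisfied since *seen. */
    static int made_progress(struct space_info *si, uint64_t *seen)
    {
            if (si->tickets_id == *seen)
                    return 0;       /* escalate to the next flush state */
            *seen = si->tickets_id;
            return 1;               /* restart from the cheapest flush state */
    }
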
@@ -5390,6 +5380,7 @@ again:
                        list_del_init(&ticket->list);
                        num_bytes -= ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        ticket->bytes -= num_bytes;
@@ -5432,6 +5423,7 @@ again:
                        num_bytes -= ticket->bytes;
                        space_info->bytes_may_use += ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        trace_btrfs_space_reservation(fs_info, "space_info",
@@ -6497,19 +6489,15 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 }
 
 /**
- * btrfs_update_reserved_bytes - update the block_group and space info counters
+ * btrfs_add_reserved_bytes - update the block_group and space info counters
  * @cache:     The cache we are manipulating
+ * @ram_bytes:  The number of bytes of file content; the same as
+ *              @num_bytes except on the compression path.
  * @num_bytes: The number of bytes in question
- * @reserve:   One of the reservation enums
  * @delalloc:   The blocks are allocated for the delalloc write
  *
- * This is called by the allocator when it reserves space, or by somebody who is
- * freeing space that was never actually used on disk.  For example if you
- * reserve some space for a new leaf in transaction A and before transaction A
- * commits you free that leaf, you call this with reserve set to 0 in order to
- * clear the reservation.
- *
- * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
+ * This is called by the allocator when it reserves space. For metadata
+ * reservations we do the proper
  * ENOSPC accounting.  For data we handle the reservation through clearing the
  * delalloc bits in the io_tree.  We have to do this since we could end up
  * allocating less disk space for the amount of data we have reserved in the
@@ -6519,44 +6507,63 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
  * make the reservation and return -EAGAIN, otherwise this function always
  * succeeds.
  */
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve, int delalloc)
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                   u64 ram_bytes, u64 num_bytes, int delalloc)
 {
        struct btrfs_space_info *space_info = cache->space_info;
        int ret = 0;
 
        spin_lock(&space_info->lock);
        spin_lock(&cache->lock);
-       if (reserve != RESERVE_FREE) {
-               if (cache->ro) {
-                       ret = -EAGAIN;
-               } else {
-                       cache->reserved += num_bytes;
-                       space_info->bytes_reserved += num_bytes;
-                       if (reserve == RESERVE_ALLOC) {
-                               trace_btrfs_space_reservation(cache->fs_info,
-                                               "space_info", space_info->flags,
-                                               num_bytes, 0);
-                               space_info->bytes_may_use -= num_bytes;
-                       }
-
-                       if (delalloc)
-                               cache->delalloc_bytes += num_bytes;
-               }
+       if (cache->ro) {
+               ret = -EAGAIN;
        } else {
-               if (cache->ro)
-                       space_info->bytes_readonly += num_bytes;
-               cache->reserved -= num_bytes;
-               space_info->bytes_reserved -= num_bytes;
+               cache->reserved += num_bytes;
+               space_info->bytes_reserved += num_bytes;
 
+               trace_btrfs_space_reservation(cache->fs_info,
+                               "space_info", space_info->flags,
+                               ram_bytes, 0);
+               space_info->bytes_may_use -= ram_bytes;
                if (delalloc)
-                       cache->delalloc_bytes -= num_bytes;
+                       cache->delalloc_bytes += num_bytes;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
        return ret;
 }
 
+/**
+ * btrfs_free_reserved_bytes - update the block_group and space info counters
+ * @cache:      The cache we are manipulating
+ * @num_bytes:  The number of bytes in question
+ * @delalloc:   The blocks are allocated for the delalloc write
+ *
+ * This is called by somebody who is freeing space that was never actually used
+ * on disk.  For example if you reserve some space for a new leaf in transaction
+ * A and before transaction A commits you free that leaf, you call this
+ * to clear the reservation.
+ */
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                    u64 num_bytes, int delalloc)
+{
+       struct btrfs_space_info *space_info = cache->space_info;
+       int ret = 0;
+
+       spin_lock(&space_info->lock);
+       spin_lock(&cache->lock);
+       if (cache->ro)
+               space_info->bytes_readonly += num_bytes;
+       cache->reserved -= num_bytes;
+       space_info->bytes_reserved -= num_bytes;
+
+       if (delalloc)
+               cache->delalloc_bytes -= num_bytes;
+       spin_unlock(&cache->lock);
+       spin_unlock(&space_info->lock);
+       return ret;
+}
+
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
 {
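
Splitting btrfs_update_reserved_bytes() removes the reserve-mode enum entirely: btrfs_add_reserved_bytes() moves ram_bytes out of bytes_may_use while adding num_bytes to the reserved counters (refusing read-only groups with -EAGAIN), and btrfs_free_reserved_bytes() unwinds only the reserved side. The paired accounting in miniature (locking elided, names invented):

    #include <errno.h>
    #include <stdint.h>

    struct group { int ro; uint64_t reserved, may_use; };

    static int add_reserved(struct group *g, uint64_t ram, uint64_t num)
    {
            if (g->ro)
                    return -EAGAIN;
            g->reserved += num;     /* bytes that now back real extents */
            g->may_use -= ram;      /* the earlier optimistic reservation */
            return 0;
    }

    static void free_reserved(struct group *g, uint64_t num)
    {
            g->reserved -= num;     /* space was never used on disk */
    }
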
@@ -7191,7 +7198,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
                btrfs_add_free_space(cache, buf->start, buf->len);
-               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+               btrfs_free_reserved_bytes(cache, buf->len, 0);
                btrfs_put_block_group(cache);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
@@ -7416,9 +7423,9 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache,
  * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
-                                    u64 num_bytes, u64 empty_size,
-                                    u64 hint_byte, struct btrfs_key *ins,
-                                    u64 flags, int delalloc)
+                               u64 ram_bytes, u64 num_bytes, u64 empty_size,
+                               u64 hint_byte, struct btrfs_key *ins,
+                               u64 flags, int delalloc)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -7430,8 +7437,6 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        struct btrfs_space_info *space_info;
        int loop = 0;
        int index = __get_raid_index(flags);
-       int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
-               RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
@@ -7763,8 +7768,8 @@ checks:
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
-                                                 alloc_type, delalloc);
+               ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
+                               num_bytes, delalloc);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
@@ -7936,7 +7941,7 @@ again:
        up_read(&info->groups_sem);
 }
 
-int btrfs_reserve_extent(struct btrfs_root *root,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data, int delalloc)
@@ -7948,8 +7953,8 @@ int btrfs_reserve_extent(struct btrfs_root *root,
        flags = btrfs_get_alloc_profile(root, is_data);
 again:
        WARN_ON(num_bytes < root->sectorsize);
-       ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
-                              flags, delalloc);
+       ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
+                              hint_byte, ins, flags, delalloc);
        if (!ret && !is_data) {
                btrfs_dec_block_group_reservations(root->fs_info,
                                                   ins->objectid);
@@ -7958,6 +7963,7 @@ again:
                        num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
+                       ram_bytes = num_bytes;
                        if (num_bytes == min_alloc_size)
                                final_tried = true;
                        goto again;
@@ -7995,7 +8001,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                if (btrfs_test_opt(root->fs_info, DISCARD))
                        ret = btrfs_discard_extent(root, start, len, NULL);
                btrfs_add_free_space(cache, start, len);
-               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+               btrfs_free_reserved_bytes(cache, len, delalloc);
                trace_btrfs_reserved_extent_free(root, start, len);
        }
 
@@ -8208,6 +8214,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 {
        int ret;
        struct btrfs_block_group_cache *block_group;
+       struct btrfs_space_info *space_info;
 
        /*
         * Mixed block groups will exclude before processing the log so we only
@@ -8223,9 +8230,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        if (!block_group)
                return -EINVAL;
 
-       ret = btrfs_update_reserved_bytes(block_group, ins->offset,
-                                         RESERVE_ALLOC_NO_ACCOUNT, 0);
-       BUG_ON(ret); /* logic error */
+       space_info = block_group->space_info;
+       spin_lock(&space_info->lock);
+       spin_lock(&block_group->lock);
+       space_info->bytes_reserved += ins->offset;
+       block_group->reserved += ins->offset;
+       spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
+
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
        btrfs_put_block_group(block_group);
@@ -8368,7 +8380,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
        if (IS_ERR(block_rsv))
                return ERR_CAST(block_rsv);
 
-       ret = btrfs_reserve_extent(root, blocksize, blocksize,
+       ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
                                   empty_size, hint, &ins, 0, 0);
        if (ret)
                goto out_unuse;
@@ -8521,35 +8533,6 @@ reada:
        wc->reada_slot = slot;
 }
 
-/*
- * These may not be seen by the usual inc/dec ref code so we have to
- * add them here.
- */
-static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
-                                    struct btrfs_root *root, u64 bytenr,
-                                    u64 num_bytes)
-{
-       struct btrfs_qgroup_extent_record *qrecord;
-       struct btrfs_delayed_ref_root *delayed_refs;
-
-       qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
-       if (!qrecord)
-               return -ENOMEM;
-
-       qrecord->bytenr = bytenr;
-       qrecord->num_bytes = num_bytes;
-       qrecord->old_roots = NULL;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-       spin_lock(&delayed_refs->lock);
-       if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
-                                            delayed_refs, qrecord))
-               kfree(qrecord);
-       spin_unlock(&delayed_refs->lock);
-
-       return 0;
-}
-
 static int account_leaf_items(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *eb)
@@ -8583,7 +8566,8 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
 
                num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
 
-               ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
+               ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+                               bytenr, num_bytes, GFP_NOFS);
                if (ret)
                        return ret;
        }
@@ -8732,8 +8716,9 @@ walk_down:
                        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
 
-                       ret = record_one_subtree_extent(trans, root, child_bytenr,
-                                                       root->nodesize);
+                       ret = btrfs_qgroup_insert_dirty_extent(trans,
+                                       root->fs_info, child_bytenr,
+                                       root->nodesize, GFP_NOFS);
                        if (ret)
                                goto out;
                }
@@ -9906,6 +9891,7 @@ static int find_first_block_group(struct btrfs_root *root,
                        } else {
                                ret = 0;
                        }
+                       free_extent_map(em);
                        goto out;
                }
                path->slots[0]++;
@@ -9942,6 +9928,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
                block_group->iref = 0;
                block_group->inode = NULL;
                spin_unlock(&block_group->lock);
+               ASSERT(block_group->io_ctl.inode == NULL);
                iput(inode);
                last = block_group->key.objectid + block_group->key.offset;
                btrfs_put_block_group(block_group);
@@ -9999,6 +9986,10 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                        free_excluded_extents(info->extent_root, block_group);
 
                btrfs_remove_free_space_cache(block_group);
+               ASSERT(list_empty(&block_group->dirty_list));
+               ASSERT(list_empty(&block_group->io_list));
+               ASSERT(list_empty(&block_group->bg_list));
+               ASSERT(atomic_read(&block_group->count) == 1);
                btrfs_put_block_group(block_group);
 
                spin_lock(&info->block_group_cache_lock);
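The extent-tree.c hunks above replace the old RESERVE_ALLOC/RESERVE_FREE enum with two dedicated helpers. The following is a minimal userspace C sketch of that accounting contract, not the kernel code: the struct and function names are simplified stand-ins for btrfs_space_info/btrfs_block_group_cache, and pthread mutexes stand in for spinlocks. The add side moves ram_bytes out of the delalloc reservation (bytes_may_use) while reserving num_bytes on disk; the free side credits bytes_readonly when the block group went read-only in the meantime.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for btrfs_space_info / btrfs_block_group_cache,
 * with pthread mutexes in place of kernel spinlocks. */
struct space_info {
        pthread_mutex_t lock;
        uint64_t bytes_may_use;         /* delalloc reservations */
        uint64_t bytes_reserved;        /* hard, on-disk reservations */
        uint64_t bytes_readonly;
};

struct block_group {
        pthread_mutex_t lock;
        struct space_info *space_info;
        uint64_t reserved;
        uint64_t delalloc_bytes;
        int ro;
};

/* Model of btrfs_add_reserved_bytes(): ram_bytes leaves bytes_may_use
 * while num_bytes becomes reserved; the two differ when compression
 * writes fewer bytes to disk than the delalloc range covered. */
static int add_reserved_bytes(struct block_group *bg, uint64_t ram_bytes,
                              uint64_t num_bytes, int delalloc)
{
        struct space_info *si = bg->space_info;

        pthread_mutex_lock(&si->lock);
        pthread_mutex_lock(&bg->lock);
        bg->reserved += num_bytes;
        si->bytes_reserved += num_bytes;
        si->bytes_may_use -= ram_bytes;
        if (delalloc)
                bg->delalloc_bytes += num_bytes;
        pthread_mutex_unlock(&bg->lock);
        pthread_mutex_unlock(&si->lock);
        return 0;
}

/* Model of btrfs_free_reserved_bytes(): drop the reservation; if the
 * group became read-only meanwhile, account the space as read-only
 * instead of letting it look allocatable again. */
static void free_reserved_bytes(struct block_group *bg, uint64_t num_bytes,
                                int delalloc)
{
        struct space_info *si = bg->space_info;

        pthread_mutex_lock(&si->lock);
        pthread_mutex_lock(&bg->lock);
        if (bg->ro)
                si->bytes_readonly += num_bytes;
        bg->reserved -= num_bytes;
        si->bytes_reserved -= num_bytes;
        if (delalloc)
                bg->delalloc_bytes -= num_bytes;
        pthread_mutex_unlock(&bg->lock);
        pthread_mutex_unlock(&si->lock);
}

int main(void)
{
        struct space_info si = { PTHREAD_MUTEX_INITIALIZER, 1 << 20, 0, 0 };
        struct block_group bg = { PTHREAD_MUTEX_INITIALIZER, &si, 0, 0, 0 };

        add_reserved_bytes(&bg, 8192, 4096, 0); /* compressed write */
        free_reserved_bytes(&bg, 4096, 0);
        printf("may_use=%llu reserved=%llu\n",
               (unsigned long long)si.bytes_may_use,
               (unsigned long long)si.bytes_reserved);
        return 0;
}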
index bc2729a7612db5e472fda6b359a7862bcead0cd4..28cd88fccc7eaa0e6776229cb00e6e336799263d 100644 (file)
@@ -20,6 +20,7 @@
 #define EXTENT_DAMAGED         (1U << 14)
 #define EXTENT_NORESERVE       (1U << 15)
 #define EXTENT_QGROUP_RESERVED (1U << 16)
+#define EXTENT_CLEAR_DATA_RESV (1U << 17)
 #define EXTENT_IOBITS          (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
 
index 9404121fd5f7b44f165c6f76c856548cf5722aff..fea31a4a6e36844d97a53c80a7f442c166e82b60 100644 (file)
@@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                 */
                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                          &BTRFS_I(inode)->runtime_flags);
+               /*
+                * An ordered extent might have started before and completed
+                * already with io errors, in which case the inode was not
+                * updated and we end up here. So check the inode's mapping
+                * flags for any errors that might have happened while doing
+                * writeback of file data.
+                */
+               ret = btrfs_inode_check_errors(inode);
                inode_unlock(inode);
                goto out;
        }
@@ -2062,7 +2070,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        }
        trans->sync = true;
 
-       btrfs_init_log_ctx(&ctx);
+       btrfs_init_log_ctx(&ctx, inode);
 
        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
@@ -2667,6 +2675,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 
        alloc_start = round_down(offset, blocksize);
        alloc_end = round_up(offset + len, blocksize);
+       cur_offset = alloc_start;
 
        /* Make sure we aren't being given some crap mode */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -2759,7 +2768,6 @@ static long btrfs_fallocate(struct file *file, int mode,
 
        /* First, check if we exceed the qgroup limit */
        INIT_LIST_HEAD(&reserve_list);
-       cur_offset = alloc_start;
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                      alloc_end - cur_offset, 0);
@@ -2786,6 +2794,14 @@ static long btrfs_fallocate(struct file *file, int mode,
                                        last_byte - cur_offset);
                        if (ret < 0)
                                break;
+               } else {
+                       /*
+                        * We do not need to reserve an unwritten extent for
+                        * this range; free the reserved data space first,
+                        * otherwise it would result in a false ENOSPC error.
+                        */
+                       btrfs_free_reserved_data_space(inode, cur_offset,
+                               last_byte - cur_offset);
                }
                free_extent_map(em);
                cur_offset = last_byte;
@@ -2803,6 +2819,9 @@ static long btrfs_fallocate(struct file *file, int mode,
                                        range->start,
                                        range->len, 1 << inode->i_blkbits,
                                        offset + len, &alloc_hint);
+               else
+                       btrfs_free_reserved_data_space(inode, range->start,
+                                                      range->len);
                list_del(&range->list);
                kfree(range);
        }
@@ -2837,18 +2856,11 @@ out_unlock:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                             &cached_state, GFP_KERNEL);
 out:
-       /*
-        * As we waited the extent range, the data_rsv_map must be empty
-        * in the range, as written data range will be released from it.
-        * And for prealloacted extent, it will also be released when
-        * its metadata is written.
-        * So this is completely used as cleanup.
-        */
-       btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
        inode_unlock(inode);
        /* Let go of our reservation. */
-       btrfs_free_reserved_data_space(inode, alloc_start,
-                                      alloc_end - alloc_start);
+       if (ret != 0)
+               btrfs_free_reserved_data_space(inode, alloc_start,
+                                      alloc_end - cur_offset);
        return ret;
 }
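The fallocate fix above initializes cur_offset before the first early exit and, on failure, releases only the reservation beyond cur_offset: ranges already walked either returned their reservation immediately (existing extents) or hold it in the queued allocation list. A compilable sketch of that bookkeeping, with a flat range list standing in for the kernel's extent maps and an invented fail_at knob to simulate a mid-walk error:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* One mapped range; hole == 1 means fallocate must allocate it,
 * hole == 0 means an extent already exists there. */
struct range { uint64_t start, len; int hole; };

static uint64_t reserved;       /* bytes of reserved data space */

static void free_reserved(uint64_t start, uint64_t len)
{
        reserved -= len;
        printf("free  [%llu, %llu)\n", (unsigned long long)start,
               (unsigned long long)(start + len));
}

static int fallocate_sketch(const struct range *map, int n,
                            uint64_t alloc_start, uint64_t alloc_end,
                            int fail_at)
{
        uint64_t cur_offset = alloc_start;
        int i, ret = 0;

        reserved = alloc_end - alloc_start;     /* up-front reservation */
        for (i = 0; i < n; i++) {
                if (i == fail_at) {
                        ret = -EIO;             /* simulated mid-walk error */
                        break;
                }
                if (!map[i].hole)               /* nothing to allocate here */
                        free_reserved(map[i].start, map[i].len);
                cur_offset = map[i].start + map[i].len;
        }
        /* the fix: on error, release only the tail we never walked */
        if (ret != 0 && cur_offset < alloc_end)
                free_reserved(cur_offset, alloc_end - cur_offset);
        return ret;
}

int main(void)
{
        struct range map[] = {
                { 0, 4096, 0 }, { 4096, 4096, 1 }, { 8192, 4096, 0 },
        };

        fallocate_sketch(map, 3, 0, 12288, 2);
        printf("still reserved: %llu (held by the queued hole)\n",
               (unsigned long long)reserved);
        return 0;
}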
 
index aa6fabaee72ed488844fc042e98536a77e9148cf..359ee861b5a4b90e567ca63477fd579055477e3f 100644 (file)
@@ -495,10 +495,9 @@ again:
        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret) {
-               btrfs_delalloc_release_space(inode, 0, prealloc);
+               btrfs_delalloc_release_metadata(inode, prealloc);
                goto out_put;
        }
-       btrfs_free_reserved_data_space(inode, 0, prealloc);
 
        ret = btrfs_write_out_ino_cache(root, trans, path, inode);
 out_put:
index 2f5975954ccf198737e07b29c8706024114a78ae..e6811c42e41ef34d27119ee5420d4c6653df8519 100644 (file)
@@ -566,6 +566,8 @@ cont:
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
+                       btrfs_free_reserved_data_space_noquota(inode, start,
+                                               end - start + 1);
                        goto free_pages_out;
                }
        }
@@ -742,7 +744,7 @@ retry:
                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);
 
-               ret = btrfs_reserve_extent(root,
+               ret = btrfs_reserve_extent(root, async_extent->ram_size,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
@@ -969,7 +971,8 @@ static noinline int cow_file_range(struct inode *inode,
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);
-
+                       btrfs_free_reserved_data_space_noquota(inode, start,
+                                               end - start + 1);
                        *nr_written = *nr_written +
                             (end - start + PAGE_SIZE) / PAGE_SIZE;
                        *page_started = 1;
@@ -989,7 +992,7 @@ static noinline int cow_file_range(struct inode *inode,
                unsigned long op;
 
                cur_alloc_size = disk_num_bytes;
-               ret = btrfs_reserve_extent(root, cur_alloc_size,
+               ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
@@ -1489,8 +1492,10 @@ out_check:
                extent_clear_unlock_delalloc(inode, cur_offset,
                                             cur_offset + num_bytes - 1,
                                             locked_page, EXTENT_LOCKED |
-                                            EXTENT_DELALLOC, PAGE_UNLOCK |
-                                            PAGE_SET_PRIVATE2);
+                                            EXTENT_DELALLOC |
+                                            EXTENT_CLEAR_DATA_RESV,
+                                            PAGE_UNLOCK | PAGE_SET_PRIVATE2);
+
                if (!nolock && nocow)
                        btrfs_end_write_no_snapshoting(root);
                cur_offset = extent_end;
@@ -1807,7 +1812,9 @@ static void btrfs_clear_bit_hook(struct inode *inode,
                        return;
 
                if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-                   && do_list && !(state->state & EXTENT_NORESERVE))
+                   && do_list && !(state->state & EXTENT_NORESERVE)
+                   && (*bits & (EXTENT_DO_ACCOUNTING |
+                   EXTENT_CLEAR_DATA_RESV)))
                        btrfs_free_reserved_data_space_noquota(inode,
                                        state->start, len);
 
@@ -3435,10 +3442,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                found_key.offset = 0;
                inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
                ret = PTR_ERR_OR_ZERO(inode);
-               if (ret && ret != -ESTALE)
+               if (ret && ret != -ENOENT)
                        goto out;
 
-               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+               if (ret == -ENOENT && root == root->fs_info->tree_root) {
                        struct btrfs_root *dead_root;
                        struct btrfs_fs_info *fs_info = root->fs_info;
                        int is_dead_root = 0;
@@ -3474,7 +3481,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
                 */
-               if (ret == -ESTALE) {
+               if (ret == -ENOENT) {
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
@@ -3633,7 +3640,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
 /*
  * read an inode from the btree into the in-memory inode
  */
-static void btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode)
 {
        struct btrfs_path *path;
        struct extent_buffer *leaf;
@@ -3652,14 +3659,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
                filled = true;
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               ret = -ENOMEM;
                goto make_bad;
+       }
 
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
-       if (ret)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto make_bad;
+       }
 
        leaf = path->nodes[0];
 
@@ -3812,11 +3824,12 @@ cache_acl:
        }
 
        btrfs_update_iflags(inode);
-       return;
+       return 0;
 
 make_bad:
        btrfs_free_path(path);
        make_bad_inode(inode);
+       return ret;
 }
 
 /*
@@ -4204,6 +4217,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        int err = 0;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_trans_handle *trans;
+       u64 last_unlink_trans;
 
        if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
@@ -4226,11 +4240,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (err)
                goto out;
 
+       last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+
        /* now the directory is empty */
        err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
                                 dentry->d_name.name, dentry->d_name.len);
-       if (!err)
+       if (!err) {
                btrfs_i_size_write(inode, 0);
+               /*
+                * Propagate the last_unlink_trans value of the deleted dir to
+                * its parent directory. This is to prevent an unrecoverable
+                * log tree in the case we do something like this:
+                * 1) create dir foo
+                * 2) create snapshot under dir foo
+                * 3) delete the snapshot
+                * 4) rmdir foo
+                * 5) mkdir foo
+                * 6) fsync foo or some file inside foo
+                */
+               if (last_unlink_trans >= trans->transid)
+                       BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+       }
 out:
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
@@ -5606,7 +5636,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               btrfs_read_locked_inode(inode);
+               int ret;
+
+               ret = btrfs_read_locked_inode(inode);
                if (!is_bad_inode(inode)) {
                        inode_tree_add(inode);
                        unlock_new_inode(inode);
@@ -5615,7 +5647,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                } else {
                        unlock_new_inode(inode);
                        iput(inode);
-                       inode = ERR_PTR(-ESTALE);
+                       ASSERT(ret < 0);
+                       inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
                }
        }
 
@@ -7225,7 +7258,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        int ret;
 
        alloc_hint = get_extent_allocation_hint(inode, start, len);
-       ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
+       ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
                                   alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
@@ -7725,6 +7758,13 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                                ret = PTR_ERR(em2);
                                goto unlock_err;
                        }
+                       /*
+                        * For an inode marked NODATACOW or an extent marked
+                        * PREALLOC, use the existing or preallocated extent,
+                        * so we do not need to adjust btrfs_space_info's
+                        * bytes_may_use.
+                        */
+                       btrfs_free_reserved_data_space_noquota(inode,
+                                       start, len);
                        goto unlock;
                }
        }
@@ -7759,7 +7799,6 @@ unlock:
                        i_size_write(inode, start + len);
 
                adjust_dio_outstanding_extents(inode, dio_data, len);
-               btrfs_free_reserved_data_space(inode, start, len);
                WARN_ON(dio_data->reserve < len);
                dio_data->reserve -= len;
                dio_data->unsubmitted_oe_range_end = start + len;
@@ -10280,6 +10319,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
        u64 last_alloc = (u64)-1;
        int ret = 0;
        bool own_trans = true;
+       u64 end = start + num_bytes - 1;
 
        if (trans)
                own_trans = false;
@@ -10301,8 +10341,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                 * sized chunks.
                 */
                cur_bytes = min(cur_bytes, last_alloc);
-               ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
-                                          *alloc_hint, &ins, 1, 0);
+               ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
+                               min_size, 0, *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
@@ -10388,6 +10428,9 @@ next:
                if (own_trans)
                        btrfs_end_transaction(trans, root);
        }
+       if (cur_offset < end)
+               btrfs_free_reserved_data_space(inode, cur_offset,
+                       end - cur_offset + 1);
        return ret;
 }
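btrfs_read_locked_inode() above now reports the concrete failure (mapping a positive btrfs_lookup_inode() result to -ENOENT), and btrfs_iget() propagates that instead of a blanket -ESTALE. A small sketch of the error-propagation shape, using plain errno values in place of the kernel's ERR_PTR encoding; the names are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct inode { int bad; };

/* Before the patch this returned void and only marked the inode bad;
 * now the concrete reason (-ENOMEM, -ENOENT, ...) is returned. */
static int read_locked_inode(struct inode *inode, int simulated_err)
{
        if (simulated_err) {
                inode->bad = 1;
                return simulated_err;
        }
        return 0;
}

static int iget(struct inode *inode, int simulated_err)
{
        int ret = read_locked_inode(inode, simulated_err);

        if (!inode->bad)
                return 0;
        /* propagate the real error; -ESTALE only as a last resort */
        return ret < 0 ? ret : -ESTALE;
}

int main(void)
{
        struct inode inode = { 0 };

        printf("ok: %d\n", iget(&inode, 0));
        printf("missing: %s\n", strerror(-iget(&inode, -ENOENT)));
        return 0;
}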
 
index 14ed1e9e6bc83df20da4863798d0052b376e5636..7fd939bfbd99359b3ffab8b9689d8fdd2b313aeb 100644 (file)
@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
        int namelen;
        int ret = 0;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        ret = mnt_want_write_file(file);
        if (ret)
                goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
        struct btrfs_ioctl_vol_args *vol_args;
        int ret;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        bool readonly = false;
        struct btrfs_qgroup_inherit *inherit = NULL;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        int ret;
        int err = 0;
 
+       if (!S_ISDIR(dir->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -5084,7 +5096,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       return btrfs_qgroup_wait_for_completion(root->fs_info);
+       return btrfs_qgroup_wait_for_completion(root->fs_info, true);
 }
 
 static long _btrfs_ioctl_set_received_subvol(struct file *file,
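The ioctl hunks above add the same guard to each snapshot create/destroy entry point: fail with -ENOTDIR unless the ioctl target is a directory. A hypothetical userspace equivalent of that check:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

/* Refuse to proceed unless fd refers to a directory, mirroring the
 * S_ISDIR checks the patch adds to the snapshot ioctl entry points. */
static int require_directory(int fd)
{
        struct stat st;

        if (fstat(fd, &st) != 0)
                return -errno;
        if (!S_ISDIR(st.st_mode))
                return -ENOTDIR;
        return 0;
}

int main(void)
{
        printf("stdin is %sa directory\n",
               require_directory(0) == 0 ? "" : "not ");
        return 0;
}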
index 93ee1c18ef9d4c2e1677fc3ad437b3044c84f68e..8db2e29fdcf417db6096a107c938a4a133fb6af2 100644 (file)
@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                goto out;
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
-       btrfs_qgroup_wait_for_completion(fs_info);
+       btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
@@ -1453,10 +1453,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                struct btrfs_delayed_ref_root *delayed_refs,
-                                struct btrfs_qgroup_extent_record *record)
+int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+                               struct btrfs_delayed_ref_root *delayed_refs,
+                               struct btrfs_qgroup_extent_record *record)
 {
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
@@ -1475,12 +1474,42 @@ btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
-                       return entry;
+                       return 1;
        }
 
        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
-       return NULL;
+       return 0;
+}
+
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+               struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+               gfp_t gfp_flag)
+{
+       struct btrfs_qgroup_extent_record *record;
+       struct btrfs_delayed_ref_root *delayed_refs;
+       int ret;
+
+       if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+               return 0;
+       if (WARN_ON(trans == NULL))
+               return -EINVAL;
+       record = kmalloc(sizeof(*record), gfp_flag);
+       if (!record)
+               return -ENOMEM;
+
+       delayed_refs = &trans->transaction->delayed_refs;
+       record->bytenr = bytenr;
+       record->num_bytes = num_bytes;
+       record->old_roots = NULL;
+
+       spin_lock(&delayed_refs->lock);
+       ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
+                                                     record);
+       spin_unlock(&delayed_refs->lock);
+       if (ret > 0)
+               kfree(record);
+       return 0;
 }
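The rework above splits a no-lock insert, which signals an existing record through a positive return value, from an encapsulated wrapper that allocates, takes the delayed-refs lock, and frees the record again on a duplicate. A self-contained sketch of that contract; the kernel uses an rb-tree, while this toy version uses an unbalanced binary tree to stay short:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy dirty-extent record keyed by bytenr. */
struct record {
        uint64_t bytenr;
        struct record *left, *right;
};

/* Contract of the _nolock variant: 0 = inserted, 1 = a record for
 * this bytenr already exists and the caller may free its copy. */
static int insert_record_nolock(struct record **root, struct record *rec)
{
        struct record **p = root;

        while (*p) {
                if (rec->bytenr < (*p)->bytenr)
                        p = &(*p)->left;
                else if (rec->bytenr > (*p)->bytenr)
                        p = &(*p)->right;
                else
                        return 1;
        }
        rec->left = rec->right = NULL;
        *p = rec;
        return 0;
}

/* The encapsulated wrapper: allocate, insert, free on duplicate.
 * A duplicate is not an error, the extent is already tracked. */
static int insert_dirty_extent(struct record **root, uint64_t bytenr)
{
        struct record *rec = malloc(sizeof(*rec));

        if (!rec)
                return -ENOMEM;
        rec->bytenr = bytenr;
        if (insert_record_nolock(root, rec) > 0)
                free(rec);
        return 0;
}

int main(void)
{
        struct record *root = NULL;

        insert_dirty_extent(&root, 4096);
        insert_dirty_extent(&root, 4096);       /* silently deduplicated */
        printf("tracked bytenr: %llu\n", (unsigned long long)root->bytenr);
        return 0;
}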
 
 #define UPDATE_NEW     0
@@ -2303,6 +2332,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        int err = -ENOMEM;
        int ret = 0;
 
+       mutex_lock(&fs_info->qgroup_rescan_lock);
+       fs_info->qgroup_rescan_running = true;
+       mutex_unlock(&fs_info->qgroup_rescan_lock);
+
        path = btrfs_alloc_path();
        if (!path)
                goto out;
@@ -2369,6 +2402,9 @@ out:
        }
 
 done:
+       mutex_lock(&fs_info->qgroup_rescan_lock);
+       fs_info->qgroup_rescan_running = false;
+       mutex_unlock(&fs_info->qgroup_rescan_lock);
        complete_all(&fs_info->qgroup_rescan_completion);
 }
 
@@ -2487,20 +2523,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
        return 0;
 }
 
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+                                    bool interruptible)
 {
        int running;
        int ret = 0;
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
        spin_lock(&fs_info->qgroup_lock);
-       running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+       running = fs_info->qgroup_rescan_running;
        spin_unlock(&fs_info->qgroup_lock);
        mutex_unlock(&fs_info->qgroup_rescan_lock);
 
-       if (running)
+       if (!running)
+               return 0;
+
+       if (interruptible)
                ret = wait_for_completion_interruptible(
                                        &fs_info->qgroup_rescan_completion);
+       else
+               wait_for_completion(&fs_info->qgroup_rescan_completion);
 
        return ret;
 }
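The rescan changes above track progress through a dedicated qgroup_rescan_running flag that the worker itself sets and clears, so waiters no longer consult the persistent status bits. A userspace model with a condition variable in place of the kernel completion; the kernel's interruptible wait can additionally return -ERESTARTSYS, which this sketch does not model:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rescan {
        pthread_mutex_t lock;
        pthread_cond_t done;
        bool running;           /* stand-in for qgroup_rescan_running */
};

static void *rescan_worker(void *arg)
{
        struct rescan *r = arg;

        pthread_mutex_lock(&r->lock);
        r->running = true;      /* set by the worker itself, as in the fix */
        pthread_mutex_unlock(&r->lock);

        /* ... scan and account extents ... */

        pthread_mutex_lock(&r->lock);
        r->running = false;
        pthread_cond_broadcast(&r->done);
        pthread_mutex_unlock(&r->lock);
        return NULL;
}

/* Return immediately when no rescan runs; otherwise block until the
 * worker signals completion. */
static void wait_for_rescan(struct rescan *r)
{
        pthread_mutex_lock(&r->lock);
        while (r->running)
                pthread_cond_wait(&r->done, &r->lock);
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct rescan r = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
        };
        pthread_t t;

        pthread_create(&t, NULL, rescan_worker, &r);
        wait_for_rescan(&r);
        pthread_join(&t, NULL);
        puts("rescan finished (or never started)");
        return 0;
}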
index 710887c06aaf4c6171a74b2b954653dfa61478e1..1bc64c864b626a25b9ec9bf983090504fe9a78e9 100644 (file)
@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
 void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+                                    bool interruptible);
 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst);
 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
@@ -63,10 +64,35 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
 struct btrfs_delayed_extent_op;
 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                struct btrfs_delayed_ref_root *delayed_refs,
-                                struct btrfs_qgroup_extent_record *record);
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at transaction commit time.
+ *
+ * No-lock version: the caller must acquire the delayed ref lock and
+ * allocate the memory.
+ *
+ * Return 0 on successful insertion.
+ * Return >0 if the record already exists; the caller can free @record safely.
+ * Errors are not possible.
+ */
+int btrfs_qgroup_insert_dirty_extent_nolock(
+               struct btrfs_fs_info *fs_info,
+               struct btrfs_delayed_ref_root *delayed_refs,
+               struct btrfs_qgroup_extent_record *record);
+
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at transaction commit time.
+ *
+ * Better encapsulated version.
+ *
+ * Return 0 if the operation is done.
+ * Return <0 on error, such as a memory allocation failure or an invalid
+ * parameter (NULL trans).
+ */
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+               struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+               gfp_t gfp_flag);
+
 int
 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info,
index b26a5aea41b4a67be495fb419080e3dd9d235136..c0c13dc6fe1286673982dfa78d9930cb219927dd 100644 (file)
@@ -31,6 +31,7 @@
 #include "async-thread.h"
 #include "free-space-cache.h"
 #include "inode-map.h"
+#include "qgroup.h"
 
 /*
  * backref_node, mapping_node and tree_block start with this
@@ -3037,15 +3038,19 @@ int prealloc_file_extent_cluster(struct inode *inode,
        u64 num_bytes;
        int nr = 0;
        int ret = 0;
+       u64 prealloc_start = cluster->start - offset;
+       u64 prealloc_end = cluster->end - offset;
+       u64 cur_offset;
 
        BUG_ON(cluster->start != cluster->boundary[0]);
        inode_lock(inode);
 
-       ret = btrfs_check_data_free_space(inode, cluster->start,
-                                         cluster->end + 1 - cluster->start);
+       ret = btrfs_check_data_free_space(inode, prealloc_start,
+                                         prealloc_end + 1 - prealloc_start);
        if (ret)
                goto out;
 
+       cur_offset = prealloc_start;
        while (nr < cluster->nr) {
                start = cluster->boundary[nr] - offset;
                if (nr + 1 < cluster->nr)
@@ -3055,16 +3060,21 @@ int prealloc_file_extent_cluster(struct inode *inode,
 
                lock_extent(&BTRFS_I(inode)->io_tree, start, end);
                num_bytes = end + 1 - start;
+               if (cur_offset < start)
+                       btrfs_free_reserved_data_space(inode, cur_offset,
+                                       start - cur_offset);
                ret = btrfs_prealloc_file_range(inode, 0, start,
                                                num_bytes, num_bytes,
                                                end + 1, &alloc_hint);
+               cur_offset = end + 1;
                unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
                if (ret)
                        break;
                nr++;
        }
-       btrfs_free_reserved_data_space(inode, cluster->start,
-                                      cluster->end + 1 - cluster->start);
+       if (cur_offset < prealloc_end)
+               btrfs_free_reserved_data_space(inode, cur_offset,
+                                      prealloc_end + 1 - cur_offset);
 out:
        inode_unlock(inode);
        return ret;
@@ -3916,6 +3926,90 @@ int prepare_to_relocate(struct reloc_control *rc)
        return 0;
 }
 
+/*
+ * Qgroup fixer for data chunk relocation.
+ * The data relocation is done in the following steps:
+ * 1) Copy data extents into the data reloc tree
+ * 2) Create a tree reloc tree (special snapshot) for the related subvolumes
+ * 3) Modify file extents in the tree reloc tree
+ * 4) Merge the tree reloc tree with the original fs tree by swapping tree
+ *    blocks
+ *
+ * The problem is that the data and tree reloc trees are not accounted to
+ * qgroup, and step 4) only tells qgroup to track tree block changes, not the
+ * file extents inside those tree blocks.
+ *
+ * The good news is that the related data extents are all in the data reloc
+ * tree, so we only need to tell qgroup to track all file extents in the data
+ * reloc tree before committing the transaction.
+ */
+static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
+                                            struct reloc_control *rc)
+{
+       struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
+       struct inode *inode = rc->data_inode;
+       struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       int ret = 0;
+
+       if (!fs_info->quota_enabled)
+               return 0;
+
+       /*
+        * The qgroup fix is only valid for the stage where we update the
+        * data pointers.
+        * For the MOVING_DATA stage we would miss the timing of the tree
+        * block swap and cannot fix it.
+        */
+       if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
+               return 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+       while (1) {
+               struct btrfs_file_extent_item *fi;
+
+               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+               if (key.objectid > btrfs_ino(inode))
+                       break;
+               if (key.type != BTRFS_EXTENT_DATA_KEY)
+                       goto next;
+               fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                   struct btrfs_file_extent_item);
+               if (btrfs_file_extent_type(path->nodes[0], fi) !=
+                               BTRFS_FILE_EXTENT_REG)
+                       goto next;
+               ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
+                       btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
+                       btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
+                       GFP_NOFS);
+               if (ret < 0)
+                       break;
+next:
+               ret = btrfs_next_item(data_reloc_root, path);
+               if (ret < 0)
+                       break;
+               if (ret > 0) {
+                       ret = 0;
+                       break;
+               }
+       }
+       unlock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 {
        struct rb_root blocks = RB_ROOT;
@@ -4102,10 +4196,18 @@ restart:
 
        /* get rid of pinned extents */
        trans = btrfs_join_transaction(rc->extent_root);
-       if (IS_ERR(trans))
+       if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-       else
-               btrfs_commit_transaction(trans, rc->extent_root);
+               goto out_free;
+       }
+       ret = qgroup_fix_relocated_data_extents(trans, rc);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               if (!err)
+                       err = ret;
+               goto out_free;
+       }
+       btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
        btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
        btrfs_free_path(path);
@@ -4468,10 +4570,16 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        unset_reloc_control(rc);
 
        trans = btrfs_join_transaction(rc->extent_root);
-       if (IS_ERR(trans))
+       if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-       else
-               err = btrfs_commit_transaction(trans, rc->extent_root);
+               goto out_free;
+       }
+       err = qgroup_fix_relocated_data_extents(trans, rc);
+       if (err < 0) {
+               btrfs_abort_transaction(trans, err);
+               goto out_free;
+       }
+       err = btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
        kfree(rc);
 out:
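qgroup_fix_relocated_data_extents() above iterates every file extent item of the relocation inode in key order and records the regular ones as qgroup-dirty before the transaction commits. A sketch of that cursor-style walk, with a sorted flat array standing in for btrfs_search_slot()/btrfs_next_item():

#include <stdint.h>
#include <stdio.h>

/* Real key/type values, toy flat layout. */
enum { EXTENT_DATA_KEY = 108, EXTENT_REG = 1, EXTENT_PREALLOC = 2 };

struct item {
        uint64_t objectid;              /* inode number */
        uint8_t  type;                  /* key type */
        uint8_t  extent_type;           /* REG vs PREALLOC */
        uint64_t disk_bytenr, disk_num_bytes;
};

/* Walk items in key order starting at (ino, EXTENT_DATA_KEY, 0), stop
 * once past the inode, skip non-REG items, and "record" the rest:
 * the shape of the relocation qgroup fix above. */
static void fix_relocated_extents(const struct item *items, int n,
                                  uint64_t ino)
{
        int i;

        for (i = 0; i < n; i++) {
                if (items[i].objectid > ino)
                        break;
                if (items[i].objectid < ino ||
                    items[i].type != EXTENT_DATA_KEY ||
                    items[i].extent_type != EXTENT_REG)
                        continue;
                printf("record dirty extent %llu+%llu\n",
                       (unsigned long long)items[i].disk_bytenr,
                       (unsigned long long)items[i].disk_num_bytes);
        }
}

int main(void)
{
        const struct item items[] = {
                { 257, EXTENT_DATA_KEY, EXTENT_REG,      12582912, 4096 },
                { 257, EXTENT_DATA_KEY, EXTENT_PREALLOC, 0,        0    },
                { 258, EXTENT_DATA_KEY, EXTENT_REG,      16777216, 4096 },
        };

        fix_relocated_extents(items, 3, 257);
        return 0;
}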
index 7fd7e1830cfe676e74000b54de026a5a1070cc1a..091296062456b5621ba6a24d4dcae6932afcba6f 100644 (file)
@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                root_key.objectid = key.offset;
                key.offset++;
 
+               /*
+                * The root might have been inserted already, as before we look
+                * for orphan roots, log replay might have happened, which
+                * triggers a transaction commit and qgroup accounting, which
+                * in turn reads and inserts fs roots while doing backref
+                * walking.
+                */
+               root = btrfs_lookup_fs_root(tree_root->fs_info,
+                                           root_key.objectid);
+               if (root) {
+                       WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+                                         &root->state));
+                       if (btrfs_root_refs(&root->root_item) == 0)
+                               btrfs_add_dead_root(root);
+                       continue;
+               }
+
                root = btrfs_read_fs_root(tree_root, &root_key);
                err = PTR_ERR_OR_ZERO(root);
                if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 
                err = btrfs_insert_fs_root(root->fs_info, root);
-               /*
-                * The root might have been inserted already, as before we look
-                * for orphan roots, log replay might have happened, which
-                * triggers a transaction commit and qgroup accounting, which
-                * in turn reads and inserts fs roots while doing backref
-                * walking.
-                */
-               if (err == -EEXIST)
-                       err = 0;
                if (err) {
+                       BUG_ON(err == -EEXIST);
                        btrfs_free_fs_root(root);
                        break;
                }
index b71dd298385c1b5cfb3c00761db0c8ee674e01e7..a87675ffd02b33a9766eb229eea2b3b4200b783e 100644 (file)
@@ -231,7 +231,6 @@ struct pending_dir_move {
        u64 parent_ino;
        u64 ino;
        u64 gen;
-       bool is_orphan;
        struct list_head update_refs;
 };
 
@@ -274,6 +273,39 @@ struct name_cache_entry {
        char name[];
 };
 
+static void inconsistent_snapshot_error(struct send_ctx *sctx,
+                                       enum btrfs_compare_tree_result result,
+                                       const char *what)
+{
+       const char *result_string;
+
+       switch (result) {
+       case BTRFS_COMPARE_TREE_NEW:
+               result_string = "new";
+               break;
+       case BTRFS_COMPARE_TREE_DELETED:
+               result_string = "deleted";
+               break;
+       case BTRFS_COMPARE_TREE_CHANGED:
+               result_string = "updated";
+               break;
+       case BTRFS_COMPARE_TREE_SAME:
+               ASSERT(0);
+               result_string = "unchanged";
+               break;
+       default:
+               ASSERT(0);
+               result_string = "unexpected";
+       }
+
+       btrfs_err(sctx->send_root->fs_info,
+                 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
+                 result_string, what, sctx->cmp_key->objectid,
+                 sctx->send_root->root_key.objectid,
+                 (sctx->parent_root ?
+                  sctx->parent_root->root_key.objectid : 0));
+}
+
 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 
 static struct waiting_dir_move *
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
         * was already unlinked/moved, so we can safely assume that we will not
         * overwrite anything at this point in time.
         */
-       if (other_inode > sctx->send_progress) {
+       if (other_inode > sctx->send_progress ||
+           is_waiting_for_move(sctx, other_inode)) {
                ret = get_inode_info(sctx->parent_root, other_inode, NULL,
                                who_gen, NULL, NULL, NULL, NULL);
                if (ret < 0)
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+       if (ret > 0)
+               ret = -ENOENT;
        if (ret < 0)
                goto out;
 
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                }
 
                if (loc.objectid > send_progress) {
+                       struct orphan_dir_info *odi;
+
+                       odi = get_orphan_dir_info(sctx, dir);
+                       free_orphan_dir_info(sctx, odi);
                        ret = 0;
                        goto out;
                }
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        pm->parent_ino = parent_ino;
        pm->ino = ino;
        pm->gen = ino_gen;
-       pm->is_orphan = is_orphan;
        INIT_LIST_HEAD(&pm->list);
        INIT_LIST_HEAD(&pm->update_refs);
        RB_CLEAR_NODE(&pm->node);
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
        return NULL;
 }
 
+static int path_loop(struct send_ctx *sctx, struct fs_path *name,
+                    u64 ino, u64 gen, u64 *ancestor_ino)
+{
+       int ret = 0;
+       u64 parent_inode = 0;
+       u64 parent_gen = 0;
+       u64 start_ino = ino;
+
+       *ancestor_ino = 0;
+       while (ino != BTRFS_FIRST_FREE_OBJECTID) {
+               fs_path_reset(name);
+
+               if (is_waiting_for_rm(sctx, ino))
+                       break;
+               if (is_waiting_for_move(sctx, ino)) {
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       ret = get_first_ref(sctx->parent_root, ino,
+                                           &parent_inode, &parent_gen, name);
+               } else {
+                       ret = __get_cur_name_and_parent(sctx, ino, gen,
+                                                       &parent_inode,
+                                                       &parent_gen, name);
+                       if (ret > 0) {
+                               ret = 0;
+                               break;
+                       }
+               }
+               if (ret < 0)
+                       break;
+               if (parent_inode == start_ino) {
+                       ret = 1;
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       break;
+               }
+               ino = parent_inode;
+               gen = parent_gen;
+       }
+       return ret;
+}
+
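path_loop() above climbs the would-be parent chain of an inode whose rename is pending and reports whether the chain leads back to the starting inode, in which case applying the move now would create a directory cycle and the rename must be deferred to an ancestor. The core of that check as a standalone sketch over a toy parent map:

#include <stdint.h>
#include <stdio.h>

#define ROOT_INO 256    /* stand-in for BTRFS_FIRST_FREE_OBJECTID */

/* parent[ino] is the parent an inode would have once pending renames
 * apply. Return 1 (and report the offending ancestor) if walking up
 * from start leads back to start, i.e. the move would create a cycle.
 * The sketch assumes every chain ends at ROOT_INO or revisits start;
 * the kernel version also distinguishes inodes waiting for rename. */
static int path_loops(const uint64_t *parent, uint64_t start,
                      uint64_t *ancestor)
{
        uint64_t ino = start;

        *ancestor = 0;
        while (ino != ROOT_INO) {
                uint64_t up = parent[ino];

                if (up == start) {
                        *ancestor = ino;
                        return 1;
                }
                ino = up;
        }
        return 0;
}

int main(void)
{
        uint64_t parent[260] = { 0 };
        uint64_t ancestor;

        /* pending moves would make 259 -> 258 -> 257 -> 259 */
        parent[257] = 259;
        parent[258] = 257;
        parent[259] = 258;
        printf("loop=%d ancestor=%llu\n", path_loops(parent, 259, &ancestor),
               (unsigned long long)ancestor);
        return 0;
}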
 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
 {
        struct fs_path *from_path = NULL;
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 ancestor;
+       bool is_orphan;
        int ret;
 
        name = fs_path_alloc();
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
-       if (pm->is_orphan) {
+       if (is_orphan) {
                ret = gen_unique_name(sctx, pm->ino,
                                      pm->gen, from_path);
        } else {
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                goto out;
 
        sctx->send_progress = sctx->cur_ino + 1;
+       ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
+       if (ret < 0)
+               goto out;
+       if (ret) {
+               LIST_HEAD(deleted_refs);
+               ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
+               ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
+                                          &pm->update_refs, &deleted_refs,
+                                          is_orphan);
+               if (ret < 0)
+                       goto out;
+               if (rmdir_ino) {
+                       dm = get_waiting_dir_move(sctx, pm->ino);
+                       ASSERT(dm);
+                       dm->rmdir_ino = rmdir_ino;
+               }
+               goto out;
+       }
        fs_path_reset(name);
        to_path = name;
        name = NULL;
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        /* already deleted */
                        goto finish;
                }
-               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
+               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
                if (ret < 0)
                        goto out;
                if (!ret)
@@ -3204,8 +3305,18 @@ finish:
         * and old parent(s).
         */
        list_for_each_entry(cur, &pm->update_refs, list) {
-               if (cur->dir == rmdir_ino)
+               /*
+                * The parent inode might have been deleted in the send snapshot
+                */
+               ret = get_inode_info(sctx->send_root, cur->dir, NULL,
+                                    NULL, NULL, NULL, NULL, NULL);
+               if (ret == -ENOENT) {
+                       ret = 0;
                        continue;
+               }
+               if (ret < 0)
+                       goto out;
+
                ret = send_utimes(sctx, cur->dir, cur->dir_gen);
                if (ret < 0)
                        goto out;
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
        u64 left_gen;
        u64 right_gen;
        int ret = 0;
+       struct waiting_dir_move *wdm;
 
        if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
                return 0;
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
                goto out;
        }
 
-       if (is_waiting_for_move(sctx, di_key.objectid)) {
+       wdm = get_waiting_dir_move(sctx, di_key.objectid);
+       if (wdm && !wdm->orphanized) {
                ret = add_pending_dir_move(sctx,
                                           sctx->cur_ino,
                                           sctx->cur_inode_gen,
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
                        ret = is_ancestor(sctx->parent_root,
                                          sctx->cur_ino, sctx->cur_inode_gen,
                                          ino, path_before);
-                       break;
+                       if (ret)
+                               break;
                }
 
                fs_path_reset(path_before);
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                goto out;
                        if (ret) {
                                struct name_cache_entry *nce;
+                               struct waiting_dir_move *wdm;
 
                                ret = orphanize_inode(sctx, ow_inode, ow_gen,
                                                cur->full_path);
                                if (ret < 0)
                                        goto out;
+
+                               /*
+                                * If ow_inode has its rename operation delayed
+                                * make sure that its orphanized name is used in
+                                * the source path when performing its rename
+                                * operation.
+                                */
+                               if (is_waiting_for_move(sctx, ow_inode)) {
+                                       wdm = get_waiting_dir_move(sctx,
+                                                                  ow_inode);
+                                       ASSERT(wdm);
+                                       wdm->orphanized = true;
+                               }
+
                                /*
                                 * Make sure we clear our orphanized inode's
                                 * name from the name cache. This is because the
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                        name_cache_delete(sctx, nce);
                                        kfree(nce);
                                }
+
+                               /*
+                                * ow_inode might currently be an ancestor of
+                                * cur_ino, therefore compute valid_path (the
+                                * current path of cur_ino) again because it
+                                * might contain the pre-orphanization name of
+                                * ow_inode, which is no longer valid.
+                                */
+                               fs_path_reset(valid_path);
+                               ret = get_cur_path(sctx, sctx->cur_ino,
+                                          sctx->cur_inode_gen, valid_path);
+                               if (ret < 0)
+                                       goto out;
                        } else {
                                ret = send_unlink(sctx, cur->full_path);
                                if (ret < 0)
@@ -4126,10 +4268,12 @@ static int process_all_refs(struct send_ctx *sctx,
        }
        btrfs_release_path(path);
 
+       /*
+        * We don't actually care about pending_move as we are simply
+        * re-creating this inode and will be renaming it into place once we
+        * rename the parent directory.
+        */
        ret = process_recorded_refs(sctx, &pending_move);
-       /* Only applicable to an incremental send. */
-       ASSERT(pending_move == 0);
-
 out:
        btrfs_free_path(path);
        return ret;
@@ -5602,7 +5746,10 @@ static int changed_ref(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "reference");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen &&
            sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
@@ -5627,7 +5774,10 @@ static int changed_xattr(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "xattr");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result == BTRFS_COMPARE_TREE_NEW)
@@ -5651,7 +5801,10 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "extent");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
index 864ce334f696c31badf2919b07d503416545ded6..4071fe2bd0981a547f983ebeaa944f3179d97659 100644 (file)
@@ -2241,6 +2241,13 @@ static int btrfs_freeze(struct super_block *sb)
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = btrfs_sb(sb)->tree_root;
 
+       root->fs_info->fs_frozen = 1;
+       /*
+        * We don't need a barrier here; we'll wait for any transaction that
+        * could be in progress on other threads (and do delayed iputs that
+        * we want to avoid on a frozen filesystem), or do the commit
+        * ourselves.
+        */
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
@@ -2251,6 +2258,14 @@ static int btrfs_freeze(struct super_block *sb)
        return btrfs_commit_transaction(trans, root);
 }
 
+static int btrfs_unfreeze(struct super_block *sb)
+{
+       struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+
+       root->fs_info->fs_frozen = 0;
+       return 0;
+}
+
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2299,6 +2314,7 @@ static const struct super_operations btrfs_super_ops = {
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
+       .unfreeze_fs    = btrfs_unfreeze,
 };
 
 static const struct file_operations btrfs_ctl_fops = {
index 9cca0a72196180986b440865221a8aafa5659315..95d41919d034ef69647b6983dfac27f87f17e130 100644 (file)
@@ -2278,8 +2278,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
+       /*
+        * If the fs has been frozen, we cannot handle delayed iputs;
+        * otherwise we would deadlock on SB_FREEZE_FS.
+        */
        if (current != root->fs_info->transaction_kthread &&
-           current != root->fs_info->cleaner_kthread)
+           current != root->fs_info->cleaner_kthread &&
+           !root->fs_info->fs_frozen)
                btrfs_run_delayed_iputs(root);
 
        return ret;
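Together, the super.c and transaction.c hunks above introduce a plain fs_frozen flag: btrfs_freeze() raises it before attaching to any running transaction, btrfs_unfreeze() clears it, and the commit tail skips delayed iputs while it is set. A condensed, lock-free sketch of that control flow with simplified fields:

#include <stdbool.h>
#include <stdio.h>

struct fs_info {
        bool fs_frozen;
        bool is_transaction_kthread;
        bool is_cleaner_kthread;
};

static void run_delayed_iputs(void)
{
        puts("running delayed iputs");
}

/* Commit tail, mirroring the fix: ordinary threads flush delayed
 * iputs after a commit, but never while the fs is frozen, because an
 * iput can start write-back and deadlock against SB_FREEZE_FS. */
static void commit_transaction_tail(const struct fs_info *fs)
{
        if (!fs->is_transaction_kthread && !fs->is_cleaner_kthread &&
            !fs->fs_frozen)
                run_delayed_iputs();
}

int main(void)
{
        struct fs_info fs = { false, false, false };

        fs.fs_frozen = true;            /* btrfs_freeze() */
        commit_transaction_tail(&fs);   /* skipped: frozen */
        fs.fs_frozen = false;           /* btrfs_unfreeze() */
        commit_transaction_tail(&fs);   /* runs */
        return 0;
}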
index d31a0c4f56bed436e0eb933cceb592fdc498eb53..ef9c55bc79074534e826fd30c9e72a0d5036af84 100644 (file)
@@ -27,6 +27,7 @@
 #include "backref.h"
 #include "hash.h"
 #include "compression.h"
+#include "qgroup.h"
 
 /* magic values for the inode_only field in btrfs_log_inode:
  *
@@ -680,6 +681,21 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                ins.type = BTRFS_EXTENT_ITEM_KEY;
                offset = key->offset - btrfs_file_extent_offset(eb, item);
 
+               /*
+                * Manually record the dirty extent: here we did a shallow
+                * file extent item copy and skipped the normal backref
+                * update, modifying the extent tree all by ourselves.
+                * So we need to manually record the dirty extent for qgroup,
+                * as the owner of the file extent changed from the log tree
+                * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
+                */
+               ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+                               btrfs_file_extent_disk_bytenr(eb, item),
+                               btrfs_file_extent_disk_num_bytes(eb, item),
+                               GFP_NOFS);
+               if (ret < 0)
+                       goto out;
+
                if (ins.objectid > 0) {
                        u64 csum_start;
                        u64 csum_end;
@@ -2807,7 +2823,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         */
        mutex_unlock(&root->log_mutex);
 
-       btrfs_init_log_ctx(&root_log_ctx);
+       btrfs_init_log_ctx(&root_log_ctx, NULL);
 
        mutex_lock(&log_root_tree->log_mutex);
        atomic_inc(&log_root_tree->log_batch);
@@ -2851,6 +2867,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
                blk_finish_plug(&plug);
+               list_del_init(&root_log_ctx.list);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
@@ -4469,7 +4486,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                         const int slot,
                                         const struct btrfs_key *key,
-                                        struct inode *inode)
+                                        struct inode *inode,
+                                        u64 *other_ino)
 {
        int ret;
        struct btrfs_path *search_path;
@@ -4528,7 +4546,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                           search_path, parent,
                                           name, this_name_len, 0);
                if (di && !IS_ERR(di)) {
-                       ret = 1;
+                       struct btrfs_key di_key;
+
+                       btrfs_dir_item_key_to_cpu(search_path->nodes[0],
+                                                 di, &di_key);
+                       if (di_key.type == BTRFS_INODE_ITEM_KEY) {
+                               ret = 1;
+                               *other_ino = di_key.objectid;
+                       } else {
+                               ret = -EAGAIN;
+                       }
                        goto out;
                } else if (IS_ERR(di)) {
                        ret = PTR_ERR(di);
@@ -4722,16 +4749,72 @@ again:
                if ((min_key.type == BTRFS_INODE_REF_KEY ||
                     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
                    BTRFS_I(inode)->generation == trans->transid) {
+                       u64 other_ino = 0;
+
                        ret = btrfs_check_ref_name_override(path->nodes[0],
                                                            path->slots[0],
-                                                           &min_key, inode);
+                                                           &min_key, inode,
+                                                           &other_ino);
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
-                       } else if (ret > 0) {
-                               err = 1;
-                               btrfs_set_log_full_commit(root->fs_info, trans);
-                               goto out_unlock;
+                       } else if (ret > 0 && ctx &&
+                                  other_ino != btrfs_ino(ctx->inode)) {
+                               struct btrfs_key inode_key;
+                               struct inode *other_inode;
+
+                               if (ins_nr > 0) {
+                                       ins_nr++;
+                               } else {
+                                       ins_nr = 1;
+                                       ins_start_slot = path->slots[0];
+                               }
+                               ret = copy_items(trans, inode, dst_path, path,
+                                                &last_extent, ins_start_slot,
+                                                ins_nr, inode_only,
+                                                logged_isize);
+                               if (ret < 0) {
+                                       err = ret;
+                                       goto out_unlock;
+                               }
+                               ins_nr = 0;
+                               btrfs_release_path(path);
+                               inode_key.objectid = other_ino;
+                               inode_key.type = BTRFS_INODE_ITEM_KEY;
+                               inode_key.offset = 0;
+                               other_inode = btrfs_iget(root->fs_info->sb,
+                                                        &inode_key, root,
+                                                        NULL);
+                               /*
+                                * If the other inode that had a conflicting dir
+                                * entry was deleted in the current transaction,
+                                * we don't need to do more work nor fall back to
+                                * a transaction commit.
+                                */
+                               if (IS_ERR(other_inode) &&
+                                   PTR_ERR(other_inode) == -ENOENT) {
+                                       goto next_key;
+                               } else if (IS_ERR(other_inode)) {
+                                       err = PTR_ERR(other_inode);
+                                       goto out_unlock;
+                               }
+                               /*
+                                * We are safe logging the other inode without
+                                * acquiring its i_mutex as long as we log with
+                                * the LOG_INODE_EXISTS mode. We're safe against
+                                * concurrent renames of the other inode as well
+                                * because during a rename we pin the log and
+                                * update the log with the new name before we
+                                * unpin it.
+                                */
+                               err = btrfs_log_inode(trans, root, other_inode,
+                                                     LOG_INODE_EXISTS,
+                                                     0, LLONG_MAX, ctx);
+                               iput(other_inode);
+                               if (err)
+                                       goto out_unlock;
+                               else
+                                       goto next_key;
                        }
                }
 
@@ -4799,7 +4882,7 @@ next_slot:
                        ins_nr = 0;
                }
                btrfs_release_path(path);
-
+next_key:
                if (min_key.offset < (u64)-1) {
                        min_key.offset++;
                } else if (min_key.type < max_key.type) {
@@ -4993,8 +5076,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
                if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
                        break;
 
-               if (IS_ROOT(parent))
+               if (IS_ROOT(parent)) {
+                       inode = d_inode(parent);
+                       if (btrfs_must_commit_transaction(trans, inode))
+                               ret = 1;
                        break;
+               }
 
                parent = dget_parent(parent);
                dput(old_parent);
index a9f1b75d080d3796756c9e9f09c04180b404eb1c..ab858e31ccbc2210a01a9b7ce69bf52127d00b8b 100644 (file)
@@ -30,15 +30,18 @@ struct btrfs_log_ctx {
        int log_transid;
        int io_err;
        bool log_new_dentries;
+       struct inode *inode;
        struct list_head list;
 };
 
-static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
+static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
+                                     struct inode *inode)
 {
        ctx->log_ret = 0;
        ctx->log_transid = 0;
        ctx->io_err = 0;
        ctx->log_new_dentries = false;
+       ctx->inode = inode;
        INIT_LIST_HEAD(&ctx->list);
 }
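
Every caller of btrfs_init_log_ctx() has to be updated for the new
parameter; a hedged sketch of the expected call site in the fsync path
(surrounding logic elided):

    struct btrfs_log_ctx ctx;

    /* ctx.inode lets the tree-log code recognize the inode being
     * fsynced when it finds a conflicting inode ref */
    btrfs_init_log_ctx(&ctx, inode);
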
 
index 51f125508771ad950f2e1d4100cc398772bd06c6..035efce603a9c2db3989c1057dd1e20f82d2eba2 100644 (file)
@@ -834,10 +834,6 @@ static void __free_device(struct work_struct *work)
        struct btrfs_device *device;
 
        device = container_of(work, struct btrfs_device, rcu_work);
-
-       if (device->bdev)
-               blkdev_put(device->bdev, device->mode);
-
        rcu_string_free(device->name);
        kfree(device);
 }
@@ -852,6 +848,17 @@ static void free_device(struct rcu_head *head)
        schedule_work(&device->rcu_work);
 }
 
+static void btrfs_close_bdev(struct btrfs_device *device)
+{
+       if (device->bdev && device->writeable) {
+               sync_blockdev(device->bdev);
+               invalidate_bdev(device->bdev);
+       }
+
+       if (device->bdev)
+               blkdev_put(device->bdev, device->mode);
+}
+
 static void btrfs_close_one_device(struct btrfs_device *device)
 {
        struct btrfs_fs_devices *fs_devices = device->fs_devices;
@@ -870,10 +877,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
        if (device->missing)
                fs_devices->missing_devices--;
 
-       if (device->bdev && device->writeable) {
-               sync_blockdev(device->bdev);
-               invalidate_bdev(device->bdev);
-       }
+       btrfs_close_bdev(device);
 
        new_device = btrfs_alloc_device(NULL, &device->devid,
                                        device->uuid);
@@ -1932,6 +1936,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
                btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
        }
 
+       btrfs_close_bdev(device);
+
        call_rcu(&device->rcu, free_device);
 
        num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
@@ -2025,6 +2031,9 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
                /* zero out the old super if it is writable */
                btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
        }
+
+       btrfs_close_bdev(srcdev);
+
        call_rcu(&srcdev->rcu, free_device);
 
        /*
@@ -2080,6 +2089,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
         * the device_list_mutex lock.
         */
        btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
+
+       btrfs_close_bdev(tgtdev);
        call_rcu(&tgtdev->rcu, free_device);
 }
 
index 99115cae1652ac1661d37a5a286da7180b6d9d94..16e6ded0b7f281bf72e8074b9d2896713fe4aef2 100644 (file)
@@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
-       struct ceph_mds_session *session = *psession;
+       struct ceph_mds_session *session = NULL;
        int mds;
+
        dout("ceph_flush_snaps %p\n", inode);
+       if (psession)
+               session = *psession;
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
index c64a0b794d491b39d3dd94ab9c17951a5a0fc2d8..df4b3e6fa56398d248d51e0b6215a1773dacc1ec 100644 (file)
@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
        if (is_hash_order(new_pos)) {
                /* no need to reset last_name for a forward seek when
                 * dentries are sorted in hash order */
-       } else if (fi->frag |= fpos_frag(new_pos)) {
+       } else if (fi->frag != fpos_frag(new_pos)) {
                return true;
        }
        rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
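
The one-character change above fixes a classic bug class: "|=" inside a
condition both mutates the left-hand side and evaluates to the result,
which is almost always nonzero. A standalone illustration of the
difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned frag = 0x2, new_frag = 0x1;

            if (frag |= new_frag)   /* bug: frag becomes 0x3, test is true */
                    printf("taken, frag=%#x\n", frag);

            frag = 0x2;
            if (frag != new_frag)   /* intended: a pure comparison */
                    printf("taken, frag=%#x\n", frag);
            return 0;
    }
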
index fa59a85226b262f2fe086ec5dfc1bf6813711986..f72d4ae303b273a98ee2631d8ed3dde21a71e796 100644 (file)
@@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        } else {
                path = NULL;
                pathlen = 0;
+               pathbase = 0;
        }
 
        spin_lock(&ci->i_ceph_lock);
index 6bbec5e784cd493c7d88cec2a345ced6eb7bff61..14ae4b8e1a3ce8370b282503c91a1ff428d61f5b 100644 (file)
@@ -609,6 +609,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
        char *s, *p;
        char sep;
 
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+               return dget(sb->s_root);
+
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
        if (full_path == NULL)
@@ -686,26 +689,22 @@ cifs_do_mount(struct file_system_type *fs_type,
        cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
        if (cifs_sb->mountdata == NULL) {
                root = ERR_PTR(-ENOMEM);
-               goto out_cifs_sb;
+               goto out_free;
        }
 
-       if (volume_info->prepath) {
-               cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
-               if (cifs_sb->prepath == NULL) {
-                       root = ERR_PTR(-ENOMEM);
-                       goto out_cifs_sb;
-               }
+       rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+       if (rc) {
+               root = ERR_PTR(rc);
+               goto out_free;
        }
 
-       cifs_setup_cifs_sb(volume_info, cifs_sb);
-
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
                if (!(flags & MS_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
-               goto out_mountdata;
+               goto out_free;
        }
 
        mnt_data.vol = volume_info;
@@ -735,11 +734,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                sb->s_flags |= MS_ACTIVE;
        }
 
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
-               root = dget(sb->s_root);
-       else
-               root = cifs_get_root(volume_info, sb);
-
+       root = cifs_get_root(volume_info, sb);
        if (IS_ERR(root))
                goto out_super;
 
@@ -752,9 +747,9 @@ out:
        cifs_cleanup_volume_info(volume_info);
        return root;
 
-out_mountdata:
+out_free:
+       kfree(cifs_sb->prepath);
        kfree(cifs_sb->mountdata);
-out_cifs_sb:
        kfree(cifs_sb);
 out_nls:
        unload_nls(volume_info->local_nls);
index 1243bd326591a626028727841f818cf0ce3f7615..95dab43646f040011011a7b1c4bce4446c80891b 100644 (file)
@@ -184,7 +184,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
                                 unsigned int to_read);
 extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
                                      struct page *page, unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                               struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
 extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
index 7ae03283bd61c128c6c804805c30c6c25da79f31..2e4f4bad8b1e9bf75cc76074914b8c2c8f63a3ff 100644 (file)
@@ -2781,6 +2781,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
        return 1;
 }
 
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+       struct cifs_sb_info *old = CIFS_SB(sb);
+       struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+       if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+               if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+                       return 0;
+               /* The prepaths should be null-terminated strings */
+               if (strcmp(new->prepath, old->prepath))
+                       return 0;
+
+               return 1;
+       }
+       return 0;
+}
+
 int
 cifs_match_super(struct super_block *sb, void *data)
 {
@@ -2808,7 +2826,8 @@ cifs_match_super(struct super_block *sb, void *data)
 
        if (!match_server(tcp_srv, volume_info) ||
            !match_session(ses, volume_info) ||
-           !match_tcon(tcon, volume_info->UNC)) {
+           !match_tcon(tcon, volume_info->UNC) ||
+           !match_prepath(sb, mnt_data)) {
                rc = 0;
                goto out;
        }
@@ -3222,7 +3241,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        }
 }
 
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                        struct cifs_sb_info *cifs_sb)
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3316,6 +3335,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
        if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
                cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+       if (pvolume_info->prepath) {
+               cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+               if (cifs_sb->prepath == NULL)
+                       return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void
index c30cf49b69d2f3de59e61ef273478d797cccc1b7..2c6312db85168050f6ee35d0e82465d994e7a1ea 100644 (file)
@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
                if (bin_attr->cb_max_size &&
                        *ppos + count > bin_attr->cb_max_size) {
                        len = -EFBIG;
+                       goto out;
                }
 
                tbuf = vmalloc(*ppos + count);
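
Without the added "goto out", the function recorded len = -EFBIG and then
fell straight through to the allocation and copy below it, so the
cb_max_size cap was computed but never enforced. The general shape being
restored, reduced to a runnable toy (write_capped is an invented name):

    #include <stdio.h>

    static long write_capped(size_t pos, size_t count, size_t cap)
    {
            long len = (long)count;

            if (cap && pos + count > cap) {
                    len = -27;      /* -EFBIG */
                    goto out;       /* the line the fix adds back */
            }
            /* allocation and copy happen here, success path only */
    out:
            return len;
    }

    int main(void)
    {
            printf("%ld\n", write_capped(4096, 100, 4096)); /* -27 */
            return 0;
    }
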
index 0f9961eede1e74021c43c4943b2bb419ed09a198..ed115acb5dee04bd15726ed2b9f368f6d13315cc 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/random.h>
 #include <linux/string.h>
 #include <linux/fscrypto.h>
+#include <linux/mount.h>
 
 static int inode_has_encryption_context(struct inode *inode)
 {
@@ -92,26 +93,42 @@ static int create_encryption_context_from_policy(struct inode *inode,
        return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
 }
 
-int fscrypt_process_policy(struct inode *inode,
+int fscrypt_process_policy(struct file *filp,
                                const struct fscrypt_policy *policy)
 {
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
        if (policy->version != 0)
                return -EINVAL;
 
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
        if (!inode_has_encryption_context(inode)) {
-               if (!inode->i_sb->s_cop->empty_dir)
-                       return -EOPNOTSUPP;
-               if (!inode->i_sb->s_cop->empty_dir(inode))
-                       return -ENOTEMPTY;
-               return create_encryption_context_from_policy(inode, policy);
+               if (!S_ISDIR(inode->i_mode))
+                       ret = -EINVAL;
+               else if (!inode->i_sb->s_cop->empty_dir)
+                       ret = -EOPNOTSUPP;
+               else if (!inode->i_sb->s_cop->empty_dir(inode))
+                       ret = -ENOTEMPTY;
+               else
+                       ret = create_encryption_context_from_policy(inode,
+                                                                   policy);
+       } else if (!is_encryption_context_consistent_with_policy(inode,
+                                                                policy)) {
+               printk(KERN_WARNING
+                      "%s: Policy inconsistent with encryption context\n",
+                      __func__);
+               ret = -EINVAL;
        }
 
-       if (is_encryption_context_consistent_with_policy(inode, policy))
-               return 0;
-
-       printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
-              __func__);
-       return -EINVAL;
+       mnt_drop_write_file(filp);
+       return ret;
 }
 EXPORT_SYMBOL(fscrypt_process_policy);
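
With the checks folded into fscrypt_process_policy(), both ext4 and f2fs
now reject non-owners, read-only mounts and non-directory targets in one
place. A hedged sketch of the userspace ioctl this serves (key setup and
error handling omitted; the directory path is a placeholder):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>       /* FS_IOC_SET_ENCRYPTION_POLICY */
    #include <unistd.h>

    int main(void)
    {
            struct fscrypt_policy p;
            int fd = open("/mnt/dir", O_RDONLY);  /* must be an empty dir */

            memset(&p, 0, sizeof(p));
            p.version = 0;      /* the only version the kernel accepts */
            /* p.master_key_descriptor would come from the keyring setup */
            if (fd >= 0) {
                    ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
                    close(fd);
            }
            return 0;
    }
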
 
index d116453b0276634fabb32ee96df9cf96ce9cb25a..79a5941c2474622d7888c8e8c3bc925eabdb77e1 100644 (file)
@@ -585,7 +585,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
  */
 void *devpts_get_priv(struct dentry *dentry)
 {
-       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+       if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
+               return NULL;
        return dentry->d_fsdata;
 }
 
index eea64912c9c0a6a1e69e0b041b8404e611bf1e81..466f7d60edc2746f12a425ec91df2773f64dfedb 100644 (file)
@@ -607,20 +607,54 @@ static const struct file_operations format2_fops;
 static const struct file_operations format3_fops;
 static const struct file_operations format4_fops;
 
-static int table_open(struct inode *inode, struct file *file)
+static int table_open1(struct inode *inode, struct file *file)
 {
        struct seq_file *seq;
-       int ret = -1;
+       int ret;
 
-       if (file->f_op == &format1_fops)
-               ret = seq_open(file, &format1_seq_ops);
-       else if (file->f_op == &format2_fops)
-               ret = seq_open(file, &format2_seq_ops);
-       else if (file->f_op == &format3_fops)
-               ret = seq_open(file, &format3_seq_ops);
-       else if (file->f_op == &format4_fops)
-               ret = seq_open(file, &format4_seq_ops);
+       ret = seq_open(file, &format1_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open2(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
+
+       ret = seq_open(file, &format2_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open3(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
+
+       ret = seq_open(file, &format3_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open4(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
 
+       ret = seq_open(file, &format4_seq_ops);
        if (ret)
                return ret;
 
@@ -631,7 +665,7 @@ static int table_open(struct inode *inode, struct file *file)
 
 static const struct file_operations format1_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open1,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -639,7 +673,7 @@ static const struct file_operations format1_fops = {
 
 static const struct file_operations format2_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open2,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -647,7 +681,7 @@ static const struct file_operations format2_fops = {
 
 static const struct file_operations format3_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open3,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -655,7 +689,7 @@ static const struct file_operations format3_fops = {
 
 static const struct file_operations format4_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open4,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
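
The four near-identical openers trade a little duplication for no longer
comparing file->f_op against each fops table. If the duplication ever
mattered, a common helper could factor it out; a hypothetical sketch
(table_open_common is an invented name, not part of the patch):

    static int table_open_common(struct inode *inode, struct file *file,
                                 const struct seq_operations *ops)
    {
            struct seq_file *seq;
            int ret = seq_open(file, ops);

            if (ret)
                    return ret;
            seq = file->private_data;
            seq->private = inode->i_private; /* the dlm_ls */
            return 0;
    }

    static int table_open1(struct inode *inode, struct file *file)
    {
            return table_open_common(inode, file, &format1_seq_ops);
    }
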
index 3131747199e160f5feca6c322a2b4190201108bf..c6ea25a190f88b09354e93cb7245586af2dbc87a 100644 (file)
@@ -5466,8 +5466,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
                                                      sbi->s_want_extra_isize,
                                                      iloc, handle);
                        if (ret) {
-                               ext4_set_inode_state(inode,
-                                                    EXT4_STATE_NO_EXPAND);
                                if (mnt_count !=
                                        le16_to_cpu(sbi->s_es->s_mnt_count)) {
                                        ext4_warning(inode->i_sb,
index 10686fd67fb425880cacb94183291698f00ed702..1bb7df5e45369896a0b71de051b1364492b6a813 100644 (file)
@@ -776,7 +776,7 @@ resizefs_out:
                                   (struct fscrypt_policy __user *)arg,
                                   sizeof(policy)))
                        return -EFAULT;
-               return fscrypt_process_policy(inode, &policy);
+               return fscrypt_process_policy(filp, &policy);
 #else
                return -EOPNOTSUPP;
 #endif
index 1c593aa0218eee5cec9e2dac0adf8aaa97056d8b..3ec8708989ca016c3dd9e7e2d9bd479a1cd9beaf 100644 (file)
@@ -2211,6 +2211,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
 
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
+                                 ext4_fsblk_t sb_block,
                                  ext4_group_t *first_not_zeroed)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2241,6 +2242,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        grp = i;
 
                block_bitmap = ext4_block_bitmap(sb, gdp);
+               if (block_bitmap == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "superblock", i);
+               }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
@@ -2248,6 +2254,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                inode_bitmap = ext4_inode_bitmap(sb, gdp);
+               if (inode_bitmap == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "superblock", i);
+               }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
@@ -2255,6 +2266,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                inode_table = ext4_inode_table(sb, gdp);
+               if (inode_table == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "superblock", i);
+               }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3757,7 +3773,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
-       if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
+       if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
index 39e9cfb1b3715c2fcf2b81c0e03c2183d58c9100..2eb935ca5d9effac8eef6a2284d33acc62ae509e 100644 (file)
@@ -1353,15 +1353,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
        size_t min_offs, free;
        int total_ino;
        void *base, *start, *end;
-       int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+       int error = 0, tried_min_extra_isize = 0;
        int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+       int isize_diff; /* How much do we need to grow i_extra_isize */
 
        down_write(&EXT4_I(inode)->xattr_sem);
+       /*
+        * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+        */
+       ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
 retry:
-       if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
-               up_write(&EXT4_I(inode)->xattr_sem);
-               return 0;
-       }
+       isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+       if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+               goto out;
 
        header = IHDR(inode, raw_inode);
        entry = IFIRST(header);
@@ -1382,7 +1386,7 @@ retry:
                goto cleanup;
 
        free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
-       if (free >= new_extra_isize) {
+       if (free >= isize_diff) {
                entry = IFIRST(header);
                ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
                                - new_extra_isize, (void *)raw_inode +
@@ -1390,8 +1394,7 @@ retry:
                                (void *)header, total_ino,
                                inode->i_sb->s_blocksize);
                EXT4_I(inode)->i_extra_isize = new_extra_isize;
-               error = 0;
-               goto cleanup;
+               goto out;
        }
 
        /*
@@ -1414,7 +1417,7 @@ retry:
                end = bh->b_data + bh->b_size;
                min_offs = end - base;
                free = ext4_xattr_free_space(first, &min_offs, base, NULL);
-               if (free < new_extra_isize) {
+               if (free < isize_diff) {
                        if (!tried_min_extra_isize && s_min_extra_isize) {
                                tried_min_extra_isize++;
                                new_extra_isize = s_min_extra_isize;
@@ -1428,7 +1431,7 @@ retry:
                free = inode->i_sb->s_blocksize;
        }
 
-       while (new_extra_isize > 0) {
+       while (isize_diff > 0) {
                size_t offs, size, entry_size;
                struct ext4_xattr_entry *small_entry = NULL;
                struct ext4_xattr_info i = {
@@ -1459,7 +1462,7 @@ retry:
                        EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
                                        EXT4_XATTR_LEN(last->e_name_len);
                        if (total_size <= free && total_size < min_total_size) {
-                               if (total_size < new_extra_isize) {
+                               if (total_size < isize_diff) {
                                        small_entry = last;
                                } else {
                                        entry = last;
@@ -1514,22 +1517,22 @@ retry:
                error = ext4_xattr_ibody_set(handle, inode, &i, is);
                if (error)
                        goto cleanup;
+               total_ino -= entry_size;
 
                entry = IFIRST(header);
-               if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
-                       shift_bytes = new_extra_isize;
+               if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
+                       shift_bytes = isize_diff;
                else
-                       shift_bytes = entry_size + size;
+                       shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
                /* Adjust the offsets and shift the remaining entries ahead */
-               ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
-                       shift_bytes, (void *)raw_inode +
-                       EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
-                       (void *)header, total_ino - entry_size,
-                       inode->i_sb->s_blocksize);
+               ext4_xattr_shift_entries(entry, -shift_bytes,
+                       (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+                       EXT4_I(inode)->i_extra_isize + shift_bytes,
+                       (void *)header, total_ino, inode->i_sb->s_blocksize);
 
-               extra_isize += shift_bytes;
-               new_extra_isize -= shift_bytes;
-               EXT4_I(inode)->i_extra_isize = extra_isize;
+               isize_diff -= shift_bytes;
+               EXT4_I(inode)->i_extra_isize += shift_bytes;
+               header = IHDR(inode, raw_inode);
 
                i.name = b_entry_name;
                i.value = buffer;
@@ -1551,6 +1554,8 @@ retry:
                kfree(bs);
        }
        brelse(bh);
+out:
+       ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
        up_write(&EXT4_I(inode)->xattr_sem);
        return 0;
 
@@ -1562,6 +1567,10 @@ cleanup:
        kfree(is);
        kfree(bs);
        brelse(bh);
+       /*
+        * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+        * size expansion failed.
+        */
        up_write(&EXT4_I(inode)->xattr_sem);
        return error;
 }
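
The rewrite keys every size decision off isize_diff instead of
decrementing new_extra_isize in place, which is what made the old shift
arithmetic go wrong. A worked example of the intended bookkeeping,
runnable in isolation:

    #include <stdio.h>

    int main(void)
    {
            int i_extra_isize = 28, new_extra_isize = 32;
            int isize_diff = new_extra_isize - i_extra_isize;  /* 4 */
            int shift_bytes = 4;  /* space freed by evicting one xattr */

            isize_diff -= shift_bytes;
            i_extra_isize += shift_bytes;
            printf("remaining=%d extra_isize=%d\n",
                   isize_diff, i_extra_isize);  /* remaining=0: done */
            return 0;
    }
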
index 69dd3e6566e02edd05a4ef8054913df1326bd29a..a92e783fa057070a2d46ece41089bae6e3a26fe4 100644 (file)
@@ -24,6 +24,7 @@
 #define EXT4_XATTR_INDEX_SYSTEM                        7
 #define EXT4_XATTR_INDEX_RICHACL               8
 #define EXT4_XATTR_INDEX_ENCRYPTION            9
+#define EXT4_XATTR_INDEX_HURD                  10 /* Reserved for Hurd */
 
 struct ext4_xattr_header {
        __le32  h_magic;        /* magic number for identification */
index d64d2a515cb2ce1d162b8fd9c0c0e0e5e68a7fbb..ccb401eebc112b26b848818185579929b5ee1030 100644 (file)
@@ -1699,11 +1699,11 @@ static int f2fs_write_end(struct file *file,
        trace_f2fs_write_end(inode, pos, len, copied);
 
        set_page_dirty(page);
-       f2fs_put_page(page, 1);
 
        if (pos + copied > i_size_read(inode))
                f2fs_i_size_write(inode, pos + copied);
 
+       f2fs_put_page(page, 1);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        return copied;
 }
index 675fa79d86f6500d779917b7ca7da3e62f14fda7..14f5fe2b841e2e7d4e699072cc2aeb6e1c14c645 100644 (file)
@@ -538,7 +538,7 @@ struct f2fs_nm_info {
        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
        struct radix_tree_root nat_set_root;/* root of the nat set cache */
-       struct percpu_rw_semaphore nat_tree_lock;       /* protect nat_tree_lock */
+       struct rw_semaphore nat_tree_lock;      /* protect nat_tree_lock */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
        unsigned int nat_cnt;           /* the # of cached nat entries */
        unsigned int dirty_nat_cnt;     /* total num of nat entries in set */
@@ -787,7 +787,7 @@ struct f2fs_sb_info {
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
-       struct percpu_rw_semaphore cp_rwsem;            /* blocking FS operations */
+       struct rw_semaphore cp_rwsem;           /* blocking FS operations */
        struct rw_semaphore node_write;         /* locking node writes */
        wait_queue_head_t cp_wait;
        unsigned long last_time[MAX_TIME];      /* to store time in jiffies */
@@ -1074,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-       percpu_down_read(&sbi->cp_rwsem);
+       down_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-       percpu_up_read(&sbi->cp_rwsem);
+       up_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-       percpu_down_write(&sbi->cp_rwsem);
+       down_write(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-       percpu_up_write(&sbi->cp_rwsem);
+       up_write(&sbi->cp_rwsem);
 }
 
 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
index 0e493f63ea41410ad04c22e24038bd2d4eebe0ba..28f4f4cbb8d84d0b07c22e3952d09b9eb1e1bf8c 100644 (file)
@@ -1757,21 +1757,14 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
        struct fscrypt_policy policy;
        struct inode *inode = file_inode(filp);
-       int ret;
 
        if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
                                                        sizeof(policy)))
                return -EFAULT;
 
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-       ret = fscrypt_process_policy(inode, &policy);
 
-       mnt_drop_write_file(filp);
-       return ret;
+       return fscrypt_process_policy(filp, &policy);
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
@@ -2086,15 +2079,19 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
        if (unlikely(f2fs_readonly(src->i_sb)))
                return -EROFS;
 
-       if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
-               return -EISDIR;
+       if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+               return -EINVAL;
 
        if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
                return -EOPNOTSUPP;
 
        inode_lock(src);
-       if (src != dst)
-               inode_lock(dst);
+       if (src != dst) {
+               if (!inode_trylock(dst)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
 
        ret = -EINVAL;
        if (pos_in + len > src->i_size || pos_in + len < pos_in)
@@ -2152,6 +2149,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 out_unlock:
        if (src != dst)
                inode_unlock(dst);
+out:
        inode_unlock(src);
        return ret;
 }
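
Taking the second inode lock with inode_trylock() and returning -EBUSY
sidesteps the ABBA deadlock that two concurrent moves in opposite
directions could otherwise produce. The hazard reduced to its skeleton,
with pthread mutexes standing in for the inode locks:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

    static int move(pthread_mutex_t *src, pthread_mutex_t *dst)
    {
            pthread_mutex_lock(src);
            if (pthread_mutex_trylock(dst)) {  /* like inode_trylock() */
                    pthread_mutex_unlock(src);
                    return -16;                /* -EBUSY, caller retries */
            }
            /* the range copy would happen here */
            pthread_mutex_unlock(dst);
            pthread_mutex_unlock(src);
            return 0;
    }

    int main(void)
    {
            printf("%d\n", move(&a, &b));  /* 0 */
            printf("%d\n", move(&b, &a));  /* 0: reverse order, no hang */
            return 0;
    }
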
index b2fa4b615925b894a92377b215780edb33354571..f75d197d5beb05366e876876e09521db7ac3930c 100644 (file)
@@ -206,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
        struct nat_entry *e;
        bool need = false;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                if (!get_nat_flag(e, IS_CHECKPOINTED) &&
                                !get_nat_flag(e, HAS_FSYNCED_INODE))
                        need = true;
        }
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return need;
 }
 
@@ -223,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
        struct nat_entry *e;
        bool is_cp = true;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e && !get_nat_flag(e, IS_CHECKPOINTED))
                is_cp = false;
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return is_cp;
 }
 
@@ -237,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
        struct nat_entry *e;
        bool need_update = true;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ino);
        if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
                        (get_nat_flag(e, IS_CHECKPOINTED) ||
                         get_nat_flag(e, HAS_FSYNCED_INODE)))
                need_update = false;
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return need_update;
 }
 
@@ -284,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = grab_nat_entry(nm_i, ni->nid);
@@ -334,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        set_nat_flag(e, HAS_FSYNCED_INODE, true);
                set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 }
 
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -342,7 +342,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        int nr = nr_shrink;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       if (!down_write_trylock(&nm_i->nat_tree_lock))
+               return 0;
 
        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
                struct nat_entry *ne;
@@ -351,7 +352,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
        return nr - nr_shrink;
 }
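
A side benefit of going back to a plain rw_semaphore: it has
down_write_trylock(), which percpu_rw_semaphore did not offer at the
time, so the shrinker path above can back off instead of stalling behind
readers. The enabled pattern, as a kernel-context sketch (assumed names,
not the literal f2fs code):

    /* shrinker callback shape: never blocks on a contended cache lock */
    static int shrink_cache(struct rw_semaphore *lock, int nr)
    {
            int freed = 0;

            if (!down_write_trylock(lock))
                    return 0;       /* contended: report nothing freed */
            while (nr-- > 0)
                    freed++;        /* stands in for evicting one entry */
            up_write(lock);
            return freed;
    }
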
 
@@ -373,13 +374,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        ni->nid = nid;
 
        /* Check nat cache */
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
-               percpu_up_read(&nm_i->nat_tree_lock);
+               up_read(&nm_i->nat_tree_lock);
                return;
        }
 
@@ -403,11 +404,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        node_info_from_raw_nat(ni, &ne);
        f2fs_put_page(page, 1);
 cache:
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        /* cache nat entry */
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        cache_nat_entry(sbi, nid, &ne);
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 }
 
 /*
@@ -1788,7 +1789,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
                                                        META_NAT, true);
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
 
        while (1) {
                struct page *page = get_current_nat_page(sbi, nid);
@@ -1820,7 +1821,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
                        remove_free_nid(nm_i, nid);
        }
        up_read(&curseg->journal_rwsem);
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
 
        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
                                        nm_i->ra_nid_pages, META_NAT, false);
@@ -2209,7 +2210,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
        if (!nm_i->dirty_nat_cnt)
                return;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
 
        /*
         * if there is not enough space in the journal to store dirty nat
@@ -2232,7 +2233,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
        list_for_each_entry_safe(set, tmp, &sets, set_list)
                __flush_nat_entry_set(sbi, set);
 
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 
        f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
 }
@@ -2268,8 +2269,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 
        mutex_init(&nm_i->build_lock);
        spin_lock_init(&nm_i->free_nid_list_lock);
-       if (percpu_init_rwsem(&nm_i->nat_tree_lock))
-               return -ENOMEM;
+       init_rwsem(&nm_i->nat_tree_lock);
 
        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2326,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
        spin_unlock(&nm_i->free_nid_list_lock);
 
        /* destroy nat cache */
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        while ((found = __gang_lookup_nat_cache(nm_i,
                                        nid, NATVEC_SIZE, natvec))) {
                unsigned idx;
@@ -2351,9 +2351,8 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
                        kmem_cache_free(nat_entry_set_slab, setvec[idx]);
                }
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 
-       percpu_free_rwsem(&nm_i->nat_tree_lock);
        kfree(nm_i->nat_bitmap);
        sbi->nm_info = NULL;
        kfree(nm_i);
index 1b86d3f638efcf87435da17af79811237fc584a9..7f863a645ab1d0c46052875f35f603bebde10a33 100644 (file)
@@ -706,8 +706,6 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
                percpu_counter_destroy(&sbi->nr_pages[i]);
        percpu_counter_destroy(&sbi->alloc_valid_block_count);
        percpu_counter_destroy(&sbi->total_valid_inode_count);
-
-       percpu_free_rwsem(&sbi->cp_rwsem);
 }
 
 static void f2fs_put_super(struct super_block *sb)
@@ -1483,9 +1481,6 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 {
        int i, err;
 
-       if (percpu_init_rwsem(&sbi->cp_rwsem))
-               return -ENOMEM;
-
        for (i = 0; i < NR_COUNT_TYPE; i++) {
                err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
                if (err)
@@ -1686,6 +1681,7 @@ try_onemore:
                sbi->write_io[i].bio = NULL;
        }
 
+       init_rwsem(&sbi->cp_rwsem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
 
index 4d09d4441e3ee4fb7a2939a5c2cabbfa9e08070e..05713a5da0834233edad0049d7443dd63e6c1078 100644 (file)
@@ -1949,6 +1949,12 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
        struct backing_dev_info *bdi;
 
+       /*
+        * If we are expecting writeback progress, we must submit plugged IO.
+        */
+       if (blk_needs_flush_plug(current))
+               blk_schedule_flush_plug(current);
+
        if (!nr_pages)
                nr_pages = get_nr_dirty_pages();
 
index f394aff59c363a34c43eea0eec32293e21570986..3988b43c2f5ac8d87533db4de75395e5c4d1c286 100644 (file)
@@ -530,13 +530,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
        req->out.args[0].size = count;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
 {
        unsigned i;
 
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
-               if (write)
+               if (should_dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
@@ -1320,6 +1320,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
+       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1363,7 +1364,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, !write);
+                       fuse_release_user_pages(req, should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
index 0f56deb24ce6547c1aaa13e6a731779c09047077..c415668c86d4cc727460ae0f0eb5a6d1f5840f61 100644 (file)
@@ -568,7 +568,7 @@ static int ioctl_fsthaw(struct file *filp)
        return thaw_super(sb);
 }
 
-static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
 {
        struct file_dedupe_range __user *argp = arg;
        struct file_dedupe_range *same = NULL;
@@ -582,6 +582,10 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
        }
 
        size = offsetof(struct file_dedupe_range __user, info[count]);
+       if (size > PAGE_SIZE) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        same = memdup_user(argp, size);
        if (IS_ERR(same)) {
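
The new bound matters because count is user-controlled and size feeds
memdup_user(): the header is 24 bytes and each info[] slot adds another
32, so an unchecked count could demand an arbitrarily large contiguous
allocation. The same computation, stand-alone (the 4096 cap assumes 4K
pages):

    #include <stdio.h>
    #include <stddef.h>
    #include <linux/fs.h>   /* struct file_dedupe_range{,_info} */

    int main(void)
    {
            unsigned count = 127;   /* largest value that fits the cap */
            size_t size = offsetof(struct file_dedupe_range, info[count]);

            printf("%u entries -> %zu bytes (cap 4096)\n", count, size);
            /* 127 -> 4088 bytes; 128 would exceed PAGE_SIZE -> -ENOMEM */
            return 0;
    }
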
index 48141b8eff5f4f799f804674c83173863c156b2c..706270f21b35cd23e18a5057bfec9b2995b77bba 100644 (file)
@@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
         * Now the data has been copied, commit the range we've copied.  This
         * should not fail unless the filesystem has had a fatal error.
         */
-       ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
-                       flags, &iomap);
+       if (ops->iomap_end) {
+               ret = ops->iomap_end(inode, pos, length,
+                                    written > 0 ? written : 0,
+                                    flags, &iomap);
+       }
 
        return written ? written : ret;
 }
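
Making ->iomap_end optional lets simple filesystems register only the
begin hook. A hedged kernel-context sketch of what such an ops table can
now look like (demo_* names are illustrative, not from the patch):

    static int demo_iomap_begin(struct inode *inode, loff_t pos,
                                loff_t length, unsigned flags,
                                struct iomap *iomap)
    {
            /* fill *iomap to describe [pos, pos + length) */
            return 0;
    }

    static const struct iomap_ops demo_iomap_ops = {
            .iomap_begin = demo_iomap_begin,
            /* .iomap_end left NULL; iomap_apply() now tolerates it */
    };
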
@@ -194,12 +197,9 @@ again:
                if (mapping_writably_mapped(inode->i_mapping))
                        flush_dcache_page(page);
 
-               pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
 
                flush_dcache_page(page);
-               mark_page_accessed(page);
 
                status = iomap_write_end(inode, pos, bytes, copied, page);
                if (unlikely(status < 0))
@@ -428,9 +428,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                break;
        }
 
+       if (iomap->flags & IOMAP_F_MERGED)
+               flags |= FIEMAP_EXTENT_MERGED;
+
        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
-                       iomap->length, flags | FIEMAP_EXTENT_MERGED);
+                       iomap->length, flags);
 
 }
 
@@ -470,13 +473,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
        if (ret)
                return ret;
 
-       ret = filemap_write_and_wait(inode->i_mapping);
-       if (ret)
-               return ret;
+       if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
+               ret = filemap_write_and_wait(inode->i_mapping);
+               if (ret)
+                       return ret;
+       }
 
        while (len > 0) {
                ret = iomap_apply(inode, start, len, 0, ops, &ctx,
                                iomap_fiemap_actor);
+               /* inode with no (attribute) mapping will give ENOENT */
+               if (ret == -ENOENT)
+                       break;
                if (ret < 0)
                        return ret;
                if (ret == 0)
index e1574008adc9e4607e9deb2c1898f5d25be528a9..2bcb86e6e6ca0988cbe4337c970247869db34a24 100644 (file)
@@ -840,21 +840,35 @@ repeat:
        mutex_lock(&kernfs_mutex);
 
        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
+               struct kernfs_node *parent;
                struct inode *inode;
-               struct dentry *dentry;
 
+               /*
+                * We want fsnotify_modify() on @kn but, as the
+                * modifications don't originate from userland, we don't
+                * have the matching @file available.  Look up the inodes
+                * and generate the events manually.
+                */
                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;
 
-               dentry = d_find_any_alias(inode);
-               if (dentry) {
-                       fsnotify_parent(NULL, dentry, FS_MODIFY);
-                       fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
-                                NULL, 0);
-                       dput(dentry);
+               parent = kernfs_get_parent(kn);
+               if (parent) {
+                       struct inode *p_inode;
+
+                       p_inode = ilookup(info->sb, parent->ino);
+                       if (p_inode) {
+                               fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
+                                        inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+                               iput(p_inode);
+                       }
+
+                       kernfs_put(parent);
                }
 
+               fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+                        kn->name, 0);
                iput(inode);
        }
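
The practical effect is that inotify watchers on a sysfs attribute, and
now on its parent directory too, see IN_MODIFY when the kernel itself
rewrites the file. A userspace check (the watched path is only an
example):

    #include <stdio.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            int fd = inotify_init();
            int wd = inotify_add_watch(fd,
                            "/sys/class/net/eth0/operstate", IN_MODIFY);
            ssize_t n = read(fd, buf, sizeof(buf)); /* blocks for event */

            if (n > 0 && wd >= 0)
                    printf("attribute modified by the kernel\n");
            close(fd);
            return 0;
    }
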
 
index f55a4e7560470d7a1ef8ab790beb6db59232b35f..217847679f0eac675492ac049cbc0a42a6f75066 100644 (file)
@@ -346,7 +346,7 @@ static void bl_write_cleanup(struct work_struct *work)
                        PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
-                                       (end - start) >> SECTOR_SHIFT);
+                                       (end - start) >> SECTOR_SHIFT, end);
        }
 
        pnfs_ld_write_done(hdr);
index 18e6fd0b9506e931a62acd3f91303574433a1113..efc007f007427c84bf0e56961b51f07073b78865 100644 (file)
@@ -141,6 +141,7 @@ struct pnfs_block_layout {
        struct rb_root          bl_ext_ro;
        spinlock_t              bl_ext_lock;   /* Protects list manipulation */
        bool                    bl_scsi_layout;
+       u64                     bl_lwb;
 };
 
 static inline struct pnfs_block_layout *
@@ -182,7 +183,7 @@ int ext_tree_insert(struct pnfs_block_layout *bl,
 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
                sector_t end);
 int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len);
+               sector_t len, u64 lwb);
 bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
                struct pnfs_block_extent *ret, bool rw);
 int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg);
index 992bcb19c11e744bed4c390e133ea3ebe7cf0916..c85fbfd2d0d99e7cd5e3cc902ebd4b75b8e1b02c 100644 (file)
@@ -402,7 +402,7 @@ ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
 
 int
 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len)
+               sector_t len, u64 lwb)
 {
        struct rb_root *root = &bl->bl_ext_rw;
        sector_t end = start + len;
@@ -471,6 +471,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
                }
        }
 out:
+       if (bl->bl_lwb < lwb)
+               bl->bl_lwb = lwb;
        spin_unlock(&bl->bl_ext_lock);
 
        __ext_put_deviceids(&tmp);
@@ -518,7 +520,7 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
 }
 
 static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
-               size_t buffer_size, size_t *count)
+               size_t buffer_size, size_t *count, __u64 *lastbyte)
 {
        struct pnfs_block_extent *be;
        int ret = 0;
@@ -542,6 +544,8 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
                        p = encode_block_extent(be, p);
                be->be_tag = EXTENT_COMMITTING;
        }
+       *lastbyte = bl->bl_lwb - 1;
+       bl->bl_lwb = 0;
        spin_unlock(&bl->bl_ext_lock);
 
        return ret;
@@ -564,7 +568,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
        arg->layoutupdate_pages = &arg->layoutupdate_page;
 
 retry:
-       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
+       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
        if (unlikely(ret)) {
                ext_tree_free_commitdata(arg, buffer_size);
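The bl_lwb plumbing above amounts to a high-water mark: writers raise the last-written byte under the extent lock, and the commit encoder consumes and resets it so the layoutcommit arguments can carry lastbytewritten. A minimal sketch of that pattern, assuming a pthread mutex in place of bl_ext_lock and invented names:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t lwb;    /* high-water mark: one past the last written byte */

/* Writers push the mark up, never down (cf. ext_tree_mark_written). */
static void mark_written(uint64_t last_byte)
{
        pthread_mutex_lock(&lock);
        if (lwb < last_byte)
                lwb = last_byte;
        pthread_mutex_unlock(&lock);
}

/* The committer consumes and resets the mark (cf. ext_tree_encode_commit). */
static uint64_t consume_lwb(void)
{
        uint64_t ret;

        pthread_mutex_lock(&lock);
        ret = lwb;
        lwb = 0;
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        mark_written(4096);
        mark_written(1024);             /* does not lower the mark */
        printf("lastbytewritten = %llu\n",
               (unsigned long long)(consume_lwb() - 1));
        return 0;
}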
 
index a7f2e6e3330525c1069c524f8b56960e780f7f4b..52a28311e2a4b2d72063cf36ac9e4296481211b7 100644 (file)
@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 err_socks:
        svc_rpcb_cleanup(serv, net);
 err_bind:
+       nn->cb_users[minorversion]--;
        dprintk("NFS: Couldn't create callback socket: err = %d; "
                        "net = %p\n", ret, net);
        return ret;
index c92a75e066a6f75828079f422c2546ea73a90415..f953ef6b2f2e7eafcfb7fb9015972f12f214811e 100644 (file)
@@ -454,11 +454,8 @@ static bool referring_call_exists(struct nfs_client *clp,
                                ((u32 *)&rclist->rcl_sessionid.data)[3],
                                ref->rc_sequenceid, ref->rc_slotid);
 
-                       spin_lock(&tbl->slot_tbl_lock);
-                       status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
-                                 tbl->slots[ref->rc_slotid].seq_nr ==
-                                       ref->rc_sequenceid);
-                       spin_unlock(&tbl->slot_tbl_lock);
+                       status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
+                                       ref->rc_sequenceid, HZ >> 1) < 0;
                        if (status)
                                goto out;
                }
@@ -487,7 +484,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                goto out;
 
        tbl = &clp->cl_session->bc_slot_table;
-       slot = tbl->slots + args->csa_slotid;
 
        /* Set up res before grabbing the spinlock */
        memcpy(&res->csr_sessionid, &args->csa_sessionid,
index 003ebce4bbc49fa0e5508816027ae65798119e4f..1e106780a23752fa9b49ed74baa9bd01c500c912 100644 (file)
@@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
  * Initialise the timeout values for a connection
  */
 void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
-                                   unsigned int timeo, unsigned int retrans)
+                                   int timeo, int retrans)
 {
        to->to_initval = timeo * HZ / 10;
        to->to_retries = retrans;
@@ -434,9 +434,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        switch (proto) {
        case XPRT_TRANSPORT_TCP:
        case XPRT_TRANSPORT_RDMA:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (to->to_initval == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
@@ -449,9 +449,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
                to->to_exponential = 0;
                break;
        case XPRT_TRANSPORT_UDP:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_UDP_RETRANS;
-               if (!to->to_initval)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
                        to->to_initval = NFS_MAX_UDP_TIMEOUT;
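The rewritten defaults above distinguish "option never specified" from a literal 0: previously a user-supplied timeo or retrans of 0 was indistinguishable from an unset option and was silently replaced by the defaults. A hedged sketch of the sentinel idiom, with UINT_MAX standing in for the NFS_UNSPEC_* values defined elsewhere in this series and pick_retrans() invented for illustration:

#include <limits.h>
#include <stdio.h>

#define UNSPEC_RETRANS UINT_MAX  /* sentinel: option was never set */

static unsigned int pick_retrans(unsigned int retrans, unsigned int def)
{
        /* Only an unset option falls back to the default; an explicit
         * 0 from the user is preserved. */
        return retrans == UNSPEC_RETRANS ? def : retrans;
}

int main(void)
{
        printf("%u\n", pick_retrans(UNSPEC_RETRANS, 2)); /* 2: default  */
        printf("%u\n", pick_retrans(0, 2));              /* 0: explicit */
        return 0;
}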
index 7d620970f2e1addfbd447dc7b4a0bd1233bc9ec6..ca699ddc11c10e2e012f200d37213c90739af286 100644 (file)
@@ -657,7 +657,10 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (result <= 0)
                goto out;
 
-       written = generic_write_sync(iocb, result);
+       result = generic_write_sync(iocb, result);
+       if (result < 0)
+               goto out;
+       written = result;
        iocb->ki_pos += written;
 
        /* Return error values */
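The nfs_file_write() change above matters because generic_write_sync() can return a negative error; assigning its result straight into written would then advance iocb->ki_pos by a negative amount. A small model of the fixed flow, with write_sync() and file_write() as invented stand-ins:

#include <stdio.h>

/* Stand-in for generic_write_sync(): may turn a positive byte count
 * into a negative -errno if the flush fails. */
static long write_sync(long result, int fail)
{
        return fail ? -5 /* -EIO */ : result;
}

static long file_write(long result, int fail)
{
        long written;

        result = write_sync(result, fail);
        if (result < 0)
                return result;  /* propagate; don't touch the file position */
        written = result;       /* only now is it safely a byte count */
        return written;
}

int main(void)
{
        printf("%ld\n", file_write(4096, 0));   /* 4096 */
        printf("%ld\n", file_write(4096, 1));   /* -5, never added to ki_pos */
        return 0;
}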
index e6206eaf2bdf34e4aa11d23dbf68b6e7c0c4684e..51b51369704c56c00732b10ff0c31edfd0d9c19e 100644 (file)
@@ -37,6 +37,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
        if (ffl) {
                INIT_LIST_HEAD(&ffl->error_list);
                INIT_LIST_HEAD(&ffl->mirrors);
+               ffl->last_report_time = ktime_get();
                return &ffl->generic_hdr;
        } else
                return NULL;
@@ -640,19 +641,18 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 {
        static const ktime_t notime = {0};
        s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
+       struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 
        nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
        if (ktime_equal(mirror->start_time, notime))
                mirror->start_time = now;
-       if (ktime_equal(mirror->last_report_time, notime))
-               mirror->last_report_time = now;
        if (mirror->report_interval != 0)
                report_interval = (s64)mirror->report_interval * 1000LL;
        else if (layoutstats_timer != 0)
                report_interval = (s64)layoutstats_timer * 1000LL;
-       if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+       if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
                        report_interval) {
-               mirror->last_report_time = now;
+               ffl->last_report_time = now;
                return true;
        }
 
@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 {
        struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
        struct nfs4_pnfs_ds *ds;
+       bool fail_return = false;
        int idx;
 
        /* mirrors are sorted by efficiency */
        for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
-               ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+               if (idx+1 == fls->mirror_array_cnt)
+                       fail_return = true;
+               ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
                if (ds) {
                        *best_idx = idx;
                        return ds;
@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
        struct nfs4_pnfs_ds *ds;
        int ds_idx;
 
+retry:
        /* Use full layout for now */
        if (!pgio->pg_lseg)
                ff_layout_pg_get_read(pgio, req, false);
@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 
        ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
        if (!ds) {
-               if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                       goto out_pnfs;
-               else
+               if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                        goto out_mds;
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+               /* Sleep for 1 second before retrying */
+               ssleep(1);
+               goto retry;
        }
 
        mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
@@ -890,12 +897,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_read_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static void
@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        int i;
        int status;
 
+retry:
        if (!pgio->pg_lseg) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        for (i = 0; i < pgio->pg_mirror_count; i++) {
                ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
                if (!ds) {
-                       if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                               goto out_pnfs;
-                       else
+                       if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                                goto out_mds;
+                       pnfs_put_lseg(pgio->pg_lseg);
+                       pgio->pg_lseg = NULL;
+                       /* Sleep for 1 second before retrying */
+                       ssleep(1);
+                       goto retry;
                }
                pgm = &pgio->pg_mirrors[i];
                mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -956,12 +961,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_write_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static unsigned int
index 1bcdb15d0c41a74d139ce7db378c3e19844052a0..3ee0c9fcea7632269edbf51f3e5bd1bf80bb868d 100644 (file)
@@ -84,7 +84,6 @@ struct nfs4_ff_layout_mirror {
        struct nfs4_ff_layoutstat       read_stat;
        struct nfs4_ff_layoutstat       write_stat;
        ktime_t                         start_time;
-       ktime_t                         last_report_time;
        u32                             report_interval;
 };
 
@@ -101,6 +100,7 @@ struct nfs4_flexfile_layout {
        struct pnfs_ds_commit_info commit_info;
        struct list_head        mirrors;
        struct list_head        error_list; /* nfs4_ff_layout_ds_err */
+       ktime_t                 last_report_time; /* Layoutstat report times */
 };
 
 static inline struct nfs4_flexfile_layout *
index 0aa36be71fceaaf3ec532af442358b5ca80828ab..f7a3f6b05369a21d7e1190625e5871c55d1aeeb2 100644 (file)
@@ -17,8 +17,8 @@
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS_LD
 
-static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
-static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_retrans;
 
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 
        devid = &mirror->mirror_ds->id_node;
        if (ff_layout_test_devid_unavailable(devid))
-               goto out;
+               goto out_fail;
 
        ds = mirror->mirror_ds->ds;
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        mirror->mirror_ds->ds_versions[0].rsize = max_payload;
                if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
-       } else {
-               ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
-                                        mirror, lseg->pls_range.offset,
-                                        lseg->pls_range.length, NFS4ERR_NXIO,
-                                        OP_ILLEGAL, GFP_NOIO);
-               if (fail_return || !ff_layout_has_available_ds(lseg))
-                       pnfs_error_mark_layout_for_return(ino, lseg);
-               ds = NULL;
+               goto out;
        }
+       ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+                                mirror, lseg->pls_range.offset,
+                                lseg->pls_range.length, NFS4ERR_NXIO,
+                                OP_ILLEGAL, GFP_NOIO);
+out_fail:
+       if (fail_return || !ff_layout_has_available_ds(lseg))
+               pnfs_error_mark_layout_for_return(ino, lseg);
+       ds = NULL;
 out:
        return ds;
 }
index 7ce5e023c3c3cd36056d1272cc16c9ebb1d1198a..74935a19e4bfc678767dcc7fde3fcef16d7872e2 100644 (file)
@@ -58,6 +58,9 @@ struct nfs_clone_mount {
  */
 #define NFS_UNSPEC_PORT                (-1)
 
+#define NFS_UNSPEC_RETRANS     (UINT_MAX)
+#define NFS_UNSPEC_TIMEO       (UINT_MAX)
+
 /*
  * Maximum number of pages that readdir can use for creating
  * a vmapped array of pages.
@@ -156,7 +159,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
 int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
 void nfs_server_insert_lists(struct nfs_server *);
 void nfs_server_remove_lists(struct nfs_server *);
-void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
 int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
                rpc_authflavor_t);
 struct nfs_server *nfs_alloc_server(void);
index 33da841a21bb2871f753fb38a72dd76ce2725ded..64b43b4ad9dd8841cbfc41e86c046f89e7c4c46d 100644 (file)
@@ -318,10 +318,22 @@ static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
        struct nfs42_layoutstat_data *data = calldata;
-       struct nfs_server *server = NFS_SERVER(data->args.inode);
+       struct inode *inode = data->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct pnfs_layout_hdr *lo;
 
+       spin_lock(&inode->i_lock);
+       lo = NFS_I(inode)->layout;
+       if (!pnfs_layout_is_valid(lo)) {
+               spin_unlock(&inode->i_lock);
+               rpc_exit(task, 0);
+               return;
+       }
+       nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
+       spin_unlock(&inode->i_lock);
        nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
                             &data->res.seq_res, task);
 }
 
 static void
@@ -338,12 +350,14 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
        case 0:
                break;
        case -NFS4ERR_EXPIRED:
+       case -NFS4ERR_ADMIN_REVOKED:
+       case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
-       case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
-               if (lo && nfs4_stateid_match(&data->args.stateid,
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        LIST_HEAD(head);
 
@@ -357,11 +371,23 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
                } else
                        spin_unlock(&inode->i_lock);
                break;
+       case -NFS4ERR_OLD_STATEID:
+               spin_lock(&inode->i_lock);
+               lo = NFS_I(inode)->layout;
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match_other(&data->args.stateid,
+                                       &lo->plh_stateid)) {
+                       /* Do we need to delay before resending? */
+                       if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+                                               &data->args.stateid))
+                               rpc_delay(task, HZ);
+                       rpc_restart_call_prepare(task);
+               }
+               spin_unlock(&inode->i_lock);
+               break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
-       default:
-               break;
        }
 
        dprintk("%s server returns %d\n", __func__, task->tk_status);
index 324bfdc212504de591347da77c1ad3f1db595974..9bf64eacba5bd6d47a04ca3c9ac66b74912bdc72 100644 (file)
@@ -396,6 +396,10 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
+extern void nfs4_set_lease_period(struct nfs_client *clp,
+               unsigned long lease,
+               unsigned long lastrenewed);
+
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
index 8d7d08d4f95f17e09d53bda53da364ef02ea05b8..cd3b7cfdde16ae0e16cc17fbc9389ffc60eb511a 100644 (file)
@@ -817,6 +817,11 @@ static int nfs4_set_client(struct nfs_server *server,
                goto error;
        }
 
+       if (server->nfs_client == clp) {
+               error = -ELOOP;
+               goto error;
+       }
+
        /*
         * Query for the lease time on clientid setup or renewal
         *
index a036e93bdf9656813abec3a3565e26fca3b28838..a9dec32ba9ba787f95693a8e7f266292376b0132 100644 (file)
@@ -634,15 +634,11 @@ out_sleep:
 }
 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
 
-static int nfs40_sequence_done(struct rpc_task *task,
-                              struct nfs4_sequence_res *res)
+static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
 {
        struct nfs4_slot *slot = res->sr_slot;
        struct nfs4_slot_table *tbl;
 
-       if (slot == NULL)
-               goto out;
-
        tbl = slot->table;
        spin_lock(&tbl->slot_tbl_lock);
        if (!nfs41_wake_and_assign_slot(tbl, slot))
@@ -650,7 +646,13 @@ static int nfs40_sequence_done(struct rpc_task *task,
        spin_unlock(&tbl->slot_tbl_lock);
 
        res->sr_slot = NULL;
-out:
+}
+
+static int nfs40_sequence_done(struct rpc_task *task,
+                              struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
        return 1;
 }
 
@@ -666,6 +668,11 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
        tbl = slot->table;
        session = tbl->session;
 
+       /* Bump the slot sequence number */
+       if (slot->seq_done)
+               slot->seq_nr++;
+       slot->seq_done = 0;
+
        spin_lock(&tbl->slot_tbl_lock);
        /* Be nice to the server: try to ensure that the last transmitted
         * value for highest_user_slotid <= target_highest_slotid
@@ -686,9 +693,12 @@ out_unlock:
        res->sr_slot = NULL;
        if (send_new_highest_used_slotid)
                nfs41_notify_server(session->clp);
+       if (waitqueue_active(&tbl->slot_waitq))
+               wake_up_all(&tbl->slot_waitq);
 }
 
-int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+static int nfs41_sequence_process(struct rpc_task *task,
+               struct nfs4_sequence_res *res)
 {
        struct nfs4_session *session;
        struct nfs4_slot *slot = res->sr_slot;
@@ -714,7 +724,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
        switch (res->sr_status) {
        case 0:
                /* Update the slot's sequence and clientid lease timer */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
                clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
@@ -769,16 +779,16 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
                goto retry_nowait;
        default:
                /* Just update the slot sequence no. */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
        }
 out:
        /* The session may be reset by one of the error handlers. */
        dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
-       nfs41_sequence_free_slot(res);
 out_noaction:
        return ret;
 retry_nowait:
        if (rpc_restart_call_prepare(task)) {
+               nfs41_sequence_free_slot(res);
                task->tk_status = 0;
                ret = 0;
        }
@@ -789,8 +799,37 @@ out_retry:
        rpc_delay(task, NFS4_POLL_RETRY_MAX);
        return 0;
 }
+
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (!nfs41_sequence_process(task, res))
+               return 0;
+       if (res->sr_slot != NULL)
+               nfs41_sequence_free_slot(res);
+       return 1;
+}
 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot == NULL)
+               return 1;
+       if (res->sr_slot->table->session != NULL)
+               return nfs41_sequence_process(task, res);
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL) {
+               if (res->sr_slot->table->session != NULL)
+                       nfs41_sequence_free_slot(res);
+               else
+                       nfs40_sequence_free_slot(res);
+       }
+}
+
 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
        if (res->sr_slot == NULL)
@@ -920,6 +959,17 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
                                    args, res, task);
 }
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
+}
+
 int nfs4_sequence_done(struct rpc_task *task,
                       struct nfs4_sequence_res *res)
 {
@@ -1197,6 +1247,7 @@ static void nfs4_opendata_free(struct kref *kref)
        struct super_block *sb = p->dentry->d_sb;
 
        nfs_free_seqid(p->o_arg.seqid);
+       nfs4_sequence_free_slot(&p->o_res.seq_res);
        if (p->state != NULL)
                nfs4_put_open_state(p->state);
        nfs4_put_state_owner(p->owner);
@@ -1656,9 +1707,14 @@ err:
 static struct nfs4_state *
 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
 {
+       struct nfs4_state *ret;
+
        if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
-               return _nfs4_opendata_reclaim_to_nfs4_state(data);
-       return _nfs4_opendata_to_nfs4_state(data);
+               ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
+       else
+               ret = _nfs4_opendata_to_nfs4_state(data);
+       nfs4_sequence_free_slot(&data->o_res.seq_res);
+       return ret;
 }
 
 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
@@ -2056,7 +2112,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
 
        data->rpc_status = task->tk_status;
 
-       if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+       if (!nfs4_sequence_process(task, &data->o_res.seq_res))
                return;
 
        if (task->tk_status == 0) {
@@ -4237,12 +4293,9 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
                err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
                trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
                if (err == 0) {
-                       struct nfs_client *clp = server->nfs_client;
-
-                       spin_lock(&clp->cl_lock);
-                       clp->cl_lease_time = fsinfo->lease_time * HZ;
-                       clp->cl_last_renewal = now;
-                       spin_unlock(&clp->cl_lock);
+                       nfs4_set_lease_period(server->nfs_client,
+                                       fsinfo->lease_time * HZ,
+                                       now);
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
@@ -7517,12 +7570,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_create_session(clp, status);
 
+       switch (status) {
+       case -NFS4ERR_STALE_CLIENTID:
+       case -NFS4ERR_DELAY:
+       case -ETIMEDOUT:
+       case -EACCES:
+       case -EAGAIN:
+               goto out;
+       }
+
+       clp->cl_seqid++;
        if (!status) {
                /* Verify the session's negotiated channel_attrs values */
                status = nfs4_verify_channel_attrs(&args, &res);
                /* Increment the clientid slot sequence id */
-               if (clp->cl_seqid == res.seqid)
-                       clp->cl_seqid++;
                if (status)
                        goto out;
                nfs4_update_session(session, &res);
@@ -7867,7 +7928,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        struct nfs4_layoutget *lgp = calldata;
 
        dprintk("--> %s\n", __func__);
-       nfs41_sequence_done(task, &lgp->res.seq_res);
+       nfs41_sequence_process(task, &lgp->res.seq_res);
        dprintk("<-- %s\n", __func__);
 }
 
@@ -8083,6 +8144,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
        /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
        if (status == 0 && lgp->res.layoutp->len)
                lseg = pnfs_layout_process(lgp);
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8109,7 +8171,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 
        dprintk("--> %s\n", __func__);
 
-       if (!nfs41_sequence_done(task, &lrp->res.seq_res))
+       if (!nfs41_sequence_process(task, &lrp->res.seq_res))
                return;
 
        server = NFS_SERVER(lrp->args.inode);
@@ -8121,6 +8183,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_DELAY:
                if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
                        break;
+               nfs4_sequence_free_slot(&lrp->res.seq_res);
                rpc_restart_call_prepare(task);
                return;
        }
@@ -8135,12 +8198,16 @@ static void nfs4_layoutreturn_release(void *calldata)
 
        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
-       pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
-                       be32_to_cpu(lrp->args.stateid.seqid));
-       if (lrp->res.lrs_present && pnfs_layout_is_valid(lo))
+       if (lrp->res.lrs_present) {
+               pnfs_mark_matching_lsegs_invalid(lo, &freeme,
+                               &lrp->args.range,
+                               be32_to_cpu(lrp->args.stateid.seqid));
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+       } else
+               pnfs_mark_layout_stateid_invalid(lo, &freeme);
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&lo->plh_inode->i_lock);
+       nfs4_sequence_free_slot(&lrp->res.seq_res);
        pnfs_free_lseg_list(&freeme);
        pnfs_put_layout_hdr(lrp->args.layout);
        nfs_iput_and_deactive(lrp->inode);
index e1ba58c3d1ad305ab28d932a5b90ac269092f98b..82e77198d17efdf656315f39c3d50c4d9aa568a9 100644 (file)
@@ -136,6 +136,26 @@ nfs4_kill_renewd(struct nfs_client *clp)
        cancel_delayed_work_sync(&clp->cl_renewd);
 }
 
+/**
+ * nfs4_set_lease_period - Sets the lease period on a nfs_client
+ *
+ * @clp: pointer to nfs_client
+ * @lease: new value for lease period
+ * @lastrenewed: time at which lease was last renewed
+ */
+void nfs4_set_lease_period(struct nfs_client *clp,
+               unsigned long lease,
+               unsigned long lastrenewed)
+{
+       spin_lock(&clp->cl_lock);
+       clp->cl_lease_time = lease;
+       clp->cl_last_renewal = lastrenewed;
+       spin_unlock(&clp->cl_lock);
+
+       /* Cap maximum reconnect timeout at 1/2 lease period */
+       rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1);
+}
+
 /*
  * Local variables:
  *   c-basic-offset: 8
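Centralising the update in nfs4_set_lease_period() also lets the client cap the RPC transport's reconnect timeout at half the lease, so a stalled reconnect cannot eat the whole renewal window. A hedged userspace model, with struct client and set_lease_period() invented and a pthread mutex in place of cl_lock:

#include <pthread.h>
#include <stdio.h>

struct client {
        pthread_mutex_t lock;
        unsigned long lease_time;
        unsigned long last_renewal;
        unsigned long max_reconnect;    /* stand-in for the transport cap */
};

static void set_lease_period(struct client *clp, unsigned long lease,
                             unsigned long lastrenewed)
{
        pthread_mutex_lock(&clp->lock);
        clp->lease_time = lease;
        clp->last_renewal = lastrenewed;
        pthread_mutex_unlock(&clp->lock);

        /* Cap reconnect attempts at half the lease period so the client
         * can still renew in time after a transport failure. */
        clp->max_reconnect = lease >> 1;
}

int main(void)
{
        struct client c = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

        set_lease_period(&c, 90, 0);
        printf("lease=%lu cap=%lu\n", c.lease_time, c.max_reconnect);
        return 0;
}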
index 332d06e64fa910fcfa54ca3fc304e9869f05bc6f..b62973045a3e048016f1af48f761fdb152f3908d 100644 (file)
@@ -28,6 +28,7 @@ static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
        tbl->highest_used_slotid = NFS4_NO_SLOT;
        spin_lock_init(&tbl->slot_tbl_lock);
        rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
+       init_waitqueue_head(&tbl->slot_waitq);
        init_completion(&tbl->complete);
 }
 
@@ -172,6 +173,58 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
        return ERR_PTR(-E2BIG);
 }
 
+static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
+               u32 *seq_nr)
+       __must_hold(&tbl->slot_tbl_lock)
+{
+       struct nfs4_slot *slot;
+
+       slot = nfs4_lookup_slot(tbl, slotid);
+       if (IS_ERR(slot))
+               return PTR_ERR(slot);
+       *seq_nr = slot->seq_nr;
+       return 0;
+}
+
+/*
+ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
+ *
+ * Given a slot table, slot id and sequence number, determine if the
+ * RPC call in question is still in flight. This function is mainly
+ * intended for use by the callback channel.
+ */
+static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr)
+{
+       u32 cur_seq;
+       bool ret = false;
+
+       spin_lock(&tbl->slot_tbl_lock);
+       if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
+           cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
+               ret = true;
+       spin_unlock(&tbl->slot_tbl_lock);
+       return ret;
+}
+
+/*
+ * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
+ *
+ * Given a slot table, slot id and sequence number, wait until the
+ * corresponding RPC call completes. This function is mainly
+ * intended for use by the callback channel.
+ */
+int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout)
+{
+       if (wait_event_timeout(tbl->slot_waitq,
+                       !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
+                       timeout) == 0)
+               return -ETIMEDOUT;
+       return 0;
+}
+
 /*
  * nfs4_alloc_slot - efficiently look for a free slot
  *
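The new helpers above turn the callback channel's point-in-time "is this seqid still in use?" test into a bounded wait, which is why the backchannel code earlier in this series can call nfs4_slot_wait_on_seqid(..., HZ >> 1). A rough condition-variable model of the same wait-with-timeout shape (all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool in_use = true;      /* stand-in for "slot seqid still in flight" */

/* Block until the predicate clears or the deadline passes, instead of
 * sampling it once under the lock. */
static int wait_seqid(const struct timespec *deadline)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        while (in_use && err == 0)
                err = pthread_cond_timedwait(&done, &lock, deadline);
        pthread_mutex_unlock(&lock);
        return in_use ? -1 : 0;         /* -1 plays the role of -ETIMEDOUT */
}

int main(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;           /* roughly the HZ >> 1 budget */

        in_use = false;                 /* pretend the RPC already completed */
        printf("wait_seqid() = %d\n", wait_seqid(&deadline));
        return 0;
}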
index 5b51298d1d03765684dd9ea79599169c01c71aaa..f703b755351bd8ad2217fb18009e93232f2ccc61 100644 (file)
@@ -21,7 +21,8 @@ struct nfs4_slot {
        unsigned long           generation;
        u32                     slot_nr;
        u32                     seq_nr;
-       unsigned int            interrupted : 1;
+       unsigned int            interrupted : 1,
+                               seq_done : 1;
 };
 
 /* Sessions */
@@ -36,6 +37,7 @@ struct nfs4_slot_table {
        unsigned long   used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
        spinlock_t      slot_tbl_lock;
        struct rpc_wait_queue   slot_tbl_waitq; /* allocators may wait here */
+       wait_queue_head_t       slot_waitq;     /* Completion wait on slot */
        u32             max_slots;              /* # slots in table */
        u32             max_slotid;             /* Max allowed slotid value */
        u32             highest_used_slotid;    /* sent to server on each SEQ.
@@ -78,6 +80,9 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout);
 extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
index 834b875900d62addf6db7f6eb590ce5c2d1b09bc..cada00aa5096d7dbd57a0b8717d1a6583abbbfa7 100644 (file)
@@ -277,20 +277,17 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 {
        int status;
        struct nfs_fsinfo fsinfo;
+       unsigned long now;
 
        if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
                nfs4_schedule_state_renewal(clp);
                return 0;
        }
 
+       now = jiffies;
        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
-               /* Update lease time and schedule renewal */
-               spin_lock(&clp->cl_lock);
-               clp->cl_lease_time = fsinfo.lease_time * HZ;
-               clp->cl_last_renewal = jiffies;
-               spin_unlock(&clp->cl_lock);
-
+               nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
                nfs4_schedule_state_renewal(clp);
        }
 
index 70806cae0d36bf71ca704d5d1ab7bc128d1455e9..2c93a85eda51c9d6ebcde892d1e7ca60a1f159b3 100644 (file)
@@ -365,7 +365,8 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
        if (list_empty(&lo->plh_segs)) {
-               set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+               if (atomic_read(&lo->plh_outstanding) == 0)
+                       set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        }
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
@@ -768,17 +769,32 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
        pnfs_destroy_layouts_byclid(clp, false);
 }
 
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+       lo->plh_return_iomode = 0;
+       lo->plh_return_seq = 0;
+       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
 {
        u32 oldseq, newseq, new_barrier = 0;
-       bool invalid = !pnfs_layout_is_valid(lo);
 
        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
-       if (invalid || pnfs_seqid_is_newer(newseq, oldseq)) {
+
+       if (!pnfs_layout_is_valid(lo)) {
+               nfs4_stateid_copy(&lo->plh_stateid, new);
+               lo->plh_barrier = newseq;
+               pnfs_clear_layoutreturn_info(lo);
+               clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+               return;
+       }
+       if (pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                /*
                 * Because of wraparound, we want to keep the barrier
@@ -790,7 +806,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
-       if (invalid || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+       if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                lo->plh_barrier = new_barrier;
 }
 
@@ -886,19 +902,14 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
-static void
-pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-{
-       lo->plh_return_iomode = 0;
-       lo->plh_return_seq = 0;
-       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-}
-
 static bool
 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                nfs4_stateid *stateid,
                enum pnfs_iomode *iomode)
 {
+       /* Serialise LAYOUTGET/LAYOUTRETURN */
+       if (atomic_read(&lo->plh_outstanding) != 0)
+               return false;
        if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
                return false;
        pnfs_get_layout_hdr(lo);
@@ -1555,6 +1566,7 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
+       nfs4_client_recover_expired_lease(clp);
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1797,16 +1809,11 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                 */
                pnfs_mark_layout_stateid_invalid(lo, &free_me);
 
-               nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
-               lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
+               pnfs_set_layout_stateid(lo, &res->stateid, true);
        }
 
        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg, &free_me);
-       if (!pnfs_layout_is_valid(lo)) {
-               pnfs_clear_layoutreturn_info(lo);
-               clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-       }
 
 
        if (res->return_on_close)
@@ -2510,7 +2517,6 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 
        data->args.fh = NFS_FH(inode);
        data->args.inode = inode;
-       nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
        status = ld->prepare_layoutstats(&data->args);
        if (status)
                goto out_free;
index 18d446e1a82bbb5b558fca8deb3869849f91e58c..d39601381adf56fe21cd531d09b77f4960cf5060 100644 (file)
@@ -923,6 +923,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data) {
+               data->timeo             = NFS_UNSPEC_TIMEO;
+               data->retrans           = NFS_UNSPEC_RETRANS;
                data->acregmin          = NFS_DEF_ACREGMIN;
                data->acregmax          = NFS_DEF_ACREGMAX;
                data->acdirmin          = NFS_DEF_ACDIRMIN;
@@ -1189,6 +1191,19 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
        return rc;
 }
 
+static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
+               unsigned long l_bound, unsigned long u_bound)
+{
+       int ret;
+
+       ret = nfs_get_option_ul(args, option);
+       if (ret != 0)
+               return ret;
+       if (*option < l_bound || *option > u_bound)
+               return -ERANGE;
+       return 0;
+}
+
 /*
  * Error-check and convert a string of mount options from user space into
  * a data structure.  The whole mount string is processed; bad options are
@@ -1352,12 +1367,12 @@ static int nfs_parse_mount_options(char *raw,
                        mnt->bsize = option;
                        break;
                case Opt_timeo:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
                                goto out_invalid_value;
                        mnt->timeo = option;
                        break;
                case Opt_retrans:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
                                goto out_invalid_value;
                        mnt->retrans = option;
                        break;
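nfs_get_option_ul_bound() folds range validation into the parse step, so timeo and retrans reject out-of-range values with -ERANGE in one place instead of each caller re-checking. A hedged sketch of the same wrapper shape over strtoul(), with invented function names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int get_option_ul(const char *arg, unsigned long *option)
{
        char *end;

        errno = 0;
        *option = strtoul(arg, &end, 10);
        return (errno || *end != '\0') ? -EINVAL : 0;
}

/* Parse, then enforce [l_bound, u_bound] in one place. */
static int get_option_ul_bound(const char *arg, unsigned long *option,
                               unsigned long l_bound, unsigned long u_bound)
{
        int ret = get_option_ul(arg, option);

        if (ret != 0)
                return ret;
        if (*option < l_bound || *option > u_bound)
                return -ERANGE;
        return 0;
}

int main(void)
{
        unsigned long v;

        printf("%d\n", get_option_ul_bound("600", &v, 1, 600)); /* 0       */
        printf("%d\n", get_option_ul_bound("0", &v, 1, 600));   /* -ERANGE */
        return 0;
}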
index 8410ca275db1aecf0a1f8a022b92cdc1597ff258..a204d7e109d4d63a76d01a31198b00f3f1cd09be 100644 (file)
@@ -4903,6 +4903,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        return nfs_ok;
 }
 
+static __be32
+nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
+{
+       struct nfs4_ol_stateid *stp = openlockstateid(s);
+       __be32 ret;
+
+       mutex_lock(&stp->st_mutex);
+
+       ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+       if (ret)
+               goto out;
+
+       ret = nfserr_locks_held;
+       if (check_for_locks(stp->st_stid.sc_file,
+                           lockowner(stp->st_stateowner)))
+               goto out;
+
+       release_lock_stateid(stp);
+       ret = nfs_ok;
+
+out:
+       mutex_unlock(&stp->st_mutex);
+       nfs4_put_stid(s);
+       return ret;
+}
+
 __be32
 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                   struct nfsd4_free_stateid *free_stateid)
@@ -4910,7 +4936,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        stateid_t *stateid = &free_stateid->fr_stateid;
        struct nfs4_stid *s;
        struct nfs4_delegation *dp;
-       struct nfs4_ol_stateid *stp;
        struct nfs4_client *cl = cstate->session->se_client;
        __be32 ret = nfserr_bad_stateid;
 
@@ -4929,18 +4954,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
-               ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
-               if (ret)
-                       break;
-               stp = openlockstateid(s);
-               ret = nfserr_locks_held;
-               if (check_for_locks(stp->st_stid.sc_file,
-                                   lockowner(stp->st_stateowner)))
-                       break;
-               WARN_ON(!unhash_lock_stateid(stp));
+               atomic_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
-               nfs4_put_stid(s);
-               ret = nfs_ok;
+               ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
                dp = delegstateid(s);
@@ -5507,7 +5523,7 @@ static __be32
 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                            struct nfs4_ol_stateid *ost,
                            struct nfsd4_lock *lock,
-                           struct nfs4_ol_stateid **lst, bool *new)
+                           struct nfs4_ol_stateid **plst, bool *new)
 {
        __be32 status;
        struct nfs4_file *fi = ost->st_stid.sc_file;
@@ -5515,7 +5531,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
        struct nfs4_client *cl = oo->oo_owner.so_client;
        struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
        struct nfs4_lockowner *lo;
+       struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
+       bool hashed;
 
        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
@@ -5531,12 +5549,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                        goto out;
        }
 
-       *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
-       if (*lst == NULL) {
+retry:
+       lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+       if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }
+
+       mutex_lock(&lst->st_mutex);
+
+       /* See if it's still hashed to avoid race with FREE_STATEID */
+       spin_lock(&cl->cl_lock);
+       hashed = !list_empty(&lst->st_perfile);
+       spin_unlock(&cl->cl_lock);
+
+       if (!hashed) {
+               mutex_unlock(&lst->st_mutex);
+               nfs4_put_stid(&lst->st_stid);
+               goto retry;
+       }
        status = nfs_ok;
+       *plst = lst;
 out:
        nfs4_put_stateowner(&lo->lo_owner);
        return status;
@@ -5603,8 +5636,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new);
-               if (status == nfs_ok)
-                       mutex_lock(&lock_stp->st_mutex);
        } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
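The retry loop introduced above closes a race with FREE_STATEID: the lock stateid can be unhashed between lookup and taking st_mutex, so after acquiring the mutex the code re-checks hashing under cl_lock and starts over if the stateid was freed. A simplified model of that lookup/lock/revalidate/retry shape, with invented types and pthread mutexes standing in for both locks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stid {
        pthread_mutex_t mutex;          /* st_mutex analogue */
        bool hashed;                    /* "still on the per-file list" */
};

static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;

static struct stid *lookup_or_create(struct stid *s) { return s; }

static struct stid *acquire(struct stid *s)
{
        bool hashed;

retry:
        s = lookup_or_create(s);
        pthread_mutex_lock(&s->mutex);

        /* Revalidate under the client lock; a concurrent FREE_STATEID
         * may have unhashed the stateid while we slept on the mutex. */
        pthread_mutex_lock(&cl_lock);
        hashed = s->hashed;
        pthread_mutex_unlock(&cl_lock);

        if (!hashed) {
                pthread_mutex_unlock(&s->mutex);
                s->hashed = true;       /* model: recreate, then retry */
                goto retry;
        }
        return s;                       /* returned locked, as in the patch */
}

int main(void)
{
        struct stid s = { PTHREAD_MUTEX_INITIALIZER, false };
        struct stid *locked = acquire(&s);

        printf("acquired, hashed=%d\n", locked->hashed);
        pthread_mutex_unlock(&locked->mutex);
        return 0;
}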
index ba944123167b92f3a7460d8acc66b02dc7c53575..ff476e654b8f8044b84808b1c92c54055e4ca393 100644 (file)
@@ -1252,10 +1252,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (IS_ERR(dchild))
                return nfserrno(host_err);
        err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
-       if (err) {
-               dput(dchild);
+       /*
+        * We unconditionally drop our ref to dchild as fh_compose will have
+        * already grabbed its own ref for it.
+        */
+       dput(dchild);
+       if (err)
                return err;
-       }
        return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
                                        rdev, resfhp);
 }
index d2f97ecca6a5dfe6091d56da09871449574524bf..e0e5f7c3c99fe076d11dc5d907b543d5e5376671 100644 (file)
@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-       wait_event(group->fanotify_data.access_waitq, event->response ||
-                               atomic_read(&group->fanotify_data.bypass_perm));
-
-       if (!event->response) { /* bypass_perm set */
-               /*
-                * Event was canceled because group is being destroyed. Remove
-                * it from group's event list because we are responsible for
-                * freeing the permission event.
-                */
-               fsnotify_remove_event(group, &event->fae.fse);
-               return 0;
-       }
+       wait_event(group->fanotify_data.access_waitq, event->response);
 
        /* userspace responded, convert to something usable */
        switch (event->response) {
index 8e8e6bcd1d43d266346bac16dbb12ff8c893bae2..a64313868d3a15cefca72b5e228e798d98a43a1e 100644 (file)
@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_perm_event_info *event, *next;
+       struct fsnotify_event *fsn_event;
 
        /*
-        * There may be still new events arriving in the notification queue
-        * but since userspace cannot use fanotify fd anymore, no event can
-        * enter or leave access_list by now.
+        * Stop new events from arriving in the notification queue. Since
+        * userspace cannot use fanotify fd anymore, no event can enter or
+        * leave access_list by now either.
         */
-       spin_lock(&group->fanotify_data.access_lock);
-
-       atomic_inc(&group->fanotify_data.bypass_perm);
+       fsnotify_group_stop_queueing(group);
 
+       /*
+        * Process all permission events on access_list and notification queue
+        * and simulate reply from userspace.
+        */
+       spin_lock(&group->fanotify_data.access_lock);
        list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                 fae.fse.list) {
                pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
        spin_unlock(&group->fanotify_data.access_lock);
 
        /*
-        * Since bypass_perm is set, newly queued events will not wait for
-        * access response. Wake up the already sleeping ones now.
-        * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-        * processes sleeping in fanotify_handle_event() waiting for access
-        * response and thus also for all permission events to be freed.
+        * Destroy all non-permission events. For permission events just
+        * dequeue them and set the response. They will be freed once the
+        * response is consumed and fanotify_get_response() returns.
         */
+       mutex_lock(&group->notification_mutex);
+       while (!fsnotify_notify_queue_is_empty(group)) {
+               fsn_event = fsnotify_remove_first_event(group);
+               if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+                       fsnotify_destroy_event(group, fsn_event);
+               else
+                       FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+       }
+       mutex_unlock(&group->notification_mutex);
+
+       /* Response for all permission events is set, wake up waiters */
        wake_up(&group->fanotify_data.access_waitq);
 #endif
 
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        spin_lock_init(&group->fanotify_data.access_lock);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
-       atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
index 3e2dd85be5dd375a51af031566a381f20e46b21f..b47f7cfdcaa456f6da08fad1324873496179ec07 100644 (file)
@@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
        kfree(group);
 }
 
+/*
+ * Stop queueing new events for this group. Once this function returns
+ * fsnotify_add_event() will not add any new events to the group's queue.
+ */
+void fsnotify_group_stop_queueing(struct fsnotify_group *group)
+{
+       mutex_lock(&group->notification_mutex);
+       group->shutdown = true;
+       mutex_unlock(&group->notification_mutex);
+}
+
 /*
  * Trying to get rid of a group. Remove all marks, flush all events and release
  * the group reference.
@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_destroy_group(struct fsnotify_group *group)
 {
+       /*
+        * Stop queueing new events. The code below is careful enough to not
+        * require this but fanotify needs to stop queuing events even before
+        * fsnotify_destroy_group() is called and this makes the other callers
+        * of fsnotify_destroy_group() see the same behavior.
+        */
+       fsnotify_group_stop_queueing(group);
+
        /* clear all inode marks for this group, attach them to destroy_list */
        fsnotify_detach_group_marks(group);
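fsnotify_group_stop_queueing() replaces the fanotify-private bypass_perm counter with a generic shutdown flag set and tested under the same notification_mutex, so once the flag is observed no new event can be queued. A minimal model of that handshake, with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t notification_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool shutdown_flag;
static int q_len;

static void stop_queueing(void)
{
        pthread_mutex_lock(&notification_mutex);
        shutdown_flag = true;
        pthread_mutex_unlock(&notification_mutex);
}

/* Returns 0 if queued, 2 if dropped (overflow or shutdown), matching
 * the convention documented for fsnotify_add_event() above. */
static int add_event(void)
{
        int ret = 0;

        pthread_mutex_lock(&notification_mutex);
        if (shutdown_flag)
                ret = 2;
        else
                q_len++;
        pthread_mutex_unlock(&notification_mutex);
        return ret;
}

int main(void)
{
        printf("%d\n", add_event());    /* 0: queued */
        stop_queueing();
        printf("%d\n", add_event());    /* 2: dropped after shutdown */
        return 0;
}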
 
index a95d8e037aebe24ba36421861d3abaad6f7dfc89..e455e83ceeebc9ea5cb0b3166e10bd505cec43f1 100644 (file)
@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
  * Add an event to the group notification queue.  The group can later pull this
  * event off the queue to deal with.  The function returns 0 if the event was
  * added to the queue, 1 if the event was merged with some other queued event,
- * 2 if the queue of events has overflown.
+ * 2 if the event was not queued - either the queue of events has overflown
+ * or the group is shutting down.
  */
 int fsnotify_add_event(struct fsnotify_group *group,
                       struct fsnotify_event *event,
@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
 
        mutex_lock(&group->notification_mutex);
 
+       if (group->shutdown) {
+               mutex_unlock(&group->notification_mutex);
+               return 2;
+       }
+
        if (group->q_len >= group->max_events) {
                ret = 2;
                /* Queue overflow event only if it isn't already queued */
@@ -125,21 +131,6 @@ queue:
        return ret;
 }
 
-/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
-                          struct fsnotify_event *event)
-{
-       mutex_lock(&group->notification_mutex);
-       if (!list_empty(&event->list)) {
-               list_del_init(&event->list);
-               group->q_len--;
-       }
-       mutex_unlock(&group->notification_mutex);
-}
-
 /*
  * Remove and return the first event from the notification list.  It is the
  * responsibility of the caller to destroy the obtained event
index 7dabbc31060e466e0959f5a8ec09b0028662cd6b..f165f867f3326f1c0dce98c4e1d031308fb65c51 100644 (file)
@@ -5922,7 +5922,6 @@ bail:
 }
 
 static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
-                                        handle_t *handle,
                                         struct inode *data_alloc_inode,
                                         struct buffer_head *data_alloc_bh)
 {
@@ -5935,11 +5934,19 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
        struct ocfs2_truncate_log *tl;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct buffer_head *tl_bh = osb->osb_tl_bh;
+       handle_t *handle;
 
        di = (struct ocfs2_dinode *) tl_bh->b_data;
        tl = &di->id2.i_dealloc;
        i = le16_to_cpu(tl->tl_used) - 1;
        while (i >= 0) {
+               handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
+               if (IS_ERR(handle)) {
+                       status = PTR_ERR(handle);
+                       mlog_errno(status);
+                       goto bail;
+               }
+
                /* Caller has given us at least enough credits to
                 * update the truncate log dinode */
                status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
@@ -5974,12 +5981,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
                        }
                }
 
-               status = ocfs2_extend_trans(handle,
-                               OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-               if (status < 0) {
-                       mlog_errno(status);
-                       goto bail;
-               }
+               ocfs2_commit_trans(osb, handle);
                i--;
        }
 
@@ -5994,7 +5996,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
 {
        int status;
        unsigned int num_to_flush;
-       handle_t *handle;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct inode *data_alloc_inode = NULL;
        struct buffer_head *tl_bh = osb->osb_tl_bh;
@@ -6038,21 +6039,11 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
                goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
-       if (IS_ERR(handle)) {
-               status = PTR_ERR(handle);
-               mlog_errno(status);
-               goto out_unlock;
-       }
-
-       status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
+       status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
                                               data_alloc_bh);
        if (status < 0)
                mlog_errno(status);
 
-       ocfs2_commit_trans(osb, handle);
-
-out_unlock:
        brelse(data_alloc_bh);
        ocfs2_inode_unlock(data_alloc_inode, 1);
 
@@ -6413,43 +6404,34 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
                goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               mlog_errno(ret);
-               goto out_unlock;
-       }
-
        while (head) {
                if (head->free_bg)
                        bg_blkno = head->free_bg;
                else
                        bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
                                                              head->free_bit);
+               handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+                       mlog_errno(ret);
+                       goto out_unlock;
+               }
+
                trace_ocfs2_free_cached_blocks(
                     (unsigned long long)head->free_blk, head->free_bit);
 
                ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
                                               head->free_bit, bg_blkno, 1);
-               if (ret) {
+               if (ret)
                        mlog_errno(ret);
-                       goto out_journal;
-               }
 
-               ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
-               if (ret) {
-                       mlog_errno(ret);
-                       goto out_journal;
-               }
+               ocfs2_commit_trans(osb, handle);
 
                tmp = head;
                head = head->free_next;
                kfree(tmp);
        }
 
-out_journal:
-       ocfs2_commit_trans(osb, handle);
-
 out_unlock:
        ocfs2_inode_unlock(inode, 1);
        brelse(di_bh);
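Both ocfs2 hunks above swap one long transaction that was repeatedly ocfs2_extend_trans()'d inside a loop for a short start/commit pair per record, bounding the journal credits held at any instant. A hedged sketch of the restructuring, with start_trans(), commit_trans() and process_record() as invented stand-ins:

#include <stdio.h>

struct handle { int credits; };

static struct handle *start_trans(int credits)
{
        static struct handle h;

        h.credits = credits;
        return &h;
}

static void commit_trans(struct handle *h) { h->credits = 0; }

static int process_record(struct handle *h, int i)
{
        printf("record %d under %d credits\n", i, h->credits);
        return 0;
}

int main(void)
{
        /* One short transaction per record, instead of extending a
         * single handle for the whole loop. */
        for (int i = 2; i >= 0; i--) {
                struct handle *h = start_trans(1);

                if (process_record(h, i))
                        return 1;
                commit_trans(h);
        }
        return 0;
}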
index 94b18369b1cc58bb620eb4b69a77542515dd4fc0..b95e7df5b76ac750dfb70f3b1ff268876585cd48 100644 (file)
@@ -44,9 +44,6 @@
  * version here in tcp_internal.h should not need to be bumped for
  * filesystem locking changes.
  *
- * New in version 12
- *     - Negotiate hb timeout when storage is down.
- *
  * New in version 11
  *     - Negotiation of filesystem locking in the dlm join.
  *
@@ -78,7 +75,7 @@
  *     - full 64 bit i_size in the metadata lock lvbs
  *     - introduction of "rw" lock and pushing meta/data locking down
  */
-#define O2NET_PROTOCOL_VERSION 12ULL
+#define O2NET_PROTOCOL_VERSION 11ULL
 struct o2net_handshake {
        __be64  protocol_version;
        __be64  connector_id;
index cdeafb4e7ed60d496200cd8a78806c874e9d8c62..0bb128659d4b0b3b0be717776b4ab6bdb06b6a39 100644 (file)
@@ -268,7 +268,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
                                  struct dlm_lock *lock, int flags, int type)
 {
        enum dlm_status status;
-       u8 old_owner = res->owner;
 
        mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
             lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -335,7 +334,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
 
        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
-       lock->convert_pending = 0;
        /* if it failed, move it back to granted queue.
         * if master returns DLM_NORMAL and then down before sending ast,
         * it may have already been moved to granted queue, reset to
@@ -344,12 +342,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
                if (status != DLM_NOTQUEUED)
                        dlm_error(status);
                dlm_revert_pending_convert(res, lock);
-       } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
-                       (old_owner != res->owner)) {
-               mlog(0, "res %.*s is in recovering or has been recovered.\n",
-                               res->lockname.len, res->lockname.name);
+       } else if (!lock->convert_pending) {
+               mlog(0, "%s: res %.*s, owner died and lock has been moved back "
+                               "to granted list, retry convert.\n",
+                               dlm->name, res->lockname.len, res->lockname.name);
                status = DLM_RECOVERING;
        }
+
+       lock->convert_pending = 0;
 bail:
        spin_unlock(&res->spinlock);
 
index 4e7b0dc22450564cd86064061d7d84e8461f2bbf..0b055bfb8e866ddf2c4bf874ff653694081a5dec 100644 (file)
@@ -1506,7 +1506,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
 {
        int ret = 0;
-       u64 tmpend, end = start + len;
+       u64 tmpend = 0;
+       u64 end = start + len;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;
@@ -1538,18 +1539,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        }
 
        /*
-        * We want to get the byte offset of the end of the 1st cluster.
+        * If start is on a cluster boundary and end is somewhere in another
+        * cluster, we have not COWed the cluster starting at start, unless
+        * end is also within the same cluster. So, in this case, we skip this
+        * first call to ocfs2_zero_range_for_truncate() and move on
+        * to the next one.
         */
-       tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
-       if (tmpend > end)
-               tmpend = end;
+       if ((start & (csize - 1)) != 0) {
+               /*
+                * We want to get the byte offset of the end of the 1st
+                * cluster.
+                */
+               tmpend = (u64)osb->s_clustersize +
+                       (start & ~(osb->s_clustersize - 1));
+               if (tmpend > end)
+                       tmpend = end;
 
-       trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
-                                                (unsigned long long)tmpend);
+               trace_ocfs2_zero_partial_clusters_range1(
+                       (unsigned long long)start,
+                       (unsigned long long)tmpend);
 
-       ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
-       if (ret)
-               mlog_errno(ret);
+               ret = ocfs2_zero_range_for_truncate(inode, handle, start,
+                                                   tmpend);
+               if (ret)
+                       mlog_errno(ret);
+       }
 
        if (tmpend < end) {
                /*
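The boundary math in this hunk is easy to misread; here is a standalone check of the same arithmetic, assuming a power-of-two cluster size as ocfs2 guarantees (the values are made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t csize = 4096;            /* cluster size (power of two) */
        uint64_t start = 5000, len = 10000;
        uint64_t end = start + len;

        if (start & (csize - 1)) {        /* start not cluster-aligned */
                /* byte offset of the end of the first cluster */
                uint64_t tmpend = csize + (start & ~(csize - 1));

                if (tmpend > end)
                        tmpend = end;
                printf("zero first range [%llu, %llu)\n",
                       (unsigned long long)start,
                       (unsigned long long)tmpend);
                assert(tmpend == 8192);
        }
        return 0;
}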
index ea47120a85ff246c094dc95b9aa5dd6810a92b91..6ad3533940ba5e9fa32bfe83d3c1acc19b14513a 100644 (file)
@@ -1199,14 +1199,24 @@ retry:
                        inode_unlock((*ac)->ac_inode);
 
                        ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
-                       if (ret == 1)
+                       if (ret == 1) {
+                               iput((*ac)->ac_inode);
+                               (*ac)->ac_inode = NULL;
                                goto retry;
+                       }
 
                        if (ret < 0)
                                mlog_errno(ret);
 
                        inode_lock((*ac)->ac_inode);
-                       ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+                       ret = ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+                       if (ret < 0) {
+                               mlog_errno(ret);
+                               inode_unlock((*ac)->ac_inode);
+                               iput((*ac)->ac_inode);
+                               (*ac)->ac_inode = NULL;
+                               goto bail;
+                       }
                }
                if (status < 0) {
                        if (status != -ENOSPC)
index 54e5d6681786780812c9a5adddc27dd782b40bbb..43fdc2765aea65b85a20b0f28b844a3ba1b40d4e 100644 (file)
@@ -80,6 +80,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
        }
 
        for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+               if (ovl_is_private_xattr(name))
+                       continue;
 retry:
                size = vfs_getxattr(old, name, value, value_size);
                if (size == -ERANGE)
index 12bcd07b9e32c516f89605a7247da493d3ccedad..1560fdc09a5fabf12ed678a3e87e78bb3e43cce2 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include <linux/cred.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
 #include "overlayfs.h"
 
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -186,6 +188,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (!hardlink && !IS_POSIXACL(udir))
+               stat->mode &= ~current_umask();
+
        inode_lock_nested(udir, I_MUTEX_PARENT);
        newdentry = lookup_one_len(dentry->d_name.name, upperdir,
                                   dentry->d_name.len);
@@ -335,6 +340,32 @@ out_free:
        return ret;
 }
 
+static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
+                            const struct posix_acl *acl)
+{
+       void *buffer;
+       size_t size;
+       int err;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
+               return 0;
+
+       size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+       buffer = kmalloc(size, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+       err = size;
+       if (err < 0)
+               goto out_free;
+
+       err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+out_free:
+       kfree(buffer);
+       return err;
+}
+
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                                    struct kstat *stat, const char *link,
                                    struct dentry *hardlink)
@@ -346,10 +377,18 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *upper;
        struct dentry *newdentry;
        int err;
+       struct posix_acl *acl, *default_acl;
 
        if (WARN_ON(!workdir))
                return -EROFS;
 
+       if (!hardlink) {
+               err = posix_acl_create(dentry->d_parent->d_inode,
+                                      &stat->mode, &default_acl, &acl);
+               if (err)
+                       return err;
+       }
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -384,6 +423,17 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                if (err)
                        goto out_cleanup;
        }
+       if (!hardlink) {
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
+                                       acl);
+               if (err)
+                       goto out_cleanup;
+
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
+                                       default_acl);
+               if (err)
+                       goto out_cleanup;
+       }
 
        if (!hardlink && S_ISDIR(stat->mode)) {
                err = ovl_set_opaque(newdentry);
@@ -410,6 +460,10 @@ out_dput:
 out_unlock:
        unlock_rename(workdir, upperdir);
 out:
+       if (!hardlink) {
+               posix_acl_release(acl);
+               posix_acl_release(default_acl);
+       }
        return err;
 
 out_cleanup:
@@ -950,9 +1004,9 @@ const struct inode_operations ovl_dir_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_dir_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
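ovl_set_upper_acl() above uses the common size-then-fill idiom: call the serializer once with a NULL buffer to learn the size, allocate, then call again to fill. The same idiom in plain C, with snprintf() standing in for posix_acl_to_xattr() (a sketch, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int val = 42;
        /* First call with NULL: returns the number of bytes needed. */
        int size = snprintf(NULL, 0, "acl-entry:%d", val);
        char *buffer = malloc(size + 1);

        if (!buffer)
                return 1;               /* -ENOMEM in the kernel version */
        /* Second call actually fills the buffer. */
        snprintf(buffer, size + 1, "acl-entry:%d", val);
        printf("%s (%d bytes)\n", buffer, size);
        free(buffer);
        return 0;
}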
index 1b885c156028d3a4209cf51b6898006c28ac0ee6..c75625c1efa36fafdd7d077b86f874cd6a16b7e7 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/posix_acl.h>
 #include "overlayfs.h"
 
 static int ovl_copy_up_truncate(struct dentry *dentry)
@@ -191,32 +192,44 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
        return err;
 }
 
-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
 {
-#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "."
-       return strncmp(name, OVL_XATTR_PRE_NAME,
-                      sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
+       return strncmp(name, OVL_XATTR_PREFIX,
+                      sizeof(OVL_XATTR_PREFIX) - 1) == 0;
 }
 
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags)
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags)
 {
        int err;
-       struct dentry *upperdentry;
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
        const struct cred *old_cred;
 
        err = ovl_want_write(dentry);
        if (err)
                goto out;
 
+       if (!value && !OVL_TYPE_UPPER(type)) {
+               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
+               if (err < 0)
+                       goto out_drop_write;
+       }
+
        err = ovl_copy_up(dentry);
        if (err)
                goto out_drop_write;
 
-       upperdentry = ovl_dentry_upper(dentry);
+       if (!OVL_TYPE_UPPER(type))
+               ovl_path_upper(dentry, &realpath);
+
        old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_setxattr(upperdentry, name, value, size, flags);
+       if (value)
+               err = vfs_setxattr(realpath.dentry, name, value, size, flags);
+       else {
+               WARN_ON(flags != XATTR_REPLACE);
+               err = vfs_removexattr(realpath.dentry, name);
+       }
        revert_creds(old_cred);
 
 out_drop_write:
@@ -225,16 +238,13 @@ out:
        return err;
 }
 
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size)
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        const struct cred *old_cred;
 
-       if (ovl_is_private_xattr(name))
-               return -ENODATA;
-
        old_cred = ovl_override_creds(dentry->d_sb);
        res = vfs_getxattr(realdentry, name, value, size);
        revert_creds(old_cred);
@@ -245,7 +255,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
-       int off;
+       size_t len;
+       char *s;
        const struct cred *old_cred;
 
        old_cred = ovl_override_creds(dentry->d_sb);
@@ -255,73 +266,39 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                return res;
 
        /* filter out private xattrs */
-       for (off = 0; off < res;) {
-               char *s = list + off;
-               size_t slen = strlen(s) + 1;
+       for (s = list, len = res; len;) {
+               size_t slen = strnlen(s, len) + 1;
 
-               BUG_ON(off + slen > res);
+               /* underlying fs providing us with a broken xattr list? */
+               if (WARN_ON(slen > len))
+                       return -EIO;
 
+               len -= slen;
                if (ovl_is_private_xattr(s)) {
                        res -= slen;
-                       memmove(s, s + slen, res - off);
+                       memmove(s, s + slen, len);
                } else {
-                       off += slen;
+                       s += slen;
                }
        }
 
        return res;
 }
 
-int ovl_removexattr(struct dentry *dentry, const char *name)
-{
-       int err;
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
-       const struct cred *old_cred;
-
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
-       err = -ENODATA;
-       if (ovl_is_private_xattr(name))
-               goto out_drop_write;
-
-       if (!OVL_TYPE_UPPER(type)) {
-               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
-               if (err < 0)
-                       goto out_drop_write;
-
-               err = ovl_copy_up(dentry);
-               if (err)
-                       goto out_drop_write;
-
-               ovl_path_upper(dentry, &realpath);
-       }
-
-       old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_removexattr(realpath.dentry, name);
-       revert_creds(old_cred);
-out_drop_write:
-       ovl_drop_write(dentry);
-out:
-       return err;
-}
-
 struct posix_acl *ovl_get_acl(struct inode *inode, int type)
 {
        struct inode *realinode = ovl_inode_real(inode, NULL);
        const struct cred *old_cred;
        struct posix_acl *acl;
 
-       if (!IS_POSIXACL(realinode))
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
                return NULL;
 
        if (!realinode->i_op->get_acl)
                return NULL;
 
        old_cred = ovl_override_creds(inode->i_sb);
-       acl = realinode->i_op->get_acl(realinode, type);
+       acl = get_acl(realinode, type);
        revert_creds(old_cred);
 
        return acl;
@@ -391,9 +368,9 @@ static const struct inode_operations ovl_file_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
@@ -404,9 +381,9 @@ static const struct inode_operations ovl_symlink_inode_operations = {
        .readlink       = ovl_readlink,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .update_time    = ovl_update_time,
 };
 
@@ -415,6 +392,9 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOCMTIME;
+#ifdef CONFIG_FS_POSIX_ACL
+       inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
+#endif
 
        mode &= S_IFMT;
        switch (mode) {
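The rewritten ovl_listxattr() loop walks a buffer of NUL-terminated names and compacts out the private overlay entries with memmove(); strnlen() guards against a name that runs past the reported length. A standalone sketch of the same filtering (the sample names are invented for the demo):

#include <stdio.h>
#include <string.h>

#define OVL_XATTR_PREFIX "trusted.overlay."

static int is_private(const char *name)
{
        return strncmp(name, OVL_XATTR_PREFIX,
                       sizeof(OVL_XATTR_PREFIX) - 1) == 0;
}

int main(void)
{
        /* xattr name list: NUL-separated names, back to back */
        char list[] = "user.a\0trusted.overlay.opaque\0user.b\0";
        size_t res = sizeof(list) - 1;  /* bytes, as vfs_listxattr() reports */
        size_t len;
        char *s;

        for (s = list, len = res; len;) {
                size_t slen = strnlen(s, len) + 1;

                if (slen > len)         /* malformed list from the lower fs */
                        return 1;       /* -EIO in the kernel version */
                len -= slen;
                if (is_private(s)) {
                        res -= slen;
                        memmove(s, s + slen, len);
                } else {
                        s += slen;
                }
        }
        for (s = list; s < list + res; s += strlen(s) + 1)
                printf("%s\n", s);      /* prints user.a and user.b */
        return 0;
}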
index e4f5c9536bfeaf1346ee908dc8081ee42f726fc1..5813ccff8cd9f47d4a92c9a1c422fa984e3d5976 100644 (file)
@@ -24,8 +24,8 @@ enum ovl_path_type {
        (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
 
 
-#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay"
-#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque"
+#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
+#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 
 #define OVL_ISUPPER_MASK 1UL
 
@@ -179,20 +179,21 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
 void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
 void ovl_cache_free(struct list_head *list);
 int ovl_check_d_type_supported(struct path *realpath);
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level);
 
 /* inode.c */
 int ovl_setattr(struct dentry *dentry, struct iattr *attr);
 int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags);
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size);
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags);
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
-int ovl_removexattr(struct dentry *dentry, const char *name);
 struct posix_acl *ovl_get_acl(struct inode *inode, int type);
 int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
 int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
+bool ovl_is_private_xattr(const char *name);
 
 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
 struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
index cf37fc76fc9fc02f00caf5a64bd632f20942a528..f241b4ee3d8a50b1c03ae4b1e1848b772fd26af1 100644 (file)
@@ -248,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
                        err = rdd->err;
        } while (!err && rdd->count);
 
-       if (!err && rdd->first_maybe_whiteout)
+       if (!err && rdd->first_maybe_whiteout && rdd->dentry)
                err = ovl_check_whiteouts(realpath->dentry, rdd);
 
        fput(realfile);
@@ -606,3 +606,64 @@ int ovl_check_d_type_supported(struct path *realpath)
 
        return rdd.d_type_supported;
 }
+
+static void ovl_workdir_cleanup_recurse(struct path *path, int level)
+{
+       int err;
+       struct inode *dir = path->dentry->d_inode;
+       LIST_HEAD(list);
+       struct ovl_cache_entry *p;
+       struct ovl_readdir_data rdd = {
+               .ctx.actor = ovl_fill_merge,
+               .dentry = NULL,
+               .list = &list,
+               .root = RB_ROOT,
+               .is_lowest = false,
+       };
+
+       err = ovl_dir_read(path, &rdd);
+       if (err)
+               goto out;
+
+       inode_lock_nested(dir, I_MUTEX_PARENT);
+       list_for_each_entry(p, &list, l_node) {
+               struct dentry *dentry;
+
+               if (p->name[0] == '.') {
+                       if (p->len == 1)
+                               continue;
+                       if (p->len == 2 && p->name[1] == '.')
+                               continue;
+               }
+               dentry = lookup_one_len(p->name, path->dentry, p->len);
+               if (IS_ERR(dentry))
+                       continue;
+               if (dentry->d_inode)
+                       ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+               dput(dentry);
+       }
+       inode_unlock(dir);
+out:
+       ovl_cache_free(&list);
+}
+
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level)
+{
+       int err;
+
+       if (!d_is_dir(dentry) || level > 1) {
+               ovl_cleanup(dir, dentry);
+               return;
+       }
+
+       err = ovl_do_rmdir(dir, dentry);
+       if (err) {
+               struct path path = { .mnt = mnt, .dentry = dentry };
+
+               inode_unlock(dir);
+               ovl_workdir_cleanup_recurse(&path, level + 1);
+               inode_lock_nested(dir, I_MUTEX_PARENT);
+               ovl_cleanup(dir, dentry);
+       }
+}
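ovl_workdir_cleanup() tries the cheap rmdir first and recurses exactly one level only when the directory turns out to be non-empty. A userspace sketch of the same shape (remove()/rmdir() replace ovl_cleanup()/ovl_do_rmdir(), and the kernel-specific locking is omitted):

#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void cleanup(const char *path, int level)
{
        struct stat st;

        if (lstat(path, &st))
                return;
        if (!S_ISDIR(st.st_mode) || level > 1) {
                remove(path);                   /* like ovl_cleanup() */
                return;
        }
        if (rmdir(path) == 0)                   /* like ovl_do_rmdir() */
                return;

        /* Non-empty: delete the children, then the directory itself. */
        DIR *d = opendir(path);
        if (d) {
                struct dirent *e;
                char child[4096];

                while ((e = readdir(d))) {
                        if (!strcmp(e->d_name, ".") ||
                            !strcmp(e->d_name, ".."))
                                continue;
                        snprintf(child, sizeof(child), "%s/%s",
                                 path, e->d_name);
                        cleanup(child, level + 1);
                }
                closedir(d);
        }
        remove(path);
}

int main(int argc, char **argv)
{
        if (argc == 2)
                cleanup(argv[1], 0);
        return 0;
}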
index 4036132842b534934020dd0640b721d81320cee3..e2a94a26767ba5df543540c8da39e85977aa371c 100644 (file)
@@ -814,6 +814,10 @@ retry:
                struct kstat stat = {
                        .mode = S_IFDIR | 0,
                };
+               struct iattr attr = {
+                       .ia_valid = ATTR_MODE,
+                       .ia_mode = stat.mode,
+               };
 
                if (work->d_inode) {
                        err = -EEXIST;
@@ -821,7 +825,7 @@ retry:
                                goto out_dput;
 
                        retried = true;
-                       ovl_cleanup(dir, work);
+                       ovl_workdir_cleanup(dir, mnt, work, 0);
                        dput(work);
                        goto retry;
                }
@@ -829,6 +833,21 @@ retry:
                err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
                if (err)
                        goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               /* Clear any inherited mode bits */
+               inode_lock(work->d_inode);
+               err = notify_change(work, &attr, NULL);
+               inode_unlock(work->d_inode);
+               if (err)
+                       goto out_dput;
        }
 out_unlock:
        inode_unlock(dir);
@@ -967,10 +986,19 @@ static unsigned int ovl_split_lowerdirs(char *str)
        return ctr;
 }
 
-static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
-                                  struct dentry *dentry, struct inode *inode,
-                                  const char *name, const void *value,
-                                  size_t size, int flags)
+static int __maybe_unused
+ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, handler->name, buffer, size);
+}
+
+static int __maybe_unused
+ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, const void *value,
+                       size_t size, int flags)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *realinode = ovl_inode_real(inode, NULL);
@@ -998,19 +1026,22 @@ static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
 
        posix_acl_release(acl);
 
-       return ovl_setxattr(dentry, inode, handler->name, value, size, flags);
+       err = ovl_xattr_set(dentry, handler->name, value, size, flags);
+       if (!err)
+               ovl_copyattr(ovl_inode_real(inode, NULL), inode);
+
+       return err;
 
 out_acl_release:
        posix_acl_release(acl);
        return err;
 }
 
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
-                              struct dentry *dentry, struct inode *inode,
-                              const char *name, const void *value,
-                              size_t size, int flags)
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+                            struct dentry *dentry, struct inode *inode,
+                            const char *name, void *buffer, size_t size)
 {
-       return ovl_setxattr(dentry, inode, name, value, size, flags);
+       return -EPERM;
 }
 
 static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1021,42 +1052,59 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
        return -EPERM;
 }
 
-static const struct xattr_handler ovl_posix_acl_access_xattr_handler = {
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, const void *value,
+                              size_t size, int flags)
+{
+       return ovl_xattr_set(dentry, name, value, size, flags);
+}
+
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_access_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_ACCESS,
        .flags = ACL_TYPE_ACCESS,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
-static const struct xattr_handler ovl_posix_acl_default_xattr_handler = {
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_default_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_DEFAULT,
        .flags = ACL_TYPE_DEFAULT,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
 static const struct xattr_handler ovl_own_xattr_handler = {
        .prefix = OVL_XATTR_PREFIX,
+       .get = ovl_own_xattr_get,
        .set = ovl_own_xattr_set,
 };
 
 static const struct xattr_handler ovl_other_xattr_handler = {
        .prefix = "", /* catch all */
+       .get = ovl_other_xattr_get,
        .set = ovl_other_xattr_set,
 };
 
 static const struct xattr_handler *ovl_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
        &ovl_posix_acl_access_xattr_handler,
        &ovl_posix_acl_default_xattr_handler,
+#endif
        &ovl_own_xattr_handler,
        &ovl_other_xattr_handler,
        NULL
 };
 
-static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
-       &ovl_own_xattr_handler,
-       &ovl_other_xattr_handler,
-       NULL,
-};
-
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct path upperpath = { NULL, NULL };
@@ -1132,7 +1180,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        err = -EINVAL;
        stacklen = ovl_split_lowerdirs(lowertmp);
        if (stacklen > OVL_MAX_STACK) {
-               pr_err("overlayfs: too many lower directries, limit is %d\n",
+               pr_err("overlayfs: too many lower directories, limit is %d\n",
                       OVL_MAX_STACK);
                goto out_free_lowertmp;
        } else if (!ufs->config.upperdir && stacklen == 1) {
@@ -1269,10 +1317,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OVERLAYFS_SUPER_MAGIC;
        sb->s_op = &ovl_super_operations;
-       if (IS_ENABLED(CONFIG_FS_POSIX_ACL))
-               sb->s_xattr = ovl_xattr_handlers;
-       else
-               sb->s_xattr = ovl_xattr_noacl_handlers;
+       sb->s_xattr = ovl_xattr_handlers;
        sb->s_root = root_dentry;
        sb->s_fs_info = ufs;
        sb->s_flags |= MS_POSIXACL;
index 4b32928f542661f5e04730ca8c8cb134e59ea531..4ebe6b2e5217c2e26c7185bcf4254d8af3b55872 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
        struct page *page = buf->page;
 
        if (page_count(page) == 1) {
-               if (memcg_kmem_enabled()) {
+               if (memcg_kmem_enabled())
                        memcg_kmem_uncharge(page, 0);
-                       __ClearPageKmemcg(page);
-               }
                __SetPageLocked(page);
                return 0;
        }
index 54e270262979b6f331a31b132171c0b74b92b920..ac0df4dde823866b54480c7062d5188887009365 100644 (file)
@@ -1556,18 +1556,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
 static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
 {
        struct task_struct *task;
-       struct mm_struct *mm;
        struct file *exe_file;
 
        task = get_proc_task(d_inode(dentry));
        if (!task)
                return -ENOENT;
-       mm = get_task_mm(task);
+       exe_file = get_task_exe_file(task);
        put_task_struct(task);
-       if (!mm)
-               return -ENOENT;
-       exe_file = get_mm_exe_file(mm);
-       mmput(mm);
        if (exe_file) {
                *exe_path = exe_file->f_path;
                path_get(&exe_file->f_path);
index a939f5ed7f89ccb39673fef11beb542e5bef8159..5c89a07e3d7f540b48757b907557e5f5df300ff2 100644 (file)
@@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 static ssize_t
 read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 {
+       char *buf = file->private_data;
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
@@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
-                       char * elf_buf;
-
-                       elf_buf = kzalloc(tsz, GFP_KERNEL);
-                       if (!elf_buf)
-                               return -ENOMEM;
-                       vread(elf_buf, (char *)start, tsz);
+                       vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
-                       if (copy_to_user(buffer, elf_buf, tsz)) {
-                               kfree(elf_buf);
+                       if (copy_to_user(buffer, buf, tsz))
                                return -EFAULT;
-                       }
-                       kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;
 
-                               n = copy_to_user(buffer, (char *)start, tsz);
+                               /*
+                                * Using bounce buffer to bypass the
+                                * hardened user copy kernel text checks.
+                                */
+                               memcpy(buf, (char *) start, tsz);
+                               n = copy_to_user(buffer, buf, tsz);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
@@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
 {
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
+
+       filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!filp->private_data)
+               return -ENOMEM;
+
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
@@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static int release_kcore(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
 
 static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
+       .release        = release_kcore,
        .llseek         = default_llseek,
 };
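The /proc/kcore change allocates one page-sized bounce buffer per open and stages every copy through it, because hardened usercopy refuses to copy kernel text straight to userspace. A userspace sketch of the buffer lifecycle (memcpy() stands in for both vread() and copy_to_user()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 4096   /* one page, as in open_kcore() */

int main(void)
{
        const char src[] = "data living somewhere restricted";
        char user_buf[sizeof(src)];
        char *bounce = malloc(BUF_SIZE);        /* open: allocate once */

        if (!bounce)
                return 1;                       /* -ENOMEM in the kernel */
        memcpy(bounce, src, sizeof(src));       /* kernel-side read */
        memcpy(user_buf, bounce, sizeof(src));  /* copy_to_user() stand-in */
        printf("%s\n", user_buf);
        free(bounce);                           /* release: free once */
        return 0;
}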
 
index 09e18fdf61e5b48dd67234b19972a10dd5131662..b9a8c813e5e66b5e751080e1bd7b11b7e8d87634 100644 (file)
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                cached = 0;
 
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-               pages[lru] = global_page_state(NR_LRU_BASE + lru);
+               pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
        available = si_mem_available();
 
index 187d84ef9de9d00cff139ffaa61a524af6b752c2..f6fa99eca5158f36d3fe38a23baac5b7c54ca6bb 100644 (file)
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
+       else if (is_zone_device_page(page))
+               /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
index 183a212694bf8af77ee3f682137709ebdb38a274..12af0490322f9d8d5354258c35f8fe4a4762c9d9 100644 (file)
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/ramfs.h>
+#include <linux/sched.h>
 
 #include "internal.h"
 
+static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
+               unsigned long addr, unsigned long len, unsigned long pgoff,
+               unsigned long flags)
+{
+       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
 const struct file_operations ramfs_file_operations = {
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
@@ -38,6 +46,7 @@ const struct file_operations ramfs_file_operations = {
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .llseek         = generic_file_llseek,
+       .get_unmapped_area      = ramfs_mmu_get_unmapped_area,
 };
 
 const struct inode_operations ramfs_file_inode_operations = {
index 19f532e7d35e9a501256ab64f76a645bf7d6b2e6..6dc4296eed62c5d2a493c5ba3f649c890aa37981 100644 (file)
@@ -223,8 +223,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                size -= n;
                buf += n;
                copied += n;
-               if (!m->count)
+               if (!m->count) {
+                       m->from = 0;
                        m->index++;
+               }
                if (!size)
                        goto Done;
        }
index f35523d4fa3a6d657260e7b04d5273db2dcd53b1..b803213d1307e9137c3bfe5e04ee4ac5bd396cce 100644 (file)
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
         * If buf != of->prealloc_buf, we don't know how
         * large it is, so cannot safely pass it to ->show
         */
-       if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+       if (WARN_ON_ONCE(buf != of->prealloc_buf))
                return 0;
        len = ops->show(kobj, of->kn->priv, buf);
+       if (pos) {
+               if (len <= pos)
+                       return 0;
+               len -= pos;
+               memmove(buf, buf + pos, len);
+       }
        return min(count, len);
 }
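sysfs ->show() callbacks always format the whole attribute from offset 0, so the hunk above serves a read at pos > 0 by shifting the tail of the preallocated buffer to the front and clamping the length. The same offset handling in isolation (read_at() is an invented name for the demo):

#include <stdio.h>
#include <string.h>

static size_t read_at(char *buf, size_t len, size_t pos, size_t count)
{
        if (len <= pos)
                return 0;               /* read past the end: EOF */
        len -= pos;
        memmove(buf, buf + pos, len);
        return count < len ? count : len;
}

int main(void)
{
        char buf[16] = "hello\n";

        printf("%zu\n", read_at(buf, 6, 2, 16));  /* 4: "llo\n" */
        return 0;
}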
 
index b45345d701e77fea809144009e3a46fa05560533..51157da3f76ed87a2a0c7f34c7c2240db116ed00 100644 (file)
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
 
        p = c->gap_lebs;
        do {
-               ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
+               ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
                written = layout_leb_in_gaps(c, p);
                if (written < 0) {
                        err = written;
index e237811f09ce5ce6139aab0951047a74dd715665..11a004114eba5fcd00cce57893c28846ba71d17f 100644 (file)
@@ -575,7 +575,8 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
        dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
                inode->i_ino, dentry, size);
 
-       return  __ubifs_getxattr(inode, name, buffer, size);
+       name = xattr_full_name(handler, name);
+       return __ubifs_getxattr(inode, name, buffer, size);
 }
 
 static int ubifs_xattr_set(const struct xattr_handler *handler,
@@ -586,6 +587,8 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
        dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
                name, inode->i_ino, dentry, size);
 
+       name = xattr_full_name(handler, name);
+
        if (value)
                return __ubifs_setxattr(inode, name, value, size, flags);
        else
index 776ae2f325d1e4f534540206a30a138b01c1ea46..05b5243d89f6ffcb4d77e71455ef34ec88165ebe 100644 (file)
@@ -1582,6 +1582,7 @@ xfs_alloc_ag_vextent_small(
        xfs_extlen_t    *flenp, /* result length */
        int             *stat)  /* status: 0-freelist, 1-normal/none */
 {
+       struct xfs_owner_info   oinfo;
        int             error;
        xfs_agblock_t   fbno;
        xfs_extlen_t    flen;
@@ -1624,6 +1625,18 @@ xfs_alloc_ag_vextent_small(
                                error0);
                        args->wasfromfl = 1;
                        trace_xfs_alloc_small_freelist(args);
+
+                       /*
+                        * If we're feeding an AGFL block to something that
+                        * doesn't live in the free space, we need to clear
+                        * out the OWN_AG rmap.
+                        */
+                       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+                       error = xfs_rmap_free(args->tp, args->agbp, args->agno,
+                                       fbno, 1, &oinfo);
+                       if (error)
+                               goto error0;
+
                        *stat = 0;
                        return 0;
                }
@@ -2264,6 +2277,9 @@ xfs_alloc_log_agf(
                offsetof(xfs_agf_t, agf_longest),
                offsetof(xfs_agf_t, agf_btreeblks),
                offsetof(xfs_agf_t, agf_uuid),
+               offsetof(xfs_agf_t, agf_rmap_blocks),
+               /* needed so that we don't log the whole rest of the structure: */
+               offsetof(xfs_agf_t, agf_spare64),
                sizeof(xfs_agf_t)
        };
 
index b5c213a051cde3f703227f10b73e367470a4df43..08569792fe2096b9f1ae8720cc8f5118e07d8c01 100644 (file)
@@ -1814,6 +1814,10 @@ xfs_btree_lookup(
 
        XFS_BTREE_STATS_INC(cur, lookup);
 
+       /* No such thing as a zero-level tree. */
+       if (cur->bc_nlevels == 0)
+               return -EFSCORRUPTED;
+
        block = NULL;
        keyno = 0;
 
@@ -4554,15 +4558,22 @@ xfs_btree_simple_query_range(
        if (error)
                goto out;
 
+       /* Nothing?  See if there's anything to the right. */
+       if (!stat) {
+               error = xfs_btree_increment(cur, 0, &stat);
+               if (error)
+                       goto out;
+       }
+
        while (stat) {
                /* Find the record. */
                error = xfs_btree_get_rec(cur, &recp, &stat);
                if (error || !stat)
                        break;
-               cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
 
                /* Skip if high_key(rec) < low_key. */
                if (firstrec) {
+                       cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
                        firstrec = false;
                        diff = cur->bc_ops->diff_two_keys(cur, low_key,
                                        &rec_key);
@@ -4571,6 +4582,7 @@ xfs_btree_simple_query_range(
                }
 
                /* Stop if high_key < low_key(rec). */
+               cur->bc_ops->init_key_from_rec(&rec_key, recp);
                diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
                if (diff > 0)
                        break;
index 054a2032fdb392bce4fb6d3a936bc519dfe4596c..c221d0ecd52ed1413a6851dc40b67131c867f028 100644 (file)
@@ -194,7 +194,7 @@ xfs_defer_trans_abort(
        /* Abort intent items. */
        list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
                trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
-               if (dfp->dfp_committed)
+               if (!dfp->dfp_done)
                        dfp->dfp_type->abort_intent(dfp->dfp_intent);
        }
 
@@ -290,7 +290,6 @@ xfs_defer_finish(
        struct xfs_defer_pending        *dfp;
        struct list_head                *li;
        struct list_head                *n;
-       void                            *done_item = NULL;
        void                            *state;
        int                             error = 0;
        void                            (*cleanup_fn)(struct xfs_trans *, void *, int);
@@ -309,19 +308,11 @@ xfs_defer_finish(
                if (error)
                        goto out;
 
-               /* Mark all pending intents as committed. */
-               list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
-                       if (dfp->dfp_committed)
-                               break;
-                       trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
-                       dfp->dfp_committed = true;
-               }
-
                /* Log an intent-done item for the first pending item. */
                dfp = list_first_entry(&dop->dop_pending,
                                struct xfs_defer_pending, dfp_list);
                trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
-               done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+               dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
                                dfp->dfp_count);
                cleanup_fn = dfp->dfp_type->finish_cleanup;
 
@@ -331,7 +322,7 @@ xfs_defer_finish(
                        list_del(li);
                        dfp->dfp_count--;
                        error = dfp->dfp_type->finish_item(*tp, dop, li,
-                                       done_item, &state);
+                                       dfp->dfp_done, &state);
                        if (error) {
                                /*
                                 * Clean up after ourselves and jump out.
@@ -428,8 +419,8 @@ xfs_defer_add(
                dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
                                KM_SLEEP | KM_NOFS);
                dfp->dfp_type = defer_op_types[type];
-               dfp->dfp_committed = false;
                dfp->dfp_intent = NULL;
+               dfp->dfp_done = NULL;
                dfp->dfp_count = 0;
                INIT_LIST_HEAD(&dfp->dfp_work);
                list_add_tail(&dfp->dfp_list, &dop->dop_intake);
index cc3981c482968bb0d16276402637bf16d769fdf4..e96533d178cfceb6afead75e874cb69909883dbb 100644 (file)
@@ -30,8 +30,8 @@ struct xfs_defer_op_type;
 struct xfs_defer_pending {
        const struct xfs_defer_op_type  *dfp_type;      /* function pointers */
        struct list_head                dfp_list;       /* pending items */
-       bool                            dfp_committed;  /* committed trans? */
        void                            *dfp_intent;    /* log intent item */
+       void                            *dfp_done;      /* log done item */
        struct list_head                dfp_work;       /* work items */
        unsigned int                    dfp_count;      /* # extent items */
 };
index f814d42c73b2fb7484dd76ad024f63891de155f0..270fb5cf4fa11eb1ed1ce2170a35b6af1754e460 100644 (file)
@@ -640,12 +640,15 @@ typedef struct xfs_agf {
        __be32          agf_btreeblks;  /* # of blocks held in AGF btrees */
        uuid_t          agf_uuid;       /* uuid of filesystem */
 
+       __be32          agf_rmap_blocks;        /* rmapbt blocks used */
+       __be32          agf_padding;            /* padding */
+
        /*
         * reserve some contiguous space for future logged fields before we add
         * the unlogged fields. This makes the range logging via flags and
         * structure offsets much simpler.
         */
-       __be64          agf_spare64[16];
+       __be64          agf_spare64[15];
 
        /* unlogged fields, written during buffer writeback. */
        __be64          agf_lsn;        /* last write sequence */
@@ -670,7 +673,9 @@ typedef struct xfs_agf {
 #define        XFS_AGF_LONGEST         0x00000400
 #define        XFS_AGF_BTREEBLKS       0x00000800
 #define        XFS_AGF_UUID            0x00001000
-#define        XFS_AGF_NUM_BITS        13
+#define        XFS_AGF_RMAP_BLOCKS     0x00002000
+#define        XFS_AGF_SPARE64         0x00004000
+#define        XFS_AGF_NUM_BITS        15
 #define        XFS_AGF_ALL_BITS        ((1 << XFS_AGF_NUM_BITS) - 1)
 
 #define XFS_AGF_FLAGS \
@@ -686,7 +691,9 @@ typedef struct xfs_agf {
        { XFS_AGF_FREEBLKS,     "FREEBLKS" }, \
        { XFS_AGF_LONGEST,      "LONGEST" }, \
        { XFS_AGF_BTREEBLKS,    "BTREEBLKS" }, \
-       { XFS_AGF_UUID,         "UUID" }
+       { XFS_AGF_UUID,         "UUID" }, \
+       { XFS_AGF_RMAP_BLOCKS,  "RMAP_BLOCKS" }, \
+       { XFS_AGF_SPARE64,      "SPARE64" }
 
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGF_DADDR(mp)      ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
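xfs_alloc_log_agf() logs a field by indexing an offsetof() table with the flag bit: bit n dirties bytes [offsets[n], offsets[n+1]). That is why the hunk must add an XFS_AGF_SPARE64 entry right after agf_rmap_blocks; without the extra bound, logging RMAP_BLOCKS would sweep in the whole rest of the structure. A sketch of the lookup with a toy struct (not the real xfs_agf_t):

#include <stddef.h>
#include <stdio.h>

struct agf {                    /* toy stand-in for xfs_agf_t */
        int  magic;
        int  rmap_blocks;
        long spare64[15];
        long lsn;               /* unlogged tail */
};

int main(void)
{
        static const size_t offsets[] = {
                offsetof(struct agf, magic),       /* bit 0 */
                offsetof(struct agf, rmap_blocks), /* bit 1 */
                offsetof(struct agf, spare64),     /* bit 2: bounds bit 1 */
                sizeof(struct agf),
        };
        int bit = 1;            /* log rmap_blocks only */

        printf("log bytes [%zu, %zu)\n", offsets[bit], offsets[bit + 1]);
        return 0;
}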
index bc1faebc84eccdf658ec177029beb3b4d84455b6..17b8eeb34ac89ffb9260d7ac1c3cf342148eb1da 100644 (file)
@@ -98,6 +98,8 @@ xfs_rmapbt_alloc_block(
        union xfs_btree_ptr     *new,
        int                     *stat)
 {
+       struct xfs_buf          *agbp = cur->bc_private.a.agbp;
+       struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        int                     error;
        xfs_agblock_t           bno;
 
@@ -124,6 +126,8 @@ xfs_rmapbt_alloc_block(
 
        xfs_trans_agbtree_delta(cur->bc_tp, 1);
        new->s = cpu_to_be32(bno);
+       be32_add_cpu(&agf->agf_rmap_blocks, 1);
+       xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 
        XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
        *stat = 1;
@@ -143,6 +147,8 @@ xfs_rmapbt_free_block(
        bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
        trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
                        bno, 1);
+       be32_add_cpu(&agf->agf_rmap_blocks, -1);
+       xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
        error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
        if (error)
                return error;
index 0e3d4f5ec33c6f945b30b5ad47c9f39c46139ac0..4aecc5fefe9656e7e1f3812fc52c80cfc7e8eb9b 100644 (file)
@@ -583,7 +583,8 @@ xfs_sb_verify(
         * Only check the in progress field for the primary superblock as
         * mkfs.xfs doesn't clear it from secondary superblocks.
         */
-       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+       return xfs_mount_validate_sb(mp, &sb,
+                                    bp->b_maps[0].bm_bn == XFS_SB_DADDR,
                                     check_version);
 }
 
index 47a318ce82e0ab5828164e6604ffe9dd5f66a0d6..b5b9bffe352074806910a064ae41a941c97678c8 100644 (file)
@@ -115,7 +115,6 @@ xfs_buf_ioacct_dec(
        if (!(bp->b_flags & _XBF_IN_FLIGHT))
                return;
 
-       ASSERT(bp->b_flags & XBF_ASYNC);
        bp->b_flags &= ~_XBF_IN_FLIGHT;
        percpu_counter_dec(&bp->b_target->bt_io_count);
 }
@@ -1612,7 +1611,7 @@ xfs_wait_buftarg(
         */
        while (percpu_counter_sum(&btp->bt_io_count))
                delay(100);
-       drain_workqueue(btp->bt_mount->m_buf_workqueue);
+       flush_workqueue(btp->bt_mount->m_buf_workqueue);
 
        /* loop until there is nothing left on the lru list. */
        while (list_lru_count(&btp->bt_lru)) {
index ed95e5bb04e692b614983ea83bf870ac373a9b0c..e612a0233710850f34eae93540b098c3ae7cda8c 100644 (file)
@@ -741,9 +741,20 @@ xfs_file_dax_write(
         * page is inserted into the pagecache when we have to serve a write
         * fault on a hole.  It should never be dirtied and can simply be
         * dropped from the pagecache once we get real data for the page.
+        *
+        * XXX: This is racy against mmap, and there's nothing we can do about
+        * it. dax_do_io() should really do this invalidation internally as
+        * it will know if we've allocated over a hole for this specific IO and
+        * if so it needs to update the mapping tree and invalidate existing
+        * PTEs over the newly allocated range. Remove this invalidation when
+        * dax_do_io() is fixed up.
         */
        if (mapping->nrpages) {
-               ret = invalidate_inode_pages2(mapping);
+               loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+
+               ret = invalidate_inode_pages2_range(mapping,
+                                                   iocb->ki_pos >> PAGE_SHIFT,
+                                                   end >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
        }
 
index 0f96847b90e1175d2c6d0f497fe278e67f95e141..0b7f986745c17ff044a4d230329db1951837759a 100644 (file)
@@ -248,6 +248,7 @@ xfs_growfs_data_private(
                        agf->agf_roots[XFS_BTNUM_RMAPi] =
                                                cpu_to_be32(XFS_RMAP_BLOCK(mp));
                        agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
+                       agf->agf_rmap_blocks = cpu_to_be32(1);
                }
 
                agf->agf_flfirst = cpu_to_be32(1);
index 2114d53df433134a35084635b5238e2a775c6f0f..2af0dda1c978d5bc463670a09308577770965e7d 100644 (file)
@@ -715,12 +715,16 @@ xfs_iomap_write_allocate(
                 * is in the delayed allocation extent on which we sit
                 * but before our buffer starts.
                 */
-
                nimaps = 0;
                while (nimaps == 0) {
                        nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
-
-                       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
+                       /*
+                        * We have already reserved space for the extent and any
+                        * indirect blocks when creating the delalloc extent, so
+                        * there is no need to reserve space in this transaction
+                        * again.
+                        */
+                       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
                                        0, XFS_TRANS_RESERVE, &tp);
                        if (error)
                                return error;
@@ -1037,20 +1041,14 @@ xfs_file_iomap_begin(
                        return error;
 
                trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
-               xfs_bmbt_to_iomap(ip, iomap, &imap);
-       } else if (nimaps) {
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-               trace_xfs_iomap_found(ip, offset, length, 0, &imap);
-               xfs_bmbt_to_iomap(ip, iomap, &imap);
        } else {
+               ASSERT(nimaps);
+
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
-               trace_xfs_iomap_not_found(ip, offset, length, 0, &imap);
-               iomap->blkno = IOMAP_NULL_BLOCK;
-               iomap->type = IOMAP_HOLE;
-               iomap->offset = offset;
-               iomap->length = length;
+               trace_xfs_iomap_found(ip, offset, length, 0, &imap);
        }
 
+       xfs_bmbt_to_iomap(ip, iomap, &imap);
        return 0;
 }
 
@@ -1112,3 +1110,48 @@ struct iomap_ops xfs_iomap_ops = {
        .iomap_begin            = xfs_file_iomap_begin,
        .iomap_end              = xfs_file_iomap_end,
 };
+
+static int
+xfs_xattr_iomap_begin(
+       struct inode            *inode,
+       loff_t                  offset,
+       loff_t                  length,
+       unsigned                flags,
+       struct iomap            *iomap)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
+       xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
+       struct xfs_bmbt_irec    imap;
+       int                     nimaps = 1, error = 0;
+       unsigned                lockmode;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       lockmode = xfs_ilock_data_map_shared(ip);
+
+       /* if there is no attribute fork or no extents, return ENOENT */
+       if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+               error = -ENOENT;
+               goto out_unlock;
+       }
+
+       ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
+       error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+                              &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
+out_unlock:
+       xfs_iunlock(ip, lockmode);
+
+       if (!error) {
+               ASSERT(nimaps);
+               xfs_bmbt_to_iomap(ip, iomap, &imap);
+       }
+
+       return error;
+}
+
+struct iomap_ops xfs_xattr_iomap_ops = {
+       .iomap_begin            = xfs_xattr_iomap_begin,
+};
index e066d045e2ffe629919517b9003ad88a036b0449..fb8aca3d69ab30c458a94733e6dc9f7f0e2d0b8d 100644 (file)
@@ -35,5 +35,6 @@ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
                struct xfs_bmbt_irec *);
 
 extern struct iomap_ops xfs_iomap_ops;
+extern struct iomap_ops xfs_xattr_iomap_ops;
 
 #endif /* __XFS_IOMAP_H__*/
index ab820f84ed507c26b6b355d70660814c82f377c0..b24c3102fa93f94fd98b3fd91d0d23e4c624b646 100644 (file)
@@ -1009,7 +1009,14 @@ xfs_vn_fiemap(
        int                     error;
 
        xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
-       error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops);
+       if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+               fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
+               error = iomap_fiemap(inode, fieinfo, start, length,
+                               &xfs_xattr_iomap_ops);
+       } else {
+               error = iomap_fiemap(inode, fieinfo, start, length,
+                               &xfs_iomap_ops);
+       }
        xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
 
        return error;
index 24ef83ef04de2be5c8fd1e77ab62fa3b59106376..fd6be45b3a1e01c81bfe179d67818bafc9f6ec92 100644 (file)
@@ -1574,9 +1574,16 @@ xfs_fs_fill_super(
                }
        }
 
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+       if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+               if (mp->m_sb.sb_rblocks) {
+                       xfs_alert(mp,
+       "EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
+                       error = -EINVAL;
+                       goto out_filestream_unmount;
+               }
                xfs_alert(mp,
        "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
+       }
 
        error = xfs_mountfs(mp);
        if (error)
index 551b7e26980c51886d4bd8edb19c45824fc45198..d303a665dba9df637026e36709a962e261792429 100644 (file)
@@ -1298,7 +1298,6 @@ DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
 DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
 DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_not_found);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -2296,7 +2295,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
                __entry->dev = mp ? mp->m_super->s_dev : 0;
                __entry->type = dfp->dfp_type->type;
                __entry->intent = dfp->dfp_intent;
-               __entry->committed = dfp->dfp_committed;
+               __entry->committed = dfp->dfp_done != NULL;
                __entry->nr = dfp->dfp_count;
        ),
        TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
index 54a8e65e18b622edf10df6d07ca0d84fea9e49ae..7d026bf277131f7bc4c79529cc26488e3a8281ee 100644 (file)
 #include <asm-generic/qrwlock_types.h>
 
 /*
- * Writer states & reader shift and bias
+ * Writer states & reader shift and bias.
+ *
+ *       | +0 | +1 | +2 | +3 |
+ *   ----+----+----+----+----+
+ *    LE | 78 | 56 | 34 | 12 | 0x12345678
+ *   ----+----+----+----+----+
+ *       | wr |      rd      |
+ *       +----+----+----+----+
+ *
+ *   ----+----+----+----+----+
+ *    BE | 12 | 34 | 56 | 78 | 0x12345678
+ *   ----+----+----+----+----+
+ *       |      rd      | wr |
+ *       +----+----+----+----+
  */
 #define        _QW_WAITING     1               /* A writer is waiting     */
 #define        _QW_LOCKED      0xff            /* A writer holds the lock */
@@ -133,13 +146,23 @@ static inline void queued_read_unlock(struct qrwlock *lock)
        (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
+/**
+ * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: the write byte address of a queue rwlock
+ */
+static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
+{
+       return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
+}
+
 /**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-       smp_store_release((u8 *)&lock->cnts, 0);
+       smp_store_release(__qrwlock_write_byte(lock), 0);
 }
 
 /*
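
The table above is exactly what __qrwlock_write_byte() encodes: the writer byte is the low-order byte of the 32-bit cnts word, i.e. offset 0 on little-endian and offset 3 on big-endian, hence the 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN) term. A standalone userspace sketch of the same computation, substituting a runtime endianness probe for the config check:

#include <stdio.h>
#include <stdint.h>

static uint8_t *write_byte(uint32_t *cnts)
{
	uint32_t probe = 1;
	int big_endian = (*(uint8_t *)&probe == 0);	/* low byte of 1 */

	return (uint8_t *)cnts + 3 * big_endian;
}

int main(void)
{
	uint32_t cnts = 0x12345678;

	/* Prints "78" on both little- and big-endian hosts. */
	printf("%02x\n", *write_byte(&cnts));
	return 0;
}
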
index 1bfa602958f2a2f7beb16fab8f98263d198c652b..6df9b0749671b26801cbc8bec79f21f8cab34437 100644 (file)
@@ -72,6 +72,7 @@ struct exception_table_entry
 /* Returns 0 if exception not found and fixup otherwise.  */
 extern unsigned long search_exception_table(unsigned long);
 
+
 /*
  * architectures with an MMU should override these two
  */
@@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
        might_fault();                                          \
        access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?             \
                __get_user((x), (__typeof__(*(ptr)) *)__p) :    \
-               -EFAULT;                                        \
+               ((x) = (__typeof__(*(ptr)))0, -EFAULT);         \
 })
 
 #ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-       size = __copy_from_user(x, ptr, size);
-       return size ? -EFAULT : size;
+       size_t n = __copy_from_user(x, ptr, size);
+       if (unlikely(n)) {
+               memset(x + (size - n), 0, n);
+               return -EFAULT;
+       }
+       return 0;
 }
 
 #define __get_user_fn(sz, u, k)        __get_user_fn(sz, u, k)
@@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline long copy_from_user(void *to,
                const void __user * from, unsigned long n)
 {
+       unsigned long res = n;
        might_fault();
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_from_user(to, from, n);
-       else
-               return n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline long copy_to_user(void __user *to,
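
The point of zeroing the uncopied tail is visible from the caller side: after a failed or partial copy the destination buffer is fully initialized, so stale kernel stack contents can no longer leak back to userspace through a later copy_to_user(). A hypothetical ioctl handler sketch (all names illustrative):

#include <linux/fs.h>
#include <linux/uaccess.h>

struct my_args {			/* hypothetical argument block */
	__u32 flags;
	__u64 reserved[4];
};

static long my_ioctl(struct file *file, unsigned long arg)
{
	struct my_args args;

	/*
	 * Even if copy_from_user() faults partway through, the tail of
	 * "args" is now zeroed, so nothing uninitialized survives here.
	 */
	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	/* ... act on args ... */
	return 0;
}
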
index 261b86d20e7718f13c95290c63e277b5fafca96d..9cd8838e1ec36254fe0e53459ba0b9321d2ec801 100644 (file)
@@ -38,6 +38,9 @@ struct analogix_dp_plat_data {
                         struct drm_connector *);
 };
 
+int analogix_dp_enable_psr(struct device *dev);
+int analogix_dp_disable_psr(struct device *dev);
+
 int analogix_dp_resume(struct device *dev);
 int analogix_dp_suspend(struct device *dev);
 
index d3778652e4629ad4765de4aa892a0ff950647b34..c53dc90942e054e8722bea81f35e655957b638b6 100644 (file)
@@ -127,6 +127,7 @@ struct dma_buf_attachment;
  * run-time by echoing the debug value in its sysfs node:
  *   # echo 0xf > /sys/module/drm/parameters/debug
  */
+#define DRM_UT_NONE            0x00
 #define DRM_UT_CORE            0x01
 #define DRM_UT_DRIVER          0x02
 #define DRM_UT_KMS             0x04
@@ -134,11 +135,15 @@ struct dma_buf_attachment;
 #define DRM_UT_ATOMIC          0x10
 #define DRM_UT_VBL             0x20
 
-extern __printf(2, 3)
-void drm_ut_debug_printk(const char *function_name,
-                        const char *format, ...);
-extern __printf(1, 2)
-void drm_err(const char *format, ...);
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+                   unsigned int category, const char *function_name,
+                   const char *prefix, const char *format, ...);
+
+extern __printf(5, 6)
+void drm_printk(const char *level, unsigned int category,
+               const char *function_name, const char *prefix,
+               const char *format, ...);
 
 /***********************************************************************/
 /** \name DRM template customization defaults */
@@ -146,6 +151,7 @@ void drm_err(const char *format, ...);
 
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP                 0x1
+#define DRIVER_LEGACY                  0x2
 #define DRIVER_PCI_DMA                 0x8
 #define DRIVER_SG                      0x10
 #define DRIVER_HAVE_DMA                        0x20
@@ -162,14 +168,38 @@ void drm_err(const char *format, ...);
 /** \name Macros to make printk easier */
 /*@{*/
 
+#define _DRM_PRINTK(once, level, fmt, ...)                             \
+       do {                                                            \
+               printk##once(KERN_##level "[" DRM_NAME "] " fmt,        \
+                            ##__VA_ARGS__);                            \
+       } while (0)
+
+#define DRM_INFO(fmt, ...)                                             \
+       _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...)                                             \
+       _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...)                                             \
+       _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
 /**
  * Error output.
  *
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR(fmt, ...)                            \
-       drm_err(fmt, ##__VA_ARGS__)
+#define DRM_DEV_ERROR(dev, fmt, ...)                                   \
+       drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
+                      fmt, ##__VA_ARGS__)
+#define DRM_ERROR(fmt, ...)                                            \
+       drm_printk(KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*", fmt,    \
+                  ##__VA_ARGS__)
 
 /**
  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
@@ -177,21 +207,30 @@ void drm_err(const char *format, ...);
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR_RATELIMITED(fmt, ...)                                \
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)                       \
 ({                                                                     \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
                                      DEFAULT_RATELIMIT_INTERVAL,       \
                                      DEFAULT_RATELIMIT_BURST);         \
                                                                        \
        if (__ratelimit(&_rs))                                          \
-               drm_err(fmt, ##__VA_ARGS__);                            \
+               DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);                 \
 })
+#define DRM_ERROR_RATELIMITED(fmt, ...)                                        \
+       DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_INFO(fmt, ...)                             \
-       printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO(dev, fmt, ...)                                    \
+       drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,  \
+                      ##__VA_ARGS__)
 
-#define DRM_INFO_ONCE(fmt, ...)                                \
-       printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...)                               \
+({                                                                     \
+       static bool __print_once __read_mostly;                         \
+       if (!__print_once) {                                            \
+               __print_once = true;                                    \
+               DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);                  \
+       }                                                               \
+})
 
 /**
  * Debug output.
@@ -199,37 +238,74 @@ void drm_err(const char *format, ...);
  * \param fmt printf() like format string.
  * \param arg arguments
  */
+#define DRM_DEV_DEBUG(dev, fmt, args...)                               \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
+                      ##args)
 #define DRM_DEBUG(fmt, args...)                                                \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_CORE))                  \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, ##args)
 
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...)                                \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "",    \
+                      fmt, ##args)
 #define DRM_DEBUG_DRIVER(fmt, args...)                                 \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_DRIVER))                \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_DRIVER, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_KMS(dev, fmt, args...)                           \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,  \
+                      ##args)
 #define DRM_DEBUG_KMS(fmt, args...)                                    \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_KMS))                   \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...)                         \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "",     \
+                      fmt, ##args)
 #define DRM_DEBUG_PRIME(fmt, args...)                                  \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_PRIME))                 \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_PRIME, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...)                                \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "",    \
+                      fmt, ##args)
 #define DRM_DEBUG_ATOMIC(fmt, args...)                                 \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_ATOMIC))                \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, args...)                           \
+       drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt,  \
+                      ##args)
 #define DRM_DEBUG_VBL(fmt, args...)                                    \
-       do {                                                            \
-               if (unlikely(drm_debug & DRM_UT_VBL))                   \
-                       drm_ut_debug_printk(__func__, fmt, ##args);     \
-       } while (0)
+       drm_printk(KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, ##args)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...)    \
+({                                                                     \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+       if (__ratelimit(&_rs))                                          \
+               drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level,       \
+                              __func__, "", fmt, ##args);              \
+})
+
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...)                   \
+       _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
+#define DRM_DEBUG_RATELIMITED(fmt, args...)                            \
+       DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...)            \
+       _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...)                     \
+       DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...)               \
+       _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...)                                \
+       DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...)             \
+       _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...)                      \
+       DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
 
 /*@}*/
 
@@ -320,7 +396,6 @@ struct drm_file {
        unsigned is_master:1;
 
        struct pid *pid;
-       kuid_t uid;
        drm_magic_t magic;
        struct list_head lhead;
        struct drm_minor *minor;
@@ -642,7 +717,7 @@ struct drm_driver {
 };
 
 enum drm_minor_type {
-       DRM_MINOR_LEGACY,
+       DRM_MINOR_PRIMARY,
        DRM_MINOR_CONTROL,
        DRM_MINOR_RENDER,
        DRM_MINOR_CNT,
@@ -856,7 +931,7 @@ static inline bool drm_is_control_client(const struct drm_file *file_priv)
 
 static inline bool drm_is_primary_client(const struct drm_file *file_priv)
 {
-       return file_priv->minor->type == DRM_MINOR_LEGACY;
+       return file_priv->minor->type == DRM_MINOR_PRIMARY;
 }
 
 /******************************************************************/
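
Taken together, the logging rework looks like this from a driver's point of view: the new DRM_DEV_* variants take a struct device so output can be attributed to a specific GPU, while the legacy spellings remain and now route through drm_printk(). A hedged sketch, with example_probe purely illustrative:

static int example_probe(struct drm_device *ddev)
{
	/* Device-aware variants prefix messages with the given device. */
	DRM_DEV_INFO(ddev->dev, "probing\n");
	DRM_DEV_DEBUG_DRIVER(ddev->dev, "taking the debug path\n");

	if (!ddev->dev) {
		DRM_ERROR("no parent device\n");
		return -ENODEV;
	}

	/* Legacy spellings still work, now via drm_printk(). */
	DRM_INFO("probe complete\n");
	return 0;
}
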
index 856a9c85a8383b5c382c6537375a4e33f5e6e7c0..9701f2dfb7840fe3d67689482005449054360aac 100644 (file)
 
 #include <drm/drm_crtc.h>
 
+/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block, this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow a fine-grained synchronization
+ * between outstanding updates::
+ *
+ *     atomic commit thread                    hardware
+ *
+ *     write new state into hardware   ---->   ...
+ *     signal hw_done
+ *                                             switch to new state on next
+ *     ...                                     v/hblank
+ *
+ *     wait for buffers to show up             ...
+ *
+ *     ...                                     send completion irq
+ *                                             irq handler signals flip_done
+ *     cleanup old buffers
+ *
+ *     signal cleanup_done
+ *
+ *     wait for flip_done              <----
+ *     clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+       /**
+        * @crtc:
+        *
+        * DRM CRTC for this commit.
+        */
+       struct drm_crtc *crtc;
+
+       /**
+        * @ref:
+        *
+        * Reference count for this structure. Needed to allow blocking on
+        * completions without the risk of the completion disappearing
+        * meanwhile.
+        */
+       struct kref ref;
+
+       /**
+        * @flip_done:
+        *
+        * Will be signaled when the hardware has flipped to the new set of
+        * buffers. Signals at the same time as when the drm event for this
+        * commit is sent to userspace, or when an out-fence is signalled. Note
+        * that for most hardware, in most cases this happens after @hw_done is
+        * signalled.
+        */
+       struct completion flip_done;
+
+       /**
+        * @hw_done:
+        *
+        * Will be signalled when all hw register changes for this commit have
+        * been written out. Especially when disabling a pipe this can be much
+        * later than @flip_done, since that can signal already when the
+        * screen goes black, whereas to fully shut down a pipe more register
+        * I/O is required.
+        *
+        * Note that this does not need to include separately reference-counted
+        * resources like backing storage buffer pinning, or runtime pm
+        * management.
+        */
+       struct completion hw_done;
+
+       /**
+        * @cleanup_done:
+        *
+        * Will be signalled after old buffers have been cleaned up by calling
+        * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+        * a vblank wait has completed it might be a bit later. This completion is
+        * useful to throttle updates and avoid hardware updates getting ahead
+        * of the buffer cleanup too much.
+        */
+       struct completion cleanup_done;
+
+       /**
+        * @commit_entry:
+        *
+        * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+        */
+       struct list_head commit_entry;
+
+       /**
+        * @event:
+        *
+        * &drm_pending_vblank_event pointer to clean up private events.
+        */
+       struct drm_pending_vblank_event *event;
+};
+
+struct __drm_planes_state {
+       struct drm_plane *ptr;
+       struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+       struct drm_crtc *ptr;
+       struct drm_crtc_state *state;
+       struct drm_crtc_commit *commit;
+};
+
+struct __drm_connectors_state {
+       struct drm_connector *ptr;
+       struct drm_connector_state *state;
+};
+
+/**
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
+ * @planes: pointer to array of structures with per-plane data
+ * @crtcs: pointer to array of CRTC pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of structures with per-connector data
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+       struct drm_device *dev;
+       bool allow_modeset : 1;
+       bool legacy_cursor_update : 1;
+       bool legacy_set_config : 1;
+       struct __drm_planes_state *planes;
+       struct __drm_crtcs_state *crtcs;
+       int num_connector;
+       struct __drm_connectors_state *connectors;
+
+       struct drm_modeset_acquire_ctx *acquire_ctx;
+
+       /**
+        * @commit_work:
+        *
+        * Work item which can be used by the driver or helpers to execute the
+        * commit without blocking.
+        */
+       struct work_struct commit_work;
+};
+
 void drm_crtc_commit_put(struct drm_crtc_commit *commit);
 static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
 {
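
A minimal sketch of the synchronization pattern the kerneldoc above describes: take a reference before sleeping on one of the commit's completions so the commit cannot be freed under the waiter. The helper name is illustrative; the fields are those of struct drm_crtc_commit above:

static void example_wait_for_hw_done(struct drm_crtc_commit *commit)
{
	/* Pin the commit so it outlives our sleep. */
	drm_crtc_commit_get(commit);
	wait_for_completion(&commit->hw_done);
	drm_crtc_commit_put(commit);
}
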
index d86ae5dcd7b495fb203ec096068c09d749ab97d1..7ff92b09fd9c423059872d00eb3aa039d6f2b5b9 100644 (file)
@@ -29,6 +29,8 @@
 #define DRM_ATOMIC_HELPER_H_
 
 #include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 struct drm_atomic_state;
 
@@ -43,8 +45,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock);
 
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-                                       struct drm_atomic_state *state);
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+                                       struct drm_atomic_state *state,
+                                       bool pre_swap);
 bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
                                           struct drm_atomic_state *old_state,
                                           struct drm_crtc *crtc);
@@ -63,14 +66,19 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state);
+
+#define DRM_PLANE_COMMIT_ACTIVE_ONLY                   BIT(0)
+#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET      BIT(1)
+
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state,
-                                    bool active_only);
+                                    uint32_t flags);
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
                                      struct drm_atomic_state *old_state);
 void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-                                             bool atomic);
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+                                        bool atomic);
 
 void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
                                  bool stall);
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
new file mode 100644 (file)
index 0000000..36baa17
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BLEND_H__
+#define __DRM_BLEND_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+
+struct drm_device;
+struct drm_atomic_state;
+
+/*
+ * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
+ * specified amount in degrees in the counter-clockwise direction. DRM_REFLECT_X
+ * and DRM_REFLECT_Y reflect the image along the specified axis prior to rotation.
+ *
+ * WARNING: These defines are UABI since they're exposed in the rotation
+ * property.
+ */
+#define DRM_ROTATE_0   BIT(0)
+#define DRM_ROTATE_90  BIT(1)
+#define DRM_ROTATE_180 BIT(2)
+#define DRM_ROTATE_270 BIT(3)
+#define DRM_ROTATE_MASK (DRM_ROTATE_0   | DRM_ROTATE_90 | \
+                        DRM_ROTATE_180 | DRM_ROTATE_270)
+#define DRM_REFLECT_X  BIT(4)
+#define DRM_REFLECT_Y  BIT(5)
+#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+                                                      unsigned int supported_rotations);
+unsigned int drm_rotation_simplify(unsigned int rotation,
+                                  unsigned int supported_rotations);
+
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+                                  unsigned int zpos,
+                                  unsigned int min, unsigned int max);
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+                                            unsigned int zpos);
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+                             struct drm_atomic_state *state);
+#endif
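
A hedged sketch of attaching the rotation property during plane initialization, exposing a subset of the bits defined above (drm_object_attach_property() comes from the mode-object core; the example_ name is illustrative):

static int example_attach_rotation(struct drm_device *dev,
				   struct drm_plane *plane)
{
	struct drm_property *prop;

	/* Advertise 0/180 degree rotation plus X reflection. */
	prop = drm_mode_create_rotation_property(dev,
						 DRM_ROTATE_0 |
						 DRM_ROTATE_180 |
						 DRM_REFLECT_X);
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&plane->base, prop, DRM_ROTATE_0);
	return 0;
}
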
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
new file mode 100644 (file)
index 0000000..530a1d6
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BRIDGE_H__
+#define __DRM_BRIDGE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_modes.h>
+
+struct drm_bridge;
+
+/**
+ * struct drm_bridge_funcs - drm_bridge control functions
+ */
+struct drm_bridge_funcs {
+       /**
+        * @attach:
+        *
+        * This callback is invoked whenever our bridge is being attached to a
+        * &drm_encoder.
+        *
+        * The attach callback is optional.
+        *
+        * RETURNS:
+        *
+        * Zero on success, error code on failure.
+        */
+       int (*attach)(struct drm_bridge *bridge);
+
+       /**
+        * @detach:
+        *
+        * This callback is invoked whenever our bridge is being detached from a
+        * &drm_encoder.
+        *
+        * The detach callback is optional.
+        */
+       void (*detach)(struct drm_bridge *bridge);
+
+       /**
+        * @mode_fixup:
+        *
+        * This callback is used to validate and adjust a mode. The parameter
+        * mode is the display mode that should be fed to the next element in
+        * the display chain, either the final &drm_connector or the next
+        * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+        * requires. It can be modified by this callback and does not need to
+        * match mode.
+        *
+        * This is the only hook that allows a bridge to reject a modeset. If
+        * this function passes, all other callbacks must succeed for this
+        * configuration.
+        *
+        * The mode_fixup callback is optional.
+        *
+        * NOTE:
+        *
+        * This function is called in the check phase of atomic modesets, which
+        * can be aborted for any reason (including on userspace's request to
+        * just check whether a configuration would be possible). Drivers MUST
+        * NOT touch any persistent state (hardware or software) or data
+        * structures except the passed in mode and adjusted_mode parameters.
+        *
+        * RETURNS:
+        *
+        * True if an acceptable configuration is possible, false if the modeset
+        * operation should be rejected.
+        */
+       bool (*mode_fixup)(struct drm_bridge *bridge,
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       /**
+        * @disable:
+        *
+        * This callback should disable the bridge. It is called right before
+        * the preceding element in the display pipe is disabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's ->disable() function. If the preceding element is a
+        * &drm_encoder it's called right before the encoder's ->disable(),
+        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is still running when this callback is called.
+        *
+        * The disable callback is optional.
+        */
+       void (*disable)(struct drm_bridge *bridge);
+
+       /**
+        * @post_disable:
+        *
+        * This callback should disable the bridge. It is called right after
+        * the preceding element in the display pipe is disabled. If the
+        * preceding element is a bridge this means it's called after that
+        * bridge's ->post_disable() function. If the preceding element is a
+        * &drm_encoder it's called right after the encoder's ->disable(),
+        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge must assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is no longer running when this callback is
+        * called.
+        *
+        * The post_disable callback is optional.
+        */
+       void (*post_disable)(struct drm_bridge *bridge);
+
+       /**
+        * @mode_set:
+        *
+        * This callback should set the given mode on the bridge. It is called
+        * after the ->mode_set() callback for the preceding element in the
+        * display pipeline has been called already. The display pipe (i.e.
+        * clocks and timing signals) is off when this function is called.
+        */
+       void (*mode_set)(struct drm_bridge *bridge,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+       /**
+        * @pre_enable:
+        *
+        * This callback should enable the bridge. It is called right before
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's ->pre_enable() function. If the preceding element is a
+        * &drm_encoder it's called right before the encoder's ->enable(),
+        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The display pipe (i.e. clocks and timing signals) feeding this bridge
+        * will not yet be running when this callback is called. The bridge must
+        * not enable the display link feeding the next bridge in the chain (if
+        * there is one) when this callback is called.
+        *
+        * The pre_enable callback is optional.
+        */
+       void (*pre_enable)(struct drm_bridge *bridge);
+
+       /**
+        * @enable:
+        *
+        * This callback should enable the bridge. It is called right after
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called after that
+        * bridge's ->enable() function. If the preceding element is a
+        * &drm_encoder it's called right after the encoder's ->enable(),
+        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is running when this callback is called. This
+        * callback must enable the display link feeding the next bridge in the
+        * chain if there is one.
+        *
+        * The enable callback is optional.
+        */
+       void (*enable)(struct drm_bridge *bridge);
+};
+
+/**
+ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @encoder: encoder to which this bridge is connected
+ * @next: the next bridge in the encoder chain
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+       struct drm_device *dev;
+       struct drm_encoder *encoder;
+       struct drm_bridge *next;
+#ifdef CONFIG_OF
+       struct device_node *of_node;
+#endif
+       struct list_head list;
+
+       const struct drm_bridge_funcs *funcs;
+       void *driver_private;
+};
+
+int drm_bridge_add(struct drm_bridge *bridge);
+void drm_bridge_remove(struct drm_bridge *bridge);
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+void drm_bridge_detach(struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
+#endif
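
A skeletal sketch of the API above: a bridge with only the enable/disable hooks populated, added to the global bridge list so an encoder driver can later find and attach it. All example_ names are illustrative:

static void example_bridge_enable(struct drm_bridge *bridge)
{
	/* Enable the link feeding the next element in the chain. */
}

static void example_bridge_disable(struct drm_bridge *bridge)
{
	/* Quiesce while the display pipe is still running. */
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.enable  = example_bridge_enable,
	.disable = example_bridge_disable,
};

static struct drm_bridge example_bridge = {
	.funcs = &example_bridge_funcs,
};

static int example_bridge_register(void)
{
	return drm_bridge_add(&example_bridge);
}
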
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
new file mode 100644 (file)
index 0000000..c767238
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_COLOR_MGMT_H__
+#define __DRM_COLOR_MGMT_H__
+
+#include <linux/ctype.h>
+
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                               uint degamma_lut_size,
+                               bool has_ctm,
+                               uint gamma_lut_size);
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                int gamma_size);
+
+/**
+ * drm_color_lut_extract - clamp&round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+                                            uint32_t bit_precision)
+{
+       uint32_t val = user_input;
+       uint32_t max = 0xffff >> (16 - bit_precision);
+
+       /* Round only if we're not using full precision. */
+       if (bit_precision < 16) {
+               val += 1UL << (16 - bit_precision - 1);
+               val >>= 16 - bit_precision;
+       }
+
+       return clamp_val(val, 0, max);
+}
+
+
+#endif
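
A quick worked use for hypothetical 10-bit hardware: drm_color_lut_extract(0xffff, 10) clamps to 0x3ff, and mid-scale inputs are rounded rather than truncated (0x7fff becomes 0x200). struct drm_color_lut is the uapi LUT entry type:

static u32 example_pack_lut_entry(const struct drm_color_lut *lut)
{
	u32 r = drm_color_lut_extract(lut->red, 10);
	u32 g = drm_color_lut_extract(lut->green, 10);
	u32 b = drm_color_lut_extract(lut->blue, 10);

	/* Hypothetical 10:10:10 hardware register layout. */
	return (r << 20) | (g << 10) | b;
}
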
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
new file mode 100644 (file)
index 0000000..51a15de
--- /dev/null
@@ -0,0 +1,771 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_CONNECTOR_H__
+#define __DRM_CONNECTOR_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+#include <uapi/drm/drm_mode.h>
+
+struct drm_device;
+
+struct drm_connector_helper_funcs;
+struct drm_device;
+struct drm_crtc;
+struct drm_encoder;
+struct drm_property;
+struct drm_property_blob;
+struct edid;
+
+enum drm_connector_force {
+       DRM_FORCE_UNSPECIFIED,
+       DRM_FORCE_OFF,
+       DRM_FORCE_ON,         /* force on analog part normally */
+       DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/**
+ * enum drm_connector_status - status for a &drm_connector
+ *
+ * This enum is used to track the connector status. There are no separate
+ * #defines for the uapi!
+ */
+enum drm_connector_status {
+       /**
+        * @connector_status_connected: The connector is definitely connected to
+        * a sink device, and can be enabled.
+        */
+       connector_status_connected = 1,
+       /**
+        * @connector_status_disconnected: The connector isn't connected to a
+        * sink device that can be autodetected. For digital outputs like DP or
+        * HDMI (which can be reliably probed) this means there's really
+        * nothing there. It is driver-dependent whether a connector with this
+        * status can be lit up or not.
+        */
+       connector_status_disconnected = 2,
+       /**
+        * @connector_status_unknown: The connector's status could not be
+        * reliably detected. This happens when probing would either cause
+        * flicker (like load-detection when the connector is in use), or when a
+        * hardware resource isn't available (like when load-detection needs a
+        * free CRTC). It should be possible to light up the connector with one
+        * of the listed fallback modes. For default configuration userspace
+        * should only try to light up connectors with unknown status when
+        * there's no connector with @connector_status_connected.
+        */
+       connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+       SubPixelUnknown = 0,
+       SubPixelHorizontalRGB,
+       SubPixelHorizontalBGR,
+       SubPixelVerticalRGB,
+       SubPixelVerticalBGR,
+       SubPixelNone,
+};
+
+/**
+ * struct drm_display_info - runtime data about the connected sink
+ *
+ * Describes a given display (e.g. CRT or flat panel) and its limitations. For
+ * fixed display sinks like built-in panels there's not much difference between
+ * this and struct &drm_connector. But for sinks with a real cable this
+ * structure is meant to describe all the things at the other end of the cable.
+ *
+ * For sinks which provide an EDID this can be filled out by calling
+ * drm_add_edid_modes().
+ */
+struct drm_display_info {
+       /**
+        * @name: Name of the display.
+        */
+       char name[DRM_DISPLAY_INFO_LEN];
+
+       /**
+        * @width_mm: Physical width in mm.
+        */
+       unsigned int width_mm;
+       /**
+        * @height_mm: Physical height in mm.
+        */
+       unsigned int height_mm;
+
+       /**
+        * @pixel_clock: Maximum pixel clock supported by the sink, in units of
+        * 100Hz. This mismatches the clock in &drm_display_mode (which is in
+        * kHz), because that's what the EDID uses as its base unit.
+        */
+       unsigned int pixel_clock;
+       /**
+        * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
+        */
+       unsigned int bpc;
+
+       /**
+        * @subpixel_order: Subpixel order of LCD panels.
+        */
+       enum subpixel_order subpixel_order;
+
+#define DRM_COLOR_FORMAT_RGB444                (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444      (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422      (1<<2)
+
+       /**
+        * @color_formats: HDMI Color formats, selects between RGB and YCrCb
+        * modes. Uses DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
+        * as used to describe the pixel format in framebuffers, and also don't
+        * match the formats in @bus_formats which are shared with v4l.
+        */
+       u32 color_formats;
+
+       /**
+        * @bus_formats: Pixel data format on the wire, somewhat redundant with
+        * @color_formats. Array of size @num_bus_formats encoded using
+        * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers.
+        */
+       const u32 *bus_formats;
+       /**
+        * @num_bus_formats: Size of @bus_formats array.
+        */
+       unsigned int num_bus_formats;
+
+#define DRM_BUS_FLAG_DE_LOW            (1<<0)
+#define DRM_BUS_FLAG_DE_HIGH           (1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE   (1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE   (1<<3)
+
+       /**
+        * @bus_flags: Additional information (like pixel signal polarity) for
+        * the pixel data on the bus, using DRM_BUS_FLAG\_ defines.
+        */
+       u32 bus_flags;
+
+       /**
+        * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
+        * more stuff redundant with @bus_formats.
+        */
+       u8 edid_hdmi_dc_modes;
+
+       /**
+        * @cea_rev: CEA revision of the HDMI sink.
+        */
+       u8 cea_rev;
+};
+
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+                                    const u32 *formats,
+                                    unsigned int num_formats);
+
+/**
+ * struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+       struct drm_connector *connector;
+
+       /**
+        * @crtc: CRTC to connect connector to, NULL if disabled.
+        *
+        * Do not change this directly, use drm_atomic_set_crtc_for_connector()
+        * instead.
+        */
+       struct drm_crtc *crtc;
+
+       struct drm_encoder *best_encoder;
+
+       struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_connector_funcs - control connectors on a given device
+ *
+ * Each CRTC may have one or more connectors attached to it.  The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+       /**
+        * @dpms:
+        *
+        * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
+        * is exposed as a standard property on the connector, but diverted to
+        * this callback in the drm core. Note that atomic drivers don't
+        * implement the 4 level DPMS support on the connector any more, but
+        * instead only have an on/off "ACTIVE" property on the CRTC object.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_connector_dpms() to implement this hook.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*dpms)(struct drm_connector *connector, int mode);
+
+       /**
+        * @reset:
+        *
+        * Reset connector hardware and software state to off. This function isn't
+        * called by the core directly, only through drm_mode_config_reset().
+        * It's not a helper hook only for historical reasons.
+        *
+        * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
+        * atomic state using this hook.
+        */
+       void (*reset)(struct drm_connector *connector);
+
+       /**
+        * @detect:
+        *
+        * Check to see if anything is attached to the connector. The parameter
+        * force is set to false whilst polling, true when checking the
+        * connector due to a user request. force can be used by the driver to
+        * avoid expensive, destructive operations during automated probing.
+        *
+        * FIXME:
+        *
+        * Note that this hook is only called by the probe helper. It's not in
+        * the helper library vtable purely for historical reasons. The only DRM
+        * core entry point to probe connector state is @fill_modes.
+        *
+        * RETURNS:
+        *
+        * drm_connector_status indicating the connector's status.
+        */
+       enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                           bool force);
+
+       /**
+        * @force:
+        *
+        * This function is called to update internal encoder state when the
+        * connector is forced to a certain state by userspace, either through
+        * the sysfs interfaces or on the kernel cmdline. In that case the
+        * @detect callback isn't called.
+        *
+        * FIXME:
+        *
+        * Note that this hook is only called by the probe helper. It's not in
+        * the helper library vtable purely for historical reasons. The only DRM
+        * core entry point to probe connector state is @fill_modes.
+        */
+       void (*force)(struct drm_connector *connector);
+
+       /**
+        * @fill_modes:
+        *
+        * Entry point for output detection and basic mode validation. The
+        * driver should reprobe the output if needed (e.g. when hotplug
+        * handling is unreliable), add all detected modes to connector->modes
+        * and filter out any modes the device can't support in any configuration. It
+        * also needs to filter out any modes wider or higher than the
+        * parameters max_width and max_height indicate.
+        *
+        * The drivers must also prune any modes no longer valid from
+        * connector->modes. Furthermore it must update connector->status and
+        * connector->edid.  If no EDID has been received for this output
+        * connector->edid must be NULL.
+        *
+        * Drivers using the probe helpers should use
+        * drm_helper_probe_single_connector_modes() or
+        * drm_helper_probe_single_connector_modes_nomerge() to implement this
+        * function.
+        *
+        * RETURNS:
+        *
+        * The number of modes detected and filled into connector->modes.
+        */
+       int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+
+       /**
+        * @set_property:
+        *
+        * This is the legacy entry point to update a property attached to the
+        * connector.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_connector_set_property() to implement this hook.
+        *
+        * This callback is optional if the driver does not support any legacy
+        * driver-private properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+                            uint64_t val);
+
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the connector, like backlight control, i2c,
+        * DP aux or similar interfaces. It is called late in the driver load
+        * sequence from drm_connector_register() when registering all the
+        * core drm connector interfaces. Everything added from this callback
+        * should be unregistered in the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_connector *connector);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the connector from
+        * @late_register. It is called from drm_connector_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_connector *connector);
+
+       /**
+        * @destroy:
+        *
+        * Clean up connector resources. This is called at driver unload time
+        * through drm_mode_config_cleanup(). It can also be called at runtime
+        * when a connector is being hot-unplugged for drivers that support
+        * connector hotplugging (e.g. DisplayPort MST).
+        */
+       void (*destroy)(struct drm_connector *connector);
+
+       /**
+        * @atomic_duplicate_state:
+        *
+        * Duplicate the current atomic state for this connector and return it.
+        * The core and helpers guarantee that any atomic state duplicated with
+        * this hook and still owned by the caller (i.e. not transferred to the
+        * driver by calling ->atomic_commit() from struct
+        * &drm_mode_config_funcs) will be cleaned up by calling the
+        * @atomic_destroy_state hook in this structure.
+        *
+        * Atomic drivers which don't subclass struct &drm_connector_state should use
+        * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
+        * state structure to extend it with driver-private state should use
+        * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
+        * duplicated in a consistent fashion across drivers.
+        *
+        * It is an error to call this hook before connector->state has been
+        * initialized correctly.
+        *
+        * NOTE:
+        *
+        * If the duplicate state references refcounted resources this hook must
+        * acquire a reference for each of them. The driver must release these
+        * references again in @atomic_destroy_state.
+        *
+        * RETURNS:
+        *
+        * Duplicated atomic state or NULL when the allocation failed.
+        */
+       struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+
+       /**
+        * @atomic_destroy_state:
+        *
+        * Destroy a state duplicated with @atomic_duplicate_state and release
+        * or unreference all resources it references.
+        */
+       void (*atomic_destroy_state)(struct drm_connector *connector,
+                                    struct drm_connector_state *state);
+
+       /**
+        * @atomic_set_property:
+        *
+        * Decode a driver-private property value and store the decoded value
+        * into the passed-in state structure. Since the atomic core decodes all
+        * standardized properties (even for extensions beyond the core set of
+        * properties which might not be implemented by all drivers) this
+        * requires drivers to subclass the state structure.
+        *
+        * Such driver-private properties should really only be implemented for
+        * truly hardware/vendor specific state. Instead it is preferred to
+        * standardize atomic extension and decode the properties used to expose
+        * such an extension in the core.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_connector_set_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * NOTE:
+        *
+        * This function is called in the state assembly phase of atomic
+        * modesets, which can be aborted for any reason (including on
+        * userspace's request to just check whether a configuration would be
+        * possible). Drivers MUST NOT touch any persistent state (hardware or
+        * software) or data structures except the passed in @state parameter.
+        *
+        * Also since userspace controls in which order properties are set this
+        * function must not do any input validation (since the state update is
+        * incomplete and hence likely inconsistent). Instead any such input
+        * validation must be done in the various atomic_check callbacks.
+        *
+        * RETURNS:
+        *
+        * 0 if the property has been found, -EINVAL if the property isn't
+        * implemented by the driver (which shouldn't ever happen, the core only
+        * asks for properties attached to this connector). No other validation
+        * is allowed by the driver. The core already checks that the property
+        * value is within the range (integer, valid enum value, ...) the driver
+        * set when registering the property.
+        */
+       int (*atomic_set_property)(struct drm_connector *connector,
+                                  struct drm_connector_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
+
+       /**
+        * @atomic_get_property:
+        *
+        * Reads out the decoded driver-private property. This is used to
+        * implement the GETCONNECTOR IOCTL.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_connector_get_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success, -EINVAL if the property isn't implemented by the
+        * driver (which shouldn't ever happen, the core only asks for
+        * properties attached to this connector).
+        */
+       int (*atomic_get_property)(struct drm_connector *connector,
+                                  const struct drm_connector_state *state,
+                                  struct drm_property *property,
+                                  uint64_t *val);
+};
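
A minimal sketch of how the two property hooks above fit together, assuming a driver that subclasses struct drm_connector_state to carry one private value and defers all validation to atomic_check; every foo_* name below is hypothetical and not part of this patch:

struct foo_connector_state {
	struct drm_connector_state base;
	unsigned int sharpness;			/* driver-private value */
};

struct foo_connector {
	struct drm_connector base;
	struct drm_property *sharpness_property;	/* created at init time */
};

#define to_foo_state(s) container_of(s, struct foo_connector_state, base)
#define to_foo_connector(c) container_of(c, struct foo_connector, base)

static int foo_atomic_set_property(struct drm_connector *connector,
				   struct drm_connector_state *state,
				   struct drm_property *property,
				   uint64_t val)
{
	struct foo_connector *foo = to_foo_connector(connector);

	if (property == foo->sharpness_property) {
		/* decode and stash only; consistency checks belong in the
		 * driver's atomic_check phase, not here */
		to_foo_state(state)->sharpness = val;
		return 0;
	}

	return -EINVAL;
}

static int foo_atomic_get_property(struct drm_connector *connector,
				   const struct drm_connector_state *state,
				   struct drm_property *property,
				   uint64_t *val)
{
	struct foo_connector *foo = to_foo_connector(connector);
	const struct foo_connector_state *foo_state =
		container_of(state, struct foo_connector_state, base);

	if (property == foo->sharpness_property) {
		*val = foo_state->sharpness;
		return 0;
	}

	return -EINVAL;
}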
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+       bool specified;
+       bool refresh_specified;
+       bool bpp_specified;
+       int xres, yres;
+       int bpp;
+       int refresh;
+       bool rb;
+       bool interlace;
+       bool cvt;
+       bool margins;
+       enum drm_connector_force force;
+};
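
For illustration only: assuming the standard video= option parser
(drm_mode_parse_command_line_for_connector()), a kernel command line of
video=HDMI-A-1:1920x1080@60e would decode to roughly the following values:

static const struct drm_cmdline_mode example = {
	.specified = true,		/* a mode was given */
	.xres = 1920,
	.yres = 1080,
	.refresh_specified = true,
	.refresh = 60,			/* the '@60' part */
	.force = DRM_FORCE_ON,		/* the trailing 'e' */
};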
+
+/**
+ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile group is housed in a single monitor
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC.  Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+       struct drm_device *dev;
+       struct device *kdev;
+       struct device_attribute *attr;
+       struct list_head head;
+
+       struct drm_mode_object base;
+
+       char *name;
+
+       /**
+        * @index: Compacted connector index, which matches the position inside
+        * the mode_config.list for drivers not supporting hot-add/removing. Can
+        * be used as an array index. It is invariant over the lifetime of the
+        * connector.
+        */
+       unsigned index;
+
+       int connector_type;
+       int connector_type_id;
+       bool interlace_allowed;
+       bool doublescan_allowed;
+       bool stereo_allowed;
+       bool registered;
+       struct list_head modes; /* list of modes on this connector */
+
+       enum drm_connector_status status;
+
+       /* these are modes added by probing with DDC or the BIOS */
+       struct list_head probed_modes;
+
+       /**
+        * @display_info: Display information is filled from EDID information
+        * when a display is detected. For non-hot-pluggable displays, such as
+        * flat panels in embedded systems, the driver should initialize the
+        * display_info.width_mm and display_info.height_mm fields with the
+        * physical size of the display.
+        */
+       struct drm_display_info display_info;
+       const struct drm_connector_funcs *funcs;
+
+       struct drm_property_blob *edid_blob_ptr;
+       struct drm_object_properties properties;
+
+       /**
+        * @path_blob_ptr:
+        *
+        * DRM blob property data for the DP MST path property.
+        */
+       struct drm_property_blob *path_blob_ptr;
+
+       /**
+        * @tile_blob_ptr:
+        *
+        * DRM blob property data for the tile property (used mostly by DP MST).
+        * This is meant for screens which are driven through separate display
+        * pipelines represented by &drm_crtc, which might not be running with
+        * genlocked clocks. For tiled panels which are genlocked, like
+        * dual-link LVDS or dual-link DSI, the driver should try not to expose
+        * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+        */
+       struct drm_property_blob *tile_blob_ptr;
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+       /**
+        * @polled:
+        *
+        * Connector polling mode, a combination of
+        *
+        * DRM_CONNECTOR_POLL_HPD
+        *     The connector generates hotplug events and doesn't need to be
+        *     periodically polled. The CONNECT and DISCONNECT flags must not
+        *     be set together with the HPD flag.
+        *
+        * DRM_CONNECTOR_POLL_CONNECT
+        *     Periodically poll the connector for connection.
+        *
+        * DRM_CONNECTOR_POLL_DISCONNECT
+        *     Periodically poll the connector for disconnection.
+        *
+        * Set to 0 for connectors that don't support connection status
+        * discovery.
+        */
+       uint8_t polled;
+
+       /* requested DPMS state */
+       int dpms;
+
+       const struct drm_connector_helper_funcs *helper_private;
+
+       /* forced on connector */
+       struct drm_cmdline_mode cmdline_mode;
+       enum drm_connector_force force;
+       bool override_edid;
+
+#define DRM_CONNECTOR_MAX_ENCODER 3
+       uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+       struct drm_encoder *encoder; /* currently active encoder */
+
+#define MAX_ELD_BYTES  128
+       /* EDID bits */
+       uint8_t eld[MAX_ELD_BYTES];
+       bool dvi_dual;
+       int max_tmds_clock;     /* in MHz */
+       bool latency_present[2];
+       int video_latency[2];   /* [0]: progressive, [1]: interlaced */
+       int audio_latency[2];
+       int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
+       unsigned bad_edid_counter;
+
+       /* Flag for raw EDID header corruption - used in Displayport
+        * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+        */
+       bool edid_corrupt;
+
+       struct dentry *debugfs_entry;
+
+       struct drm_connector_state *state;
+
+       /* DisplayID bits */
+       bool has_tile;
+       struct drm_tile_group *tile_group;
+       bool tile_is_single_monitor;
+
+       uint8_t num_h_tile, num_v_tile;
+       uint8_t tile_h_loc, tile_v_loc;
+       uint16_t tile_h_size, tile_v_size;
+};
+
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+
+int drm_connector_init(struct drm_device *dev,
+                      struct drm_connector *connector,
+                      const struct drm_connector_funcs *funcs,
+                      int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder);
+
+void drm_connector_cleanup(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+       return connector->index;
+}
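
Putting the pieces above together, a minimal connector bring-up sequence might
look like the sketch below, reusing the foo_connector type sketched earlier;
the foo_* vtables, the HDMI-A connector type and the polling policy are
assumptions for illustration, not taken from this patch:

static const struct drm_connector_funcs foo_connector_funcs;	/* assumed */
static const struct drm_connector_helper_funcs foo_connector_helper_funcs; /* assumed */

static int foo_connector_create(struct drm_device *dev,
				struct drm_encoder *encoder,
				struct foo_connector *foo, bool has_hpd)
{
	struct drm_connector *connector = &foo->base;
	int ret;

	ret = drm_connector_init(dev, connector, &foo_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;

	drm_connector_helper_add(connector, &foo_connector_helper_funcs);

	/* HPD must not be combined with the CONNECT/DISCONNECT flags */
	if (has_hpd)
		connector->polled = DRM_CONNECTOR_POLL_HPD;
	else
		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				    DRM_CONNECTOR_POLL_DISCONNECT;

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret)
		return ret;

	/* only needed when registering connectors individually after
	 * drm_dev_register() */
	return drm_connector_register(connector);
}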
+
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
+               uint32_t id)
+{
+       struct drm_mode_object *mo;
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+       return mo ? obj_to_connector(mo) : NULL;
+}
+
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+       drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees the
+ * connector once the refcount drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+       drm_mode_object_unreference(&connector->base);
+}
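
The lookup and reference helpers pair up as in this sketch: the reference
implicitly taken by drm_connector_lookup() must be dropped with
drm_connector_unreference() when the caller is done (the foo_ function name is
hypothetical):

static int foo_log_connector_status(struct drm_device *dev, uint32_t id)
{
	struct drm_connector *connector = drm_connector_lookup(dev, id);

	if (!connector)
		return -ENOENT;

	DRM_DEBUG("%s: %s\n", connector->name,
		  drm_get_connector_status_name(connector->status));

	/* drop the reference taken by drm_connector_lookup() */
	drm_connector_unreference(connector);
	return 0;
}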
+
+const char *drm_get_connector_status_name(enum drm_connector_status status);
+const char *drm_get_subpixel_order_name(enum subpixel_order order);
+const char *drm_get_dpms_name(int val);
+const char *drm_get_dvi_i_subconnector_name(int val);
+const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_subconnector_name(int val);
+const char *drm_get_tv_select_name(int val);
+
+int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties(struct drm_device *dev,
+                                 unsigned int num_modes,
+                                 const char * const modes[]);
+int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
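
These helpers create the standardized optional properties once per device;
drivers then attach them to individual connectors. A sketch for the scaling
mode property, where the initial DRM_MODE_SCALE_ASPECT value is an assumption:

static int foo_attach_scaling_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	int ret;

	ret = drm_mode_create_scaling_mode_property(dev);
	if (ret)
		return ret;

	/* created once per device, then attached per connector */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_ASPECT);
	return 0;
}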
+
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+                                        const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           const struct edid *edid);
+
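drm_mode_connector_update_edid_property() is typically called from the probe
path so the EDID blob exposed to userspace tracks the sink. A sketch of a
.get_modes() helper callback, where foo_ddc_adapter() stands in for however
the driver finds its DDC i2c adapter:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = drm_get_edid(connector, foo_ddc_adapter(connector));
	int count = 0;

	/* keep the EDID property in sync, clearing it when nothing is read */
	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;
}
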
+/**
+ * drm_for_each_connector - iterate over all connectors
+ * @connector: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all connectors of @dev.
+ */
+#define drm_for_each_connector(connector, dev) \
+       for (assert_drm_connector_list_read_locked(&(dev)->mode_config),        \
+            connector = list_first_entry(&(dev)->mode_config.connector_list,   \
+                                         struct drm_connector, head);          \
+            &connector->head != (&(dev)->mode_config.connector_list);          \
+            connector = list_next_entry(connector, head))
+
+#endif
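
The assert in drm_for_each_connector() means the connector list must be
read-locked; in this kernel version that is mode_config.mutex, as in the
following sketch (the foo_ name is hypothetical):

static unsigned int foo_count_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	unsigned int n = 0;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		if (connector->status == connector_status_connected)
			n++;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}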
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
deleted file mode 100644 (file)
index 4e75238..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#define CORE_AUTHOR            "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
-
-#define CORE_NAME              "drm"
-#define CORE_DESC              "DRM shared core routines"
-#define CORE_DATE              "20060810"
-
-#define DRM_IF_MAJOR   1
-#define DRM_IF_MINOR   4
-
-#define CORE_MAJOR     1
-#define CORE_MINOR     1
-#define CORE_PATCHLEVEL 0
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 44e070800b6dd499848d5a9a2a593aaf954576bd..a544b750249373f5ac96bfe42167792493ff37b9 100644 (file)
 #include <uapi/drm/drm_mode.h>
 #include <uapi/drm/drm_fourcc.h>
 #include <drm/drm_modeset_lock.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_property.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
 
 struct drm_device;
 struct drm_mode_set;
-struct drm_framebuffer;
-struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
 struct device_node;
 struct fence;
 struct edid;
 
-struct drm_mode_object {
-       uint32_t id;
-       uint32_t type;
-       struct drm_object_properties *properties;
-       struct kref refcount;
-       void (*free_cb)(struct kref *kref);
-};
-
-#define DRM_OBJECT_MAX_PROPERTY 24
-struct drm_object_properties {
-       int count, atomic_count;
-       /* NOTE: if we ever start dynamically destroying properties (ie.
-        * not at drm_mode_config_cleanup() time), then we'd have to do
-        * a better job of detaching property from mode objects to avoid
-        * dangling property pointers:
-        */
-       struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
-       /* do not read/write values directly, but use drm_object_property_get_value()
-        * and drm_object_property_set_value():
-        */
-       uint64_t values[DRM_OBJECT_MAX_PROPERTY];
-};
-
 static inline int64_t U642I64(uint64_t val)
 {
        return (int64_t)*((int64_t *)&val);
@@ -78,84 +65,6 @@ static inline uint64_t I642U64(int64_t val)
        return (uint64_t)*((uint64_t *)&val);
 }
 
-/*
- * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
- * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
- * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
- */
-#define DRM_ROTATE_MASK 0x0f
-#define DRM_ROTATE_0   0
-#define DRM_ROTATE_90  1
-#define DRM_ROTATE_180 2
-#define DRM_ROTATE_270 3
-#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
-#define DRM_REFLECT_X  4
-#define DRM_REFLECT_Y  5
-
-enum drm_connector_force {
-       DRM_FORCE_UNSPECIFIED,
-       DRM_FORCE_OFF,
-       DRM_FORCE_ON,         /* force on analog part normally */
-       DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
-};
-
-#include <drm/drm_modes.h>
-
-enum drm_connector_status {
-       connector_status_connected = 1,
-       connector_status_disconnected = 2,
-       connector_status_unknown = 3,
-};
-
-enum subpixel_order {
-       SubPixelUnknown = 0,
-       SubPixelHorizontalRGB,
-       SubPixelHorizontalBGR,
-       SubPixelVerticalRGB,
-       SubPixelVerticalBGR,
-       SubPixelNone,
-};
-
-#define DRM_COLOR_FORMAT_RGB444                (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444      (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422      (1<<2)
-
-#define DRM_BUS_FLAG_DE_LOW            (1<<0)
-#define DRM_BUS_FLAG_DE_HIGH           (1<<1)
-/* drive data on pos. edge */
-#define DRM_BUS_FLAG_PIXDATA_POSEDGE   (1<<2)
-/* drive data on neg. edge */
-#define DRM_BUS_FLAG_PIXDATA_NEGEDGE   (1<<3)
-
-/*
- * Describes a given display (e.g. CRT or flat panel) and its limitations.
- */
-struct drm_display_info {
-       char name[DRM_DISPLAY_INFO_LEN];
-
-       /* Physical size */
-        unsigned int width_mm;
-       unsigned int height_mm;
-
-       /* Clock limits FIXME: storage format */
-       unsigned int min_vfreq, max_vfreq;
-       unsigned int min_hfreq, max_hfreq;
-       unsigned int pixel_clock;
-       unsigned int bpc;
-
-       enum subpixel_order subpixel_order;
-       u32 color_formats;
-
-       const u32 *bus_formats;
-       unsigned int num_bus_formats;
-       u32 bus_flags;
-
-       /* Mask of supported hdmi deep color modes */
-       u8 edid_hdmi_dc_modes;
-
-       u8 cea_rev;
-};
-
 /* data corresponds to displayid vend/prod/serial */
 struct drm_tile_group {
        struct kref refcount;
@@ -164,130 +73,7 @@ struct drm_tile_group {
        u8 group_data[8];
 };
 
-/**
- * struct drm_framebuffer_funcs - framebuffer hooks
- */
-struct drm_framebuffer_funcs {
-       /**
-        * @destroy:
-        *
-        * Clean up framebuffer resources, specifically also unreference the
-        * backing storage. The core guarantees to call this function for every
-        * framebuffer successfully created by ->fb_create() in
-        * &drm_mode_config_funcs. Drivers must also call
-        * drm_framebuffer_cleanup() to release DRM core resources for this
-        * framebuffer.
-        */
-       void (*destroy)(struct drm_framebuffer *framebuffer);
-
-       /**
-        * @create_handle:
-        *
-        * Create a buffer handle in the driver-specific buffer manager (either
-        * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
-        * the core to implement the GETFB IOCTL, which returns (for
-        * sufficiently priviledged user) also a native buffer handle. This can
-        * be used for seamless transitions between modesetting clients by
-        * copying the current screen contents to a private buffer and blending
-        * between that and the new contents.
-        *
-        * GEM based drivers should call drm_gem_handle_create() to create the
-        * handle.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*create_handle)(struct drm_framebuffer *fb,
-                            struct drm_file *file_priv,
-                            unsigned int *handle);
-       /**
-        * @dirty:
-        *
-        * Optional callback for the dirty fb IOCTL.
-        *
-        * Userspace can notify the driver via this callback that an area of the
-        * framebuffer has changed and should be flushed to the display
-        * hardware. This can also be used internally, e.g. by the fbdev
-        * emulation, though that's not the case currently.
-        *
-        * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
-        * for more information as all the semantics and arguments have a one to
-        * one mapping on this function.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*dirty)(struct drm_framebuffer *framebuffer,
-                    struct drm_file *file_priv, unsigned flags,
-                    unsigned color, struct drm_clip_rect *clips,
-                    unsigned num_clips);
-};
-
-struct drm_framebuffer {
-       struct drm_device *dev;
-       /*
-        * Note that the fb is refcounted for the benefit of driver internals,
-        * for example some hw, disabling a CRTC/plane is asynchronous, and
-        * scanout does not actually complete until the next vblank.  So some
-        * cleanup (like releasing the reference(s) on the backing GEM bo(s))
-        * should be deferred.  In cases like this, the driver would like to
-        * hold a ref to the fb even though it has already been removed from
-        * userspace perspective.
-        * The refcount is stored inside the mode object.
-        */
-       /*
-        * Place on the dev->mode_config.fb_list, access protected by
-        * dev->mode_config.fb_lock.
-        */
-       struct list_head head;
-       struct drm_mode_object base;
-       const struct drm_framebuffer_funcs *funcs;
-       unsigned int pitches[4];
-       unsigned int offsets[4];
-       uint64_t modifier[4];
-       unsigned int width;
-       unsigned int height;
-       /* depth can be 15 or 16 */
-       unsigned int depth;
-       int bits_per_pixel;
-       int flags;
-       uint32_t pixel_format; /* fourcc format */
-       int hot_x;
-       int hot_y;
-       struct list_head filp_head;
-};
-
-struct drm_property_blob {
-       struct drm_mode_object base;
-       struct drm_device *dev;
-       struct list_head head_global;
-       struct list_head head_file;
-       size_t length;
-       unsigned char data[];
-};
-
-struct drm_property_enum {
-       uint64_t value;
-       struct list_head head;
-       char name[DRM_PROP_NAME_LEN];
-};
-
-struct drm_property {
-       struct list_head head;
-       struct drm_mode_object base;
-       uint32_t flags;
-       char name[DRM_PROP_NAME_LEN];
-       uint32_t num_values;
-       uint64_t *values;
-       struct drm_device *dev;
-
-       struct list_head enum_list;
-};
-
 struct drm_crtc;
-struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
 struct drm_plane;
@@ -296,7 +82,6 @@ struct drm_atomic_state;
 
 struct drm_crtc_helper_funcs;
 struct drm_encoder_helper_funcs;
-struct drm_connector_helper_funcs;
 struct drm_plane_helper_funcs;
 
 /**
@@ -545,16 +330,6 @@ struct drm_crtc_funcs {
         * counter and timestamp tracking though, e.g. if they have accurate
         * timestamp registers in hardware.
         *
-        * FIXME:
-        *
-        * Up to that point drivers need to manage events themselves and can use
-        * even->base.list freely for that. Specifically they need to ensure
-        * that they don't send out page flip (or vblank) events for which the
-        * corresponding drm file has been closed already. The drm core
-        * unfortunately does not (yet) take care of that. Therefore drivers
-        * currently must clean up and release pending events in their
-        * ->preclose driver function.
-        *
         * This callback is optional.
         *
         * NOTE:
@@ -580,6 +355,24 @@ struct drm_crtc_funcs {
                         struct drm_pending_vblank_event *event,
                         uint32_t flags);
 
+       /**
+        * @page_flip_target:
+        *
+        * Same as @page_flip but with an additional parameter specifying the
+        * absolute target vertical blank period (as reported by
+        * drm_crtc_vblank_count()) when the flip should take effect.
+        *
+        * Note that the core code calls drm_crtc_vblank_get before this entry
+        * point, and will call drm_crtc_vblank_put if this entry point returns
+        * any non-0 error code. It's the driver's responsibility to call
+        * drm_crtc_vblank_put after this entry point returns 0, typically when
+        * the flip completes.
+        */
+       int (*page_flip_target)(struct drm_crtc *crtc,
+                               struct drm_framebuffer *fb,
+                               struct drm_pending_vblank_event *event,
+                               uint32_t flags, uint32_t target);
+
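A sketch of the vblank reference hand-off described above; foo_queue_flip_at()
and the completion handler are assumed driver internals, not part of this
patch:

static int foo_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags, uint32_t target)
{
	int ret;

	/* the core took a vblank reference before calling us */
	ret = foo_queue_flip_at(crtc, fb, event, target);
	if (ret)
		return ret;	/* the core drops the reference on error */

	/* success: we now own the reference; the completion handler must
	 * call drm_crtc_vblank_put(crtc) once the flip has taken effect */
	return 0;
}
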
        /**
         * @set_property:
         *
@@ -753,955 +546,17 @@ struct drm_crtc_funcs {
  * @x: x position on screen
  * @y: y position on screen
  * @funcs: CRTC control functions
- * @gamma_size: size of gamma ramp
- * @gamma_store: gamma ramp values
- * @helper_private: mid-layer private data
- * @properties: property tracking for this CRTC
- *
- * Each CRTC may have one or more connectors associated with it.  This structure
- * allows the CRTC to be controlled.
- */
-struct drm_crtc {
-       struct drm_device *dev;
-       struct device_node *port;
-       struct list_head head;
-
-       char *name;
-
-       /**
-        * @mutex:
-        *
-        * This provides a read lock for the overall crtc state (mode, dpms
-        * state, ...) and a write lock for everything which can be update
-        * without a full modeset (fb, cursor data, crtc properties ...). Full
-        * modeset also need to grab dev->mode_config.connection_mutex.
-        */
-       struct drm_modeset_lock mutex;
-
-       struct drm_mode_object base;
-
-       /* primary and cursor planes for CRTC */
-       struct drm_plane *primary;
-       struct drm_plane *cursor;
-
-       /**
-        * @index: Position inside the mode_config.list, can be used as an array
-        * index. It is invariant over the lifetime of the CRTC.
-        */
-       unsigned index;
-
-       /* position of cursor plane on crtc */
-       int cursor_x;
-       int cursor_y;
-
-       bool enabled;
-
-       /* Requested mode from modesetting. */
-       struct drm_display_mode mode;
-
-       /* Programmed mode in hw, after adjustments for encoders,
-        * crtc, panel scaling etc. Needed for timestamping etc.
-        */
-       struct drm_display_mode hwmode;
-
-       int x, y;
-       const struct drm_crtc_funcs *funcs;
-
-       /* Legacy FB CRTC gamma size for reporting to userspace */
-       uint32_t gamma_size;
-       uint16_t *gamma_store;
-
-       /* if you are using the helper */
-       const struct drm_crtc_helper_funcs *helper_private;
-
-       struct drm_object_properties properties;
-
-       /**
-        * @state:
-        *
-        * Current atomic state for this CRTC.
-        */
-       struct drm_crtc_state *state;
-
-       /**
-        * @commit_list:
-        *
-        * List of &drm_crtc_commit structures tracking pending commits.
-        * Protected by @commit_lock. This list doesn't hold its own full
-        * reference, but burrows it from the ongoing commit. Commit entries
-        * must be removed from this list once the commit is fully completed,
-        * but before it's correspoding &drm_atomic_state gets destroyed.
-        */
-       struct list_head commit_list;
-
-       /**
-        * @commit_lock:
-        *
-        * Spinlock to protect @commit_list.
-        */
-       spinlock_t commit_lock;
-
-       /**
-        * @acquire_ctx:
-        *
-        * Per-CRTC implicit acquire context used by atomic drivers for legacy
-        * IOCTLs, so that atomic drivers can get at the locking acquire
-        * context.
-        */
-       struct drm_modeset_acquire_ctx *acquire_ctx;
-};
-
-/**
- * struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @crtc: CRTC to connect connector to, NULL if disabled
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_connector_state {
-       struct drm_connector *connector;
-
-       struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
-
-       struct drm_encoder *best_encoder;
-
-       struct drm_atomic_state *state;
-};
-
-/**
- * struct drm_connector_funcs - control connectors on a given device
- *
- * Each CRTC may have one or more connectors attached to it.  The functions
- * below allow the core DRM code to control connectors, enumerate available modes,
- * etc.
- */
-struct drm_connector_funcs {
-       /**
-        * @dpms:
-        *
-        * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
-        * is exposed as a standard property on the connector, but diverted to
-        * this callback in the drm core. Note that atomic drivers don't
-        * implement the 4 level DPMS support on the connector any more, but
-        * instead only have an on/off "ACTIVE" property on the CRTC object.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_connector_dpms() to implement this hook.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*dpms)(struct drm_connector *connector, int mode);
-
-       /**
-        * @reset:
-        *
-        * Reset connector hardware and software state to off. This function isn't
-        * called by the core directly, only through drm_mode_config_reset().
-        * It's not a helper hook only for historical reasons.
-        *
-        * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
-        * atomic state using this hook.
-        */
-       void (*reset)(struct drm_connector *connector);
-
-       /**
-        * @detect:
-        *
-        * Check to see if anything is attached to the connector. The parameter
-        * force is set to false whilst polling, true when checking the
-        * connector due to a user request. force can be used by the driver to
-        * avoid expensive, destructive operations during automated probing.
-        *
-        * FIXME:
-        *
-        * Note that this hook is only called by the probe helper. It's not in
-        * the helper library vtable purely for historical reasons. The only DRM
-        * core entry point to probe connector state is @fill_modes.
-        *
-        * RETURNS:
-        *
-        * drm_connector_status indicating the connector's status.
-        */
-       enum drm_connector_status (*detect)(struct drm_connector *connector,
-                                           bool force);
-
-       /**
-        * @force:
-        *
-        * This function is called to update internal encoder state when the
-        * connector is forced to a certain state by userspace, either through
-        * the sysfs interfaces or on the kernel cmdline. In that case the
-        * @detect callback isn't called.
-        *
-        * FIXME:
-        *
-        * Note that this hook is only called by the probe helper. It's not in
-        * the helper library vtable purely for historical reasons. The only DRM
-        * core entry point to probe connector state is @fill_modes.
-        */
-       void (*force)(struct drm_connector *connector);
-
-       /**
-        * @fill_modes:
-        *
-        * Entry point for output detection and basic mode validation. The
-        * driver should reprobe the output if needed (e.g. when hotplug
-        * handling is unreliable), add all detected modes to connector->modes
-        * and filter out any the device can't support in any configuration. It
-        * also needs to filter out any modes wider or higher than the
-        * parameters max_width and max_height indicate.
-        *
-        * The drivers must also prune any modes no longer valid from
-        * connector->modes. Furthermore it must update connector->status and
-        * connector->edid.  If no EDID has been received for this output
-        * connector->edid must be NULL.
-        *
-        * Drivers using the probe helpers should use
-        * drm_helper_probe_single_connector_modes() or
-        * drm_helper_probe_single_connector_modes_nomerge() to implement this
-        * function.
-        *
-        * RETURNS:
-        *
-        * The number of modes detected and filled into connector->modes.
-        */
-       int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
-
-       /**
-        * @set_property:
-        *
-        * This is the legacy entry point to update a property attached to the
-        * connector.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_connector_set_property() to implement this hook.
-        *
-        * This callback is optional if the driver does not support any legacy
-        * driver-private properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*set_property)(struct drm_connector *connector, struct drm_property *property,
-                            uint64_t val);
-
-       /**
-        * @late_register:
-        *
-        * This optional hook can be used to register additional userspace
-        * interfaces attached to the connector, light backlight control, i2c,
-        * DP aux or similar interfaces. It is called late in the driver load
-        * sequence from drm_connector_register() when registering all the
-        * core drm connector interfaces. Everything added from this callback
-        * should be unregistered in the early_unregister callback.
-        *
-        * Returns:
-        *
-        * 0 on success, or a negative error code on failure.
-        */
-       int (*late_register)(struct drm_connector *connector);
-
-       /**
-        * @early_unregister:
-        *
-        * This optional hook should be used to unregister the additional
-        * userspace interfaces attached to the connector from
-        * late_unregister(). It is called from drm_connector_unregister(),
-        * early in the driver unload sequence to disable userspace access
-        * before data structures are torndown.
-        */
-       void (*early_unregister)(struct drm_connector *connector);
-
-       /**
-        * @destroy:
-        *
-        * Clean up connector resources. This is called at driver unload time
-        * through drm_mode_config_cleanup(). It can also be called at runtime
-        * when a connector is being hot-unplugged for drivers that support
-        * connector hotplugging (e.g. DisplayPort MST).
-        */
-       void (*destroy)(struct drm_connector *connector);
-
-       /**
-        * @atomic_duplicate_state:
-        *
-        * Duplicate the current atomic state for this connector and return it.
-        * The core and helpers gurantee that any atomic state duplicated with
-        * this hook and still owned by the caller (i.e. not transferred to the
-        * driver by calling ->atomic_commit() from struct
-        * &drm_mode_config_funcs) will be cleaned up by calling the
-        * @atomic_destroy_state hook in this structure.
-        *
-        * Atomic drivers which don't subclass struct &drm_connector_state should use
-        * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
-        * state structure to extend it with driver-private state should use
-        * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
-        * duplicated in a consistent fashion across drivers.
-        *
-        * It is an error to call this hook before connector->state has been
-        * initialized correctly.
-        *
-        * NOTE:
-        *
-        * If the duplicate state references refcounted resources this hook must
-        * acquire a reference for each of them. The driver must release these
-        * references again in @atomic_destroy_state.
-        *
-        * RETURNS:
-        *
-        * Duplicated atomic state or NULL when the allocation failed.
-        */
-       struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
-
-       /**
-        * @atomic_destroy_state:
-        *
-        * Destroy a state duplicated with @atomic_duplicate_state and release
-        * or unreference all resources it references
-        */
-       void (*atomic_destroy_state)(struct drm_connector *connector,
-                                    struct drm_connector_state *state);
-
-       /**
-        * @atomic_set_property:
-        *
-        * Decode a driver-private property value and store the decoded value
-        * into the passed-in state structure. Since the atomic core decodes all
-        * standardized properties (even for extensions beyond the core set of
-        * properties which might not be implemented by all drivers) this
-        * requires drivers to subclass the state structure.
-        *
-        * Such driver-private properties should really only be implemented for
-        * truly hardware/vendor specific state. Instead it is preferred to
-        * standardize atomic extension and decode the properties used to expose
-        * such an extension in the core.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_connector_set_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * NOTE:
-        *
-        * This function is called in the state assembly phase of atomic
-        * modesets, which can be aborted for any reason (including on
-        * userspace's request to just check whether a configuration would be
-        * possible). Drivers MUST NOT touch any persistent state (hardware or
-        * software) or data structures except the passed in @state parameter.
-        *
-        * Also since userspace controls in which order properties are set this
-        * function must not do any input validation (since the state update is
-        * incomplete and hence likely inconsistent). Instead any such input
-        * validation must be done in the various atomic_check callbacks.
-        *
-        * RETURNS:
-        *
-        * 0 if the property has been found, -EINVAL if the property isn't
-        * implemented by the driver (which shouldn't ever happen, the core only
-        * asks for properties attached to this connector). No other validation
-        * is allowed by the driver. The core already checks that the property
-        * value is within the range (integer, valid enum value, ...) the driver
-        * set when registering the property.
-        */
-       int (*atomic_set_property)(struct drm_connector *connector,
-                                  struct drm_connector_state *state,
-                                  struct drm_property *property,
-                                  uint64_t val);
-
-       /**
-        * @atomic_get_property:
-        *
-        * Reads out the decoded driver-private property. This is used to
-        * implement the GETCONNECTOR IOCTL.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_connector_get_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success, -EINVAL if the property isn't implemented by the
-        * driver (which shouldn't ever happen, the core only asks for
-        * properties attached to this connector).
-        */
-       int (*atomic_get_property)(struct drm_connector *connector,
-                                  const struct drm_connector_state *state,
-                                  struct drm_property *property,
-                                  uint64_t *val);
-};
-
-/**
- * struct drm_encoder_funcs - encoder controls
- *
- * Encoders sit between CRTCs and connectors.
- */
-struct drm_encoder_funcs {
-       /**
-        * @reset:
-        *
-        * Reset encoder hardware and software state to off. This function isn't
-        * called by the core directly, only through drm_mode_config_reset().
-        * It's not a helper hook only for historical reasons.
-        */
-       void (*reset)(struct drm_encoder *encoder);
-
-       /**
-        * @destroy:
-        *
-        * Clean up encoder resources. This is only called at driver unload time
-        * through drm_mode_config_cleanup() since an encoder cannot be
-        * hotplugged in DRM.
-        */
-       void (*destroy)(struct drm_encoder *encoder);
-
-       /**
-        * @late_register:
-        *
-        * This optional hook can be used to register additional userspace
-        * interfaces attached to the encoder like debugfs interfaces.
-        * It is called late in the driver load sequence from drm_dev_register().
-        * Everything added from this callback should be unregistered in
-        * the early_unregister callback.
-        *
-        * Returns:
-        *
-        * 0 on success, or a negative error code on failure.
-        */
-       int (*late_register)(struct drm_encoder *encoder);
-
-       /**
-        * @early_unregister:
-        *
-        * This optional hook should be used to unregister the additional
-        * userspace interfaces attached to the encoder from
-        * late_unregister(). It is called from drm_dev_unregister(),
-        * early in the driver unload sequence to disable userspace access
-        * before data structures are torndown.
-        */
-       void (*early_unregister)(struct drm_encoder *encoder);
-};
-
-#define DRM_CONNECTOR_MAX_ENCODER 3
-
-/**
- * struct drm_encoder - central DRM encoder structure
- * @dev: parent DRM device
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
- * @possible_crtcs: bitmask of potential CRTC bindings
- * @possible_clones: bitmask of potential sibling encoders for cloning
- * @crtc: currently bound CRTC
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
- * @helper_private: mid-layer private data
- *
- * CRTCs drive pixels to encoders, which convert them into signals
- * appropriate for a given connector or set of connectors.
- */
-struct drm_encoder {
-       struct drm_device *dev;
-       struct list_head head;
-
-       struct drm_mode_object base;
-       char *name;
-       int encoder_type;
-
-       /**
-        * @index: Position inside the mode_config.list, can be used as an array
-        * index. It is invariant over the lifetime of the encoder.
-        */
-       unsigned index;
-
-       uint32_t possible_crtcs;
-       uint32_t possible_clones;
-
-       struct drm_crtc *crtc;
-       struct drm_bridge *bridge;
-       const struct drm_encoder_funcs *funcs;
-       const struct drm_encoder_helper_funcs *helper_private;
-};
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
-#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
-#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
-#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
-
-#define MAX_ELD_BYTES  128
-
-/**
- * struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
- * @modes: modes available on this connector (from fill_modes() + user)
- * @status: one of the drm_connector_status enums (connected, not, or unknown)
- * @probed_modes: list of modes derived directly from the display
- * @display_info: information about attached display (e.g. from EDID)
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a %DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @encoder: encoder driving this connector, if any
- * @eld: EDID-like data, if present
- * @dvi_dual: dual link DVI, if found
- * @max_tmds_clock: max clock rate, if found
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @state: current atomic state for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- *
- * Each connector may be connected to one or more CRTCs, or may be clonable by
- * another connector if they can share a CRTC.  Each connector also has a specific
- * position in the broader display (referred to as a 'screen' though it could
- * span multiple monitors).
- */
-struct drm_connector {
-       struct drm_device *dev;
-       struct device *kdev;
-       struct device_attribute *attr;
-       struct list_head head;
-
-       struct drm_mode_object base;
-
-       char *name;
-
-       /**
-        * @index: Compacted connector index, which matches the position inside
-        * the mode_config.list for drivers not supporting hot-add/removing. Can
-        * be used as an array index. It is invariant over the lifetime of the
-        * connector.
-        */
-       unsigned index;
-
-       int connector_type;
-       int connector_type_id;
-       bool interlace_allowed;
-       bool doublescan_allowed;
-       bool stereo_allowed;
-       bool registered;
-       struct list_head modes; /* list of modes on this connector */
-
-       enum drm_connector_status status;
-
-       /* these are modes added by probing with DDC or the BIOS */
-       struct list_head probed_modes;
-
-       struct drm_display_info display_info;
-       const struct drm_connector_funcs *funcs;
-
-       struct drm_property_blob *edid_blob_ptr;
-       struct drm_object_properties properties;
-
-       /**
-        * @path_blob_ptr:
-        *
-        * DRM blob property data for the DP MST path property.
-        */
-       struct drm_property_blob *path_blob_ptr;
-
-       /**
-        * @tile_blob_ptr:
-        *
-        * DRM blob property data for the tile property (used mostly by DP MST).
-        * This is meant for screens which are driven through separate display
-        * pipelines represented by &drm_crtc, which might not be running with
-        * genlocked clocks. For tiled panels which are genlocked, like
-        * dual-link LVDS or dual-link DSI, the driver should try to not expose
-        * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
-        */
-       struct drm_property_blob *tile_blob_ptr;
-
-       uint8_t polled; /* DRM_CONNECTOR_POLL_* */
-
-       /* requested DPMS state */
-       int dpms;
-
-       const struct drm_connector_helper_funcs *helper_private;
-
-       /* forced on connector */
-       struct drm_cmdline_mode cmdline_mode;
-       enum drm_connector_force force;
-       bool override_edid;
-       uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-       struct drm_encoder *encoder; /* currently active encoder */
-
-       /* EDID bits */
-       uint8_t eld[MAX_ELD_BYTES];
-       bool dvi_dual;
-       int max_tmds_clock;     /* in MHz */
-       bool latency_present[2];
-       int video_latency[2];   /* [0]: progressive, [1]: interlaced */
-       int audio_latency[2];
-       int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
-       unsigned bad_edid_counter;
-
-       /* Flag for raw EDID header corruption - used in Displayport
-        * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
-        */
-       bool edid_corrupt;
-
-       struct dentry *debugfs_entry;
-
-       struct drm_connector_state *state;
-
-       /* DisplayID bits */
-       bool has_tile;
-       struct drm_tile_group *tile_group;
-       bool tile_is_single_monitor;
-
-       uint8_t num_h_tile, num_v_tile;
-       uint8_t tile_h_loc, tile_v_loc;
-       uint16_t tile_h_size, tile_v_size;
-};
-
-/**
- * struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- *     plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- *     plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- *     where N is the number of active planes for given crtc
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_plane_state {
-       struct drm_plane *plane;
-
-       struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
-       struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-       struct fence *fence;
-
-       /* Signed dest location allows it to be partially off screen */
-       int32_t crtc_x, crtc_y;
-       uint32_t crtc_w, crtc_h;
-
-       /* Source values are 16.16 fixed point */
-       uint32_t src_x, src_y;
-       uint32_t src_h, src_w;
-
-       /* Plane rotation */
-       unsigned int rotation;
-
-       /* Plane zpos */
-       unsigned int zpos;
-       unsigned int normalized_zpos;
-
-       struct drm_atomic_state *state;
-};
-
-
-/**
- * struct drm_plane_funcs - driver plane control functions
- */
-struct drm_plane_funcs {
-       /**
-        * @update_plane:
-        *
-        * This is the legacy entry point to enable and configure the plane for
-        * the given CRTC and framebuffer. It is never called to disable the
-        * plane, i.e. the passed-in crtc and fb paramters are never NULL.
-        *
-        * The source rectangle in frame buffer memory coordinates is given by
-        * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
-        * values). Devices that don't support subpixel plane coordinates can
-        * ignore the fractional part.
-        *
-        * The destination rectangle in CRTC coordinates is given by the
-        * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
-        * Devices scale the source rectangle to the destination rectangle. If
-        * scaling is not supported, and the source rectangle size doesn't match
-        * the destination rectangle size, the driver must return a
-        * -<errorname>EINVAL</errorname> error.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_update_plane() to implement this hook.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*update_plane)(struct drm_plane *plane,
-                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                           int crtc_x, int crtc_y,
-                           unsigned int crtc_w, unsigned int crtc_h,
-                           uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h);
-
-       /**
-        * @disable_plane:
-        *
-        * This is the legacy entry point to disable the plane. The DRM core
-        * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
-        * with the frame buffer ID set to 0.  Disabled planes must not be
-        * processed by the CRTC.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_disable_plane() to implement this hook.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*disable_plane)(struct drm_plane *plane);
-
-       /**
-        * @destroy:
-        *
-        * Clean up plane resources. This is only called at driver unload time
-        * through drm_mode_config_cleanup() since a plane cannot be hotplugged
-        * in DRM.
-        */
-       void (*destroy)(struct drm_plane *plane);
-
-       /**
-        * @reset:
-        *
-        * Reset plane hardware and software state to off. This function isn't
-        * called by the core directly, only through drm_mode_config_reset().
-        * It's not a helper hook only for historical reasons.
-        *
-        * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
-        * atomic state using this hook.
-        */
-       void (*reset)(struct drm_plane *plane);
-
-       /**
-        * @set_property:
-        *
-        * This is the legacy entry point to update a property attached to the
-        * plane.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_plane_set_property() to implement this hook.
-        *
-        * This callback is optional if the driver does not support any legacy
-        * driver-private properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*set_property)(struct drm_plane *plane,
-                           struct drm_property *property, uint64_t val);
-
-       /**
-        * @atomic_duplicate_state:
-        *
-        * Duplicate the current atomic state for this plane and return it.
-        * The core and helpers gurantee that any atomic state duplicated with
-        * this hook and still owned by the caller (i.e. not transferred to the
-        * driver by calling ->atomic_commit() from struct
-        * &drm_mode_config_funcs) will be cleaned up by calling the
-        * @atomic_destroy_state hook in this structure.
-        *
-        * Atomic drivers which don't subclass struct &drm_plane_state should use
-        * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
-        * state structure to extend it with driver-private state should use
-        * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
-        * duplicated in a consistent fashion across drivers.
-        *
-        * It is an error to call this hook before plane->state has been
-        * initialized correctly.
-        *
-        * NOTE:
-        *
-        * If the duplicate state references refcounted resources this hook must
-        * acquire a reference for each of them. The driver must release these
-        * references again in @atomic_destroy_state.
-        *
-        * RETURNS:
-        *
-        * Duplicated atomic state or NULL when the allocation failed.
-        */
-       struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
-
-       /**
-        * @atomic_destroy_state:
-        *
-        * Destroy a state duplicated with @atomic_duplicate_state and release
-        * or unreference all resources it references
-        */
-       void (*atomic_destroy_state)(struct drm_plane *plane,
-                                    struct drm_plane_state *state);
-
-       /**
-        * @atomic_set_property:
-        *
-        * Decode a driver-private property value and store the decoded value
-        * into the passed-in state structure. Since the atomic core decodes all
-        * standardized properties (even for extensions beyond the core set of
-        * properties which might not be implemented by all drivers) this
-        * requires drivers to subclass the state structure.
-        *
-        * Such driver-private properties should really only be implemented for
-        * truly hardware/vendor specific state. Instead it is preferred to
-        * standardize the atomic extension and decode the properties used to expose
-        * such an extension in the core.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_plane_set_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * NOTE:
-        *
-        * This function is called in the state assembly phase of atomic
-        * modesets, which can be aborted for any reason (including on
-        * userspace's request to just check whether a configuration would be
-        * possible). Drivers MUST NOT touch any persistent state (hardware or
-        * software) or data structures except the passed in @state parameter.
-        *
-        * Also since userspace controls in which order properties are set this
-        * function must not do any input validation (since the state update is
-        * incomplete and hence likely inconsistent). Instead any such input
-        * validation must be done in the various atomic_check callbacks.
-        *
-        * RETURNS:
-        *
-        * 0 if the property has been found, -EINVAL if the property isn't
-        * implemented by the driver (which shouldn't ever happen, the core only
-        * asks for properties attached to this plane). No other validation is
-        * allowed by the driver. The core already checks that the property
-        * value is within the range (integer, valid enum value, ...) the driver
-        * set when registering the property.
-        */
-       int (*atomic_set_property)(struct drm_plane *plane,
-                                  struct drm_plane_state *state,
-                                  struct drm_property *property,
-                                  uint64_t val);
-
-       /**
-        * @atomic_get_property:
-        *
-        * Reads out the decoded driver-private property. This is used to
-        * implement the GETPLANE IOCTL.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_plane_get_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success, -EINVAL if the property isn't implemented by the
-        * driver (which should never happen, the core only asks for
-        * properties attached to this plane).
-        */
-       int (*atomic_get_property)(struct drm_plane *plane,
-                                  const struct drm_plane_state *state,
-                                  struct drm_property *property,
-                                  uint64_t *val);
-       /**
-        * @late_register:
-        *
-        * This optional hook can be used to register additional userspace
-        * interfaces attached to the plane like debugfs interfaces.
-        * It is called late in the driver load sequence from drm_dev_register().
-        * Everything added from this callback should be unregistered in
-        * the early_unregister callback.
-        *
-        * Returns:
-        *
-        * 0 on success, or a negative error code on failure.
-        */
-       int (*late_register)(struct drm_plane *plane);
-
-       /**
-        * @early_unregister:
-        *
-        * This optional hook should be used to unregister the additional
-        * userspace interfaces attached to the plane from
-        * @late_register. It is called from drm_dev_unregister(),
-        * early in the driver unload sequence to disable userspace access
-        * before data structures are torn down.
-        */
-       void (*early_unregister)(struct drm_plane *plane);
-};
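Taken together, the hooks above map almost one-to-one onto the atomic helper library. As a hedged sketch (not part of this patch; my_plane_funcs is a placeholder name), a purely atomic driver could point its plane vtable at the helper defaults the comments above reference:

/* Sketch of an atomic driver's plane vtable, wiring every hook to the
 * helper default named in the documentation above. */
static const struct drm_plane_funcs my_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.set_property		= drm_atomic_helper_plane_set_property,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};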
-
-enum drm_plane_type {
-       DRM_PLANE_TYPE_OVERLAY,
-       DRM_PLANE_TYPE_PRIMARY,
-       DRM_PLANE_TYPE_CURSOR,
-};
-
-
-/**
- * struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @crtc: currently bound CRTC
- * @fb: currently bound fb
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- *     drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @state: current atomic state for this plane
- * @zpos_property: zpos property for this plane
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
  * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
+ *
+ * Each CRTC may have one or more connectors associated with it.  This structure
+ * allows the CRTC to be controlled.
  */
-struct drm_plane {
+struct drm_crtc {
        struct drm_device *dev;
+       struct device_node *port;
        struct list_head head;
 
        char *name;
@@ -1709,343 +564,86 @@ struct drm_plane {
        /**
         * @mutex:
         *
-        * Protects modeset plane state, together with the mutex of &drm_crtc
-        * this plane is linked to (when active, getting activated or getting
-        * disabled).
+        * This provides a read lock for the overall crtc state (mode, dpms
+        * state, ...) and a write lock for everything which can be updated
+        * without a full modeset (fb, cursor data, crtc properties ...). A full
+        * modeset also needs to grab dev->mode_config.connection_mutex.
         */
        struct drm_modeset_lock mutex;
 
        struct drm_mode_object base;
 
-       uint32_t possible_crtcs;
-       uint32_t *format_types;
-       unsigned int format_count;
-       bool format_default;
-
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb;
-
-       struct drm_framebuffer *old_fb;
-
-       const struct drm_plane_funcs *funcs;
-
-       struct drm_object_properties properties;
-
-       enum drm_plane_type type;
+       /* primary and cursor planes for CRTC */
+       struct drm_plane *primary;
+       struct drm_plane *cursor;
 
        /**
         * @index: Position inside the mode_config.list, can be used as an array
-        * index. It is invariant over the lifetime of the plane.
+        * index. It is invariant over the lifetime of the CRTC.
         */
        unsigned index;
 
-       const struct drm_plane_helper_funcs *helper_private;
-
-       struct drm_plane_state *state;
-
-       struct drm_property *zpos_property;
-};
-
-/**
- * struct drm_bridge_funcs - drm_bridge control functions
- * @attach: Called during drm_bridge_attach
- */
-struct drm_bridge_funcs {
-       int (*attach)(struct drm_bridge *bridge);
-
-       /**
-        * @mode_fixup:
-        *
-        * This callback is used to validate and adjust a mode. The parameter
-        * mode is the display mode that should be fed to the next element in
-        * the display chain, either the final &drm_connector or the next
-        * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
-        * requires. It can be modified by this callback and does not need to
-        * match mode.
-        *
-        * This is the only hook that allows a bridge to reject a modeset. If
-        * this function passes, all other callbacks must succeed for this
-        * configuration.
-        *
-        * NOTE:
-        *
-        * This function is called in the check phase of atomic modesets, which
-        * can be aborted for any reason (including on userspace's request to
-        * just check whether a configuration would be possible). Drivers MUST
-        * NOT touch any persistent state (hardware or software) or data
-        * structures except the passed in @adjusted_mode parameter.
-        *
-        * RETURNS:
-        *
-        * True if an acceptable configuration is possible, false if the modeset
-        * operation should be rejected.
-        */
-       bool (*mode_fixup)(struct drm_bridge *bridge,
-                          const struct drm_display_mode *mode,
-                          struct drm_display_mode *adjusted_mode);
-       /**
-        * @disable:
-        *
-        * This callback should disable the bridge. It is called right before
-        * the preceding element in the display pipe is disabled. If the
-        * preceding element is a bridge this means it's called before that
-        * bridge's ->disable() function. If the preceding element is a
-        * &drm_encoder it's called right before the encoder's ->disable(),
-        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge can assume that the display pipe (i.e. clocks and timing
-        * signals) feeding it is still running when this callback is called.
-        *
-        * The disable callback is optional.
-        */
-       void (*disable)(struct drm_bridge *bridge);
+       /* position of cursor plane on crtc */
+       int cursor_x;
+       int cursor_y;
 
-       /**
-        * @post_disable:
-        *
-        * This callback should disable the bridge. It is called right after
-        * the preceding element in the display pipe is disabled. If the
-        * preceding element is a bridge this means it's called after that
-        * bridge's ->post_disable() function. If the preceding element is a
-        * &drm_encoder it's called right after the encoder's ->disable(),
-        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge must assume that the display pipe (i.e. clocks and timing
-        * signals) feeding it is no longer running when this callback is
-        * called.
-        *
-        * The post_disable callback is optional.
-        */
-       void (*post_disable)(struct drm_bridge *bridge);
+       bool enabled;
 
-       /**
-        * @mode_set:
-        *
-        * This callback should set the given mode on the bridge. It is called
-        * after the ->mode_set() callback for the preceding element in the
-        * display pipeline has been called already. The display pipe (i.e.
-        * clocks and timing signals) is off when this function is called.
-        */
-       void (*mode_set)(struct drm_bridge *bridge,
-                        struct drm_display_mode *mode,
-                        struct drm_display_mode *adjusted_mode);
-       /**
-        * @pre_enable:
-        *
-        * This callback should enable the bridge. It is called right before
-        * the preceding element in the display pipe is enabled. If the
-        * preceding element is a bridge this means it's called before that
-        * bridge's ->pre_enable() function. If the preceding element is a
-        * &drm_encoder it's called right before the encoder's ->enable(),
-        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The display pipe (i.e. clocks and timing signals) feeding this bridge
-        * will not yet be running when this callback is called. The bridge must
-        * not enable the display link feeding the next bridge in the chain (if
-        * there is one) when this callback is called.
-        *
-        * The pre_enable callback is optional.
-        */
-       void (*pre_enable)(struct drm_bridge *bridge);
+       /* Requested mode from modesetting. */
+       struct drm_display_mode mode;
 
-       /**
-        * @enable:
-        *
-        * This callback should enable the bridge. It is called right after
-        * the preceding element in the display pipe is enabled. If the
-        * preceding element is a bridge this means it's called after that
-        * bridge's ->enable() function. If the preceding element is a
-        * &drm_encoder it's called right after the encoder's ->enable(),
-        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge can assume that the display pipe (i.e. clocks and timing
-        * signals) feeding it is running when this callback is called. This
-        * callback must enable the display link feeding the next bridge in the
-        * chain if there is one.
-        *
-        * The enable callback is optional.
+       /* Programmed mode in hw, after adjustments for encoders,
+        * crtc, panel scaling etc. Needed for timestamping etc.
         */
-       void (*enable)(struct drm_bridge *bridge);
-};
+       struct drm_display_mode hwmode;
 
-/**
- * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
- */
-struct drm_bridge {
-       struct drm_device *dev;
-       struct drm_encoder *encoder;
-       struct drm_bridge *next;
-#ifdef CONFIG_OF
-       struct device_node *of_node;
-#endif
-       struct list_head list;
-
-       const struct drm_bridge_funcs *funcs;
-       void *driver_private;
-};
+       int x, y;
+       const struct drm_crtc_funcs *funcs;
 
-/**
- * struct drm_crtc_commit - track modeset commits on a CRTC
- *
- * This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
- * is reference counted to allow waiters to safely wait on an event to complete,
- * without holding any locks.
- *
- * It has 3 different events in total to allow a fine-grained synchronization
- * between outstanding updates::
- *
- *     atomic commit thread                    hardware
- *
- *     write new state into hardware   ---->   ...
- *     signal hw_done
- *                                             switch to new state on next
- *     ...                                     v/hblank
- *
- *     wait for buffers to show up             ...
- *
- *     ...                                     send completion irq
- *                                             irq handler signals flip_done
- *     cleanup old buffers
- *
- *     signal cleanup_done
- *
- *     wait for flip_done              <----
- *     clean up atomic state
- *
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
- * and modeset state change.
- *
- * For an implementation of how to use this look at
- * drm_atomic_helper_setup_commit() from the atomic helper library.
- */
-struct drm_crtc_commit {
-       /**
-        * @crtc:
-        *
-        * DRM CRTC for this commit.
-        */
-       struct drm_crtc *crtc;
+       /* Legacy FB CRTC gamma size for reporting to userspace */
+       uint32_t gamma_size;
+       uint16_t *gamma_store;
 
-       /**
-        * @ref:
-        *
-        * Reference count for this structure. Needed to allow blocking on
-        * completions without the risk of the completion disappearing
-        * meanwhile.
-        */
-       struct kref ref;
+       /* if you are using the helper */
+       const struct drm_crtc_helper_funcs *helper_private;
 
-       /**
-        * @flip_done:
-        *
-        * Will be signaled when the hardware has flipped to the new set of
-        * buffers. Signals at the same time as when the drm event for this
-        * commit is sent to userspace, or when an out-fence is signalled. Note
-        * that for most hardware, in most cases this happens after @hw_done is
-        * signalled.
-        */
-       struct completion flip_done;
+       struct drm_object_properties properties;
 
        /**
-        * @hw_done:
-        *
-        * Will be signalled when all hw register changes for this commit have
-        * been written out. Especially when disabling a pipe this can be much
-        * later than @flip_done, since that can signal already when the
-        * screen goes black, whereas to fully shut down a pipe more register
-        * I/O is required.
+        * @state:
         *
-        * Note that this does not need to include separately reference-counted
-        * resources like backing storage buffer pinning, or runtime pm
-        * management.
+        * Current atomic state for this CRTC.
         */
-       struct completion hw_done;
+       struct drm_crtc_state *state;
 
        /**
-        * @cleanup_done:
+        * @commit_list:
         *
-        * Will be signalled after old buffers have been cleaned up by calling
-        * drm_atomic_helper_cleanup_planes(). Since this can only happen after
-        * a vblank wait completed it might be a bit later. This completion is
-        * useful to throttle updates and avoid hardware updates getting ahead
-        * of the buffer cleanup too much.
+        * List of &drm_crtc_commit structures tracking pending commits.
+        * Protected by @commit_lock. This list doesn't hold its own full
+        * reference, but borrows it from the ongoing commit. Commit entries
+        * must be removed from this list once the commit is fully completed,
+        * but before its corresponding &drm_atomic_state gets destroyed.
         */
-       struct completion cleanup_done;
+       struct list_head commit_list;
 
        /**
-        * @commit_entry:
+        * @commit_lock:
         *
-        * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+        * Spinlock to protect @commit_list.
         */
-       struct list_head commit_entry;
+       spinlock_t commit_lock;
 
        /**
-        * @event:
+        * @acquire_ctx:
         *
-        * &drm_pending_vblank_event pointer to clean up private events.
+        * Per-CRTC implicit acquire context used by atomic drivers for legacy
+        * IOCTLs, so that atomic drivers can get at the locking acquire
+        * context.
         */
-       struct drm_pending_vblank_event *event;
-};
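The three completions above are ordinary struct completion objects, so commit code can block on them directly. A minimal sketch, assuming `commit` holds its own reference via kref_get(&commit->ref) so the structure cannot vanish mid-wait:

/* Sketch: throttle a new update by waiting for the previous commit's
 * register programming to complete (hw_done from the timeline above). */
if (!wait_for_completion_timeout(&commit->hw_done,
				 msecs_to_jiffies(1000)))
	DRM_ERROR("[CRTC] hw_done timed out\n");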
-
-struct __drm_planes_state {
-       struct drm_plane *ptr;
-       struct drm_plane_state *state;
-};
-
-struct __drm_crtcs_state {
-       struct drm_crtc *ptr;
-       struct drm_crtc_state *state;
-       struct drm_crtc_commit *commit;
-};
-
-struct __drm_connnectors_state {
-       struct drm_connector *ptr;
-       struct drm_connector_state *state;
-};
-
-/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @dev: parent DRM device
- * @allow_modeset: allow full modeset
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @acquire_ctx: acquire context for this atomic modeset state update
- */
-struct drm_atomic_state {
-       struct drm_device *dev;
-       bool allow_modeset : 1;
-       bool legacy_cursor_update : 1;
-       bool legacy_set_config : 1;
-       struct __drm_planes_state *planes;
-       struct __drm_crtcs_state *crtcs;
-       int num_connector;
-       struct __drm_connnectors_state *connectors;
-
        struct drm_modeset_acquire_ctx *acquire_ctx;
-
-       /**
-        * @commit_work:
-        *
-        * Work item which can be used by the driver or helpers to execute the
-        * commit without blocking.
-        */
-       struct work_struct commit_work;
 };
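The new @commit_list and @commit_lock members give each CRTC its own view of in-flight nonblocking commits. A hedged sketch of the intended access pattern (crtc_has_pending_commit is a hypothetical helper):

/* Sketch: check whether any commit is still pending on a CRTC. Any
 * walk or emptiness check of commit_list must hold commit_lock. */
static bool crtc_has_pending_commit(struct drm_crtc *crtc)
{
	bool pending;

	spin_lock(&crtc->commit_lock);
	pending = !list_empty(&crtc->commit_list);
	spin_unlock(&crtc->commit_lock);

	return pending;
}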
 
-
 /**
  * struct drm_mode_set - new values for a CRTC config change
  * @fb: framebuffer to use for new config
@@ -2635,12 +1233,6 @@ struct drm_mode_config {
         * HDMI infoframe aspect ratio setting.
         */
        struct drm_property *aspect_ratio_property;
-       /**
-        * @dirty_info_property: Optional connector property to give userspace a
-        * hint that the DIRTY_FB ioctl should be used.
-        */
-       struct drm_property *dirty_info_property;
-
        /**
         * @degamma_lut_property: Optional CRTC property to set the LUT used to
         * convert the framebuffer's colors to linear gamma.
@@ -2702,43 +1294,7 @@ struct drm_mode_config {
        struct drm_mode_config_helper_funcs *helper_private;
 };
 
-/**
- * drm_for_each_plane_mask - iterate over planes specified by bitmask
- * @plane: the loop cursor
- * @dev: the DRM device
- * @plane_mask: bitmask of plane indices
- *
- * Iterate over all planes specified by bitmask.
- */
-#define drm_for_each_plane_mask(plane, dev, plane_mask) \
-       list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
-               for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
-
-/**
- * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
- * @encoder: the loop cursor
- * @dev: the DRM device
- * @encoder_mask: bitmask of encoder indices
- *
- * Iterate over all encoders specified by bitmask.
- */
-#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
-       list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
-               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
-
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-#define obj_to_connector(x) container_of(x, struct drm_connector, base)
-#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
-#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
-#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
-#define obj_to_property(x) container_of(x, struct drm_property, base)
-#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
-#define obj_to_plane(x) container_of(x, struct drm_plane, base)
-
-struct drm_prop_enum_list {
-       int type;
-       char *name;
-};
 
 extern __printf(6, 7)
 int drm_crtc_init_with_planes(struct drm_device *dev,
@@ -2773,184 +1329,17 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
        return 1 << drm_crtc_index(crtc);
 }
 
-int drm_connector_init(struct drm_device *dev,
-                      struct drm_connector *connector,
-                      const struct drm_connector_funcs *funcs,
-                      int connector_type);
-int drm_connector_register(struct drm_connector *connector);
-void drm_connector_unregister(struct drm_connector *connector);
-
-extern void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
-{
-       return connector->index;
-}
-
-extern __printf(5, 6)
-int drm_encoder_init(struct drm_device *dev,
-                    struct drm_encoder *encoder,
-                    const struct drm_encoder_funcs *funcs,
-                    int encoder_type, const char *name, ...);
-
-/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
-       return encoder->index;
-}
-
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-                                      struct drm_crtc *crtc)
-{
-       return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
-}
-
-extern __printf(8, 9)
-int drm_universal_plane_init(struct drm_device *dev,
-                            struct drm_plane *plane,
-                            unsigned long possible_crtcs,
-                            const struct drm_plane_funcs *funcs,
-                            const uint32_t *formats,
-                            unsigned int format_count,
-                            enum drm_plane_type type,
-                            const char *name, ...);
-extern int drm_plane_init(struct drm_device *dev,
-                         struct drm_plane *plane,
-                         unsigned long possible_crtcs,
-                         const struct drm_plane_funcs *funcs,
-                         const uint32_t *formats, unsigned int format_count,
-                         bool is_primary);
-extern void drm_plane_cleanup(struct drm_plane *plane);
-
-/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that plane within a DRM
- * device's list of planes.
- */
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
-{
-       return plane->index;
-}
-extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
-extern void drm_plane_force_disable(struct drm_plane *plane);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
                                   int *hdisplay, int *vdisplay);
 extern int drm_crtc_force_disable(struct drm_crtc *crtc);
 extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
-extern void drm_encoder_cleanup(struct drm_encoder *encoder);
-
-extern const char *drm_get_connector_status_name(enum drm_connector_status status);
-extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
-extern const char *drm_get_dpms_name(int val);
-extern const char *drm_get_dvi_i_subconnector_name(int val);
-extern const char *drm_get_dvi_i_select_name(int val);
-extern const char *drm_get_tv_subconnector_name(int val);
-extern const char *drm_get_tv_select_name(int val);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
-extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                               const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                                  const struct edid *edid);
-
-extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
-                                           const u32 *formats,
-                                           unsigned int num_formats);
-
-static inline bool drm_property_type_is(struct drm_property *property,
-               uint32_t type)
-{
-       /* instanceof for props.. handles extended type vs original types: */
-       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-               return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
-       return property->flags & type;
-}
-
-extern int drm_object_property_set_value(struct drm_mode_object *obj,
-                                        struct drm_property *property,
-                                        uint64_t val);
-extern int drm_object_property_get_value(struct drm_mode_object *obj,
-                                        struct drm_property *property,
-                                        uint64_t *value);
-extern int drm_framebuffer_init(struct drm_device *dev,
-                               struct drm_framebuffer *fb,
-                               const struct drm_framebuffer_funcs *funcs);
-extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-                                                     uint32_t id);
-extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
-extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
-extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-
-extern void drm_object_attach_property(struct drm_mode_object *obj,
-                                      struct drm_property *property,
-                                      uint64_t init_val);
-extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-                                               const char *name, int num_values);
-extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-                                        const char *name,
-                                        const struct drm_prop_enum_list *props,
-                                        int num_values);
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-                                        int flags, const char *name,
-                                        const struct drm_prop_enum_list *props,
-                                        int num_props,
-                                        uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-                                        const char *name,
-                                        uint64_t min, uint64_t max);
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-                                        int flags, const char *name,
-                                        int64_t min, int64_t max);
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-                                        int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-                                        const char *name);
-struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
-                                                   size_t length,
-                                                   const void *data);
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-                                                   uint32_t id);
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
-void drm_property_unreference_blob(struct drm_property_blob *blob);
-extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
-extern int drm_property_add_enum(struct drm_property *property, int index,
-                                uint64_t value, const char *name);
-extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev,
-                                        unsigned int num_modes,
-                                        const char * const modes[]);
-extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-
-extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-                                            struct drm_encoder *encoder);
-extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-                                        int gamma_size);
-
 extern int drm_mode_set_config_internal(struct drm_mode_set *set);
 
-extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
                                                         char topology[8]);
 extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
@@ -2958,40 +1347,7 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 extern void drm_mode_put_tile_group(struct drm_device *dev,
                                   struct drm_tile_group *tg);
 
-extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-                                      struct drm_property *property,
-                                      uint64_t value);
-
-extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-                                                             unsigned int supported_rotations);
-extern unsigned int drm_rotation_simplify(unsigned int rotation,
-                                         unsigned int supported_rotations);
-extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                                      uint degamma_lut_size,
-                                      bool has_ctm,
-                                      uint gamma_lut_size);
-
-int drm_plane_create_zpos_property(struct drm_plane *plane,
-                                  unsigned int zpos,
-                                  unsigned int min, unsigned int max);
-
-int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
-                                            unsigned int zpos);
-
 /* Helpers */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-                                            uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
-
-static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
-               uint32_t id)
-{
-       struct drm_mode_object *mo;
-       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
-       return mo ? obj_to_plane(mo) : NULL;
-}
-
 static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
        uint32_t id)
 {
@@ -3000,120 +1356,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
        return mo ? obj_to_crtc(mo) : NULL;
 }
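For a concrete caller, resolving a userspace-supplied object id with the lookup above could look like this (req->crtc_id is a hypothetical request field):

/* Sketch: id lookup in an ioctl-style handler. */
struct drm_crtc *crtc = drm_crtc_find(dev, req->crtc_id);

if (!crtc)
	return -ENOENT;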
 
-static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
-       uint32_t id)
-{
-       struct drm_mode_object *mo;
-       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
-       return mo ? obj_to_encoder(mo) : NULL;
-}
-
-/**
- * drm_connector_lookup - lookup connector object
- * @dev: DRM device
- * @id: connector object id
- *
- * This function looks up the connector object specified by id
- * and takes a reference to it.
- */
-static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
-               uint32_t id)
-{
-       struct drm_mode_object *mo;
-       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
-       return mo ? obj_to_connector(mo) : NULL;
-}
-
-static inline struct drm_property *drm_property_find(struct drm_device *dev,
-               uint32_t id)
-{
-       struct drm_mode_object *mo;
-       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
-       return mo ? obj_to_property(mo) : NULL;
-}
-
-/*
- * Extract a degamma/gamma LUT value provided by user and round it to the
- * precision supported by the hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
-                                            uint32_t bit_precision)
-{
-       uint32_t val = user_input;
-       uint32_t max = 0xffff >> (16 - bit_precision);
-
-       /* Round only if we're not using full precision. */
-       if (bit_precision < 16) {
-               val += 1UL << (16 - bit_precision - 1);
-               val >>= 16 - bit_precision;
-       }
-
-       return clamp_val(val, 0, max);
-}
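A worked example of the rounding above, for hardware with 10 bits of LUT precision:

/* drm_color_lut_extract(0x8000, 10):
 *   max = 0xffff >> (16 - 10)   = 1023
 *   val = 0x8000 + (1 << 5)     = 0x8020   (round half up)
 *   val = 0x8020 >> 6           = 0x200    = 512
 *   clamp_val(512, 0, 1023)     = 512
 */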
-
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This function increments the fb's refcount.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
-       drm_mode_object_reference(&fb->base);
-}
-
-/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This function decrements the fb's refcount and frees it if it drops to zero.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-       drm_mode_object_unreference(&fb->base);
-}
-
-/**
- * drm_framebuffer_read_refcount - read the framebuffer reference count.
- * @fb: framebuffer
- *
- * This function returns the framebuffer's reference count.
- */
-static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
-{
-       return atomic_read(&fb->base.refcount.refcount);
-}
-
-/**
- * drm_connector_reference - incr the connector refcnt
- * @connector: connector
- *
- * This function increments the connector's refcount.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
-       drm_mode_object_reference(&connector->base);
-}
-
-/**
- * drm_connector_unreference - unref a connector
- * @connector: connector to unref
- *
- * This function decrements the connector's refcount and frees it if it drops to zero.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
-       drm_mode_object_unreference(&connector->base);
-}
-
-/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, dev) \
-       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
-               for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-
-#define drm_for_each_plane(plane, dev) \
-       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
-
 #define drm_for_each_crtc(crtc, dev) \
        list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
@@ -3131,67 +1373,4 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
                !drm_modeset_is_locked(&mode_config->connection_mutex));
 }
 
-#define drm_for_each_connector(connector, dev) \
-       for (assert_drm_connector_list_read_locked(&(dev)->mode_config),        \
-            connector = list_first_entry(&(dev)->mode_config.connector_list,   \
-                                         struct drm_connector, head);          \
-            &connector->head != (&(dev)->mode_config.connector_list);          \
-            connector = list_next_entry(connector, head))
-
-#define drm_for_each_encoder(encoder, dev) \
-       list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
-
-#define drm_for_each_fb(fb, dev) \
-       for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),            \
-            fb = list_first_entry(&(dev)->mode_config.fb_list, \
-                                         struct drm_framebuffer, head);        \
-            &fb->head != (&(dev)->mode_config.fb_list);                        \
-            fb = list_next_entry(fb, head))
-
-/* drm_edid.c */
-bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_get_edid(struct drm_connector *connector,
-                         struct i2c_adapter *adapter);
-struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
-                                    struct i2c_adapter *adapter);
-struct edid *drm_edid_duplicate(const struct edid *edid);
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-
-u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
-int drm_add_modes_noedid(struct drm_connector *connector,
-                        int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
-                           int hpref, int vpref);
-
-int drm_edid_header_is_valid(const u8 *raw_edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
-                         bool *edid_corrupt);
-bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
-                              int buflen);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-                                          int hsize, int vsize, int fresh,
-                                          bool rb);
-
-/* drm_bridge.c */
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
 #endif /* __DRM_CRTC_H__ */
index 4b37afa2b73b4a106d7beef61f7b5be0198fe983..982c299e435a09703de12d32b4deec08a54d44e0 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -53,11 +54,6 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
 extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
-extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
-
-extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                          const struct drm_mode_fb_cmd2 *mode_cmd);
-
 extern void drm_helper_resume_force_mode(struct drm_device *dev);
 
 int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h
deleted file mode 100644 (file)
index 1b76d99..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Rafael Antognolli <rafael.antognolli@intel.com>
- *
- */
-
-#ifndef DRM_DP_AUX_DEV
-#define DRM_DP_AUX_DEV
-
-#include <drm/drm_dp_helper.h>
-
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
-
-int drm_dp_aux_dev_init(void);
-void drm_dp_aux_dev_exit(void);
-int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
-
-#else
-
-static inline int drm_dp_aux_dev_init(void)
-{
-       return 0;
-}
-
-static inline void drm_dp_aux_dev_exit(void)
-{
-}
-
-static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
-{
-       return 0;
-}
-
-static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-#endif
index 63b8bd50244479a4cebe0d9ddf766fca375a7bcf..2a79882cb68e8551d3719a599b482787a018fdbb 100644 (file)
 # define DP_DS_PORT_TYPE_DVI               2
 # define DP_DS_PORT_TYPE_HDMI              3
 # define DP_DS_PORT_TYPE_NON_EDID          4
+# define DP_DS_PORT_TYPE_DP_DUALMODE        5
+# define DP_DS_PORT_TYPE_WIRELESS           6
 # define DP_DS_PORT_HPD                            (1 << 3)
 /* offset 1 for VGA is maximum megapixels per second / 8 */
 /* offset 2 */
-# define DP_DS_VGA_MAX_BPC_MASK                    (3 << 0)
-# define DP_DS_VGA_8BPC                            0
-# define DP_DS_VGA_10BPC                   1
-# define DP_DS_VGA_12BPC                   2
-# define DP_DS_VGA_16BPC                   3
+# define DP_DS_MAX_BPC_MASK                (3 << 0)
+# define DP_DS_8BPC                        0
+# define DP_DS_10BPC                       1
+# define DP_DS_12BPC                       2
+# define DP_DS_16BPC                       3
 
 /* link configuration */
 #define        DP_LINK_BW_SET                      0x100
 #define DP_SOURCE_OUI                      0x300
 #define DP_SINK_OUI                        0x400
 #define DP_BRANCH_OUI                      0x500
+#define DP_BRANCH_ID                        0x503
+#define DP_BRANCH_HW_REV                    0x509
+#define DP_BRANCH_SW_REV                    0x50A
 
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
@@ -813,6 +818,13 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                               const u8 port_cap[4]);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                             const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                            const u8 port_cap[4], struct drm_dp_aux *aux);
 
 void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
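The new drm_dp_downstream_*() helpers above take the receiver DPCD plus the 4-byte downstream port capability block. A hedged usage sketch, assuming both arrays were previously read over the AUX channel with drm_dp_dpcd_read():

/* Sketch: derive a bpc limit from a DP branch device; a return of 0
 * from the helper means the branch reported no limit. */
static int sink_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			const u8 port_cap[4])
{
	int bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);

	return bpc ? bpc : 8; /* fall back to 8bpc when unknown */
}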
index 919933d1beb42241e233e9332a99911eeaa4c9cd..c3a7d440bc11f623735f43514f6d28d6fb21bafe 100644 (file)
@@ -25,6 +25,9 @@
 
 #include <linux/types.h>
 
+struct drm_device;
+struct i2c_adapter;
+
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
 #define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
@@ -423,9 +426,36 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
        return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
 }
 
+bool drm_probe_ddc(struct i2c_adapter *adapter);
 struct edid *drm_do_get_edid(struct drm_connector *connector,
        int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
                              size_t len),
        void *data);
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                    struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+                        int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+                           int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+                         bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+                              int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh,
+                                          bool rb);
 
 #endif /* __DRM_EDID_H__ */
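With the EDID helpers now declared here, their canonical consumer is a connector's ->get_modes() hook. A minimal sketch, where `adapter` stands in for the connector's DDC i2c bus:

/* Sketch of a ->get_modes() implementation built on the helpers above. */
static int sketch_get_modes(struct drm_connector *connector)
{
	struct edid *edid = drm_get_edid(connector, adapter);
	int count = 0;

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;
}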
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
new file mode 100644 (file)
index 0000000..387e33a
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_ENCODER_H__
+#define __DRM_ENCODER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_encoder_funcs - encoder controls
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+       /**
+        * @reset:
+        *
+        * Reset encoder hardware and software state to off. This function isn't
+        * called by the core directly, only through drm_mode_config_reset().
+        * It is not a helper hook purely for historical reasons.
+        */
+       void (*reset)(struct drm_encoder *encoder);
+
+       /**
+        * @destroy:
+        *
+        * Clean up encoder resources. This is only called at driver unload time
+        * through drm_mode_config_cleanup() since an encoder cannot be
+        * hotplugged in DRM.
+        */
+       void (*destroy)(struct drm_encoder *encoder);
+
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the encoder like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_encoder *encoder);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the encoder from
+        * @late_register. It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_encoder *encoder);
+};
+
+/**
+ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+       struct drm_device *dev;
+       struct list_head head;
+
+       struct drm_mode_object base;
+       char *name;
+       /**
+        * @encoder_type:
+        *
+        * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following
+        * encoder types are defined thus far:
+        *
+        * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A.
+        *
+        * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort.
+        *
+        * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel
+        *   with a proprietary parallel connector.
+        *
+        * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+        *   Component, SCART).
+        *
+        * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+        *
+        * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus.
+        *
+        * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel
+        *   bus.
+        *
+        * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow
+        *   multiple DP MST streams to share one physical encoder.
+        */
+       int encoder_type;
+
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the encoder.
+        */
+       unsigned index;
+
+       /**
+        * @possible_crtcs: Bitmask of potential CRTC bindings, using
+        * drm_crtc_index() as the index into the bitfield. The driver must set
+        * the bits for all &drm_crtc objects this encoder can be connected to
+        * before calling drm_encoder_init().
+        *
+        * In reality almost every driver gets this wrong.
+        *
+        * Note that since CRTC objects can't be hotplugged the assigned indices
+        * are stable and hence known before registering all objects.
+        */
+       uint32_t possible_crtcs;
+
+       /**
+        * @possible_clones: Bitmask of potential sibling encoders for cloning,
+        * using drm_encoder_index() as the index into the bitfield. The driver
+        * must set the bits for all &drm_encoder objects which can clone a
+        * &drm_crtc together with this encoder before calling
+        * drm_encoder_init(). Drivers should set the bit representing the
+        * encoder itself, too. Cloning bits should be set such that when two
+        * encoders can be used in a cloned configuration, each one should
+        * have the other's bit set.
+        *
+        * In reality almost every driver gets this wrong.
+        *
+        * Note that since encoder objects can't be hotplugged the assigned indices
+        * are stable and hence known before registering all objects.
+        */
+       uint32_t possible_clones;
+
+       struct drm_crtc *crtc;
+       struct drm_bridge *bridge;
+       const struct drm_encoder_funcs *funcs;
+       const struct drm_encoder_helper_funcs *helper_private;
+};
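Per the @possible_crtcs documentation above, the bitmask is expected to be filled in before registration. A hedged sketch (my_encoder and my_encoder_funcs are placeholders, error handling elided):

/* Sketch: register a TMDS encoder that the first two CRTCs may drive. */
my_encoder->possible_crtcs = BIT(0) | BIT(1);
drm_encoder_init(dev, my_encoder, &my_encoder_funcs,
		 DRM_MODE_ENCODER_TMDS, "tmds-%d", 0);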
+
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+
+__printf(5, 6)
+int drm_encoder_init(struct drm_device *dev,
+                    struct drm_encoder *encoder,
+                    const struct drm_encoder_funcs *funcs,
+                    int encoder_type, const char *name, ...);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+       return encoder->index;
+}
+
+/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Returns false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+                                      struct drm_crtc *crtc)
+{
+       return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
+/**
+ * drm_encoder_find - find a &drm_encoder
+ * @dev: DRM device
+ * @id: encoder id
+ *
+ * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+                                                  uint32_t id)
+{
+       struct drm_mode_object *mo;
+
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+
+       return mo ? obj_to_encoder(mo) : NULL;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+       list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+
+/**
+ * drm_for_each_encoder - iterate over all encoders
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all encoders of @dev.
+ */
+#define drm_for_each_encoder(encoder, dev) \
+       list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#endif
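
As a rough sketch of how the fields above fit together (the "foo_" driver
names are hypothetical, not part of this series):

    /* Register a TMDS encoder that can bind to every CRTC and cannot be
     * cloned with anything but itself. */
    static const struct drm_encoder_funcs foo_encoder_funcs = {
            .destroy = drm_encoder_cleanup,
    };

    static int foo_encoder_register(struct drm_device *dev,
                                    struct drm_encoder *encoder)
    {
            int ret;

            /* Must be set before drm_encoder_init(), per @possible_crtcs. */
            encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;

            ret = drm_encoder_init(dev, encoder, &foo_encoder_funcs,
                                   DRM_MODE_ENCODER_TMDS, NULL);
            if (ret)
                    return ret;

            /* The encoder's own index is only assigned by drm_encoder_init(),
             * so the self-clone bit is set afterwards (many drivers fill in
             * @possible_clones once all encoders are registered). */
            encoder->possible_clones = 1 << drm_encoder_index(encoder);

            return 0;
    }
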
index db8d4780eaa27607baf7857111c72b50bbae5fd4..e19458dd1a43d53b0aad1f4b0db10890d2792db7 100644 (file)
@@ -32,6 +32,7 @@
 
 struct drm_fb_helper;
 
+#include <drm/drm_crtc.h>
 #include <linux/kgdb.h>
 
 enum mode_set_atomic {
@@ -176,6 +177,7 @@ struct drm_fb_helper_connector {
  *              the screen buffer
  * @dirty_lock: spinlock protecting @dirty_clip
  * @dirty_work: worker used to flush the framebuffer
+ * @resume_work: worker used during resume if the console lock is already taken
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embed this into their overall driver structure.
@@ -196,6 +198,7 @@ struct drm_fb_helper {
        struct drm_clip_rect dirty_clip;
        spinlock_t dirty_lock;
        struct work_struct dirty_work;
+       struct work_struct resume_work;
 
        /**
         * @kernel_fb_list:
@@ -215,7 +218,6 @@ struct drm_fb_helper {
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
                           const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -263,7 +265,9 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
 void drm_fb_helper_cfb_imageblit(struct fb_info *info,
                                 const struct fb_image *image);
 
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+                                       bool suspend);
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
@@ -283,11 +287,6 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector);
 #else
-static inline int drm_fb_helper_modinit(void)
-{
-       return 0;
-}
-
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
                                        struct drm_fb_helper *helper,
                                        const struct drm_fb_helper_funcs *funcs)
@@ -417,7 +416,12 @@ static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
 }
 
 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
-                                            int state)
+                                            bool suspend)
+{
+}
+
+static inline void
+drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
 {
 }
 
@@ -475,5 +479,18 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 {
        return 0;
 }
+
 #endif
+
+static inline int
+drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
+                                             const char *name, bool primary)
+{
+#if IS_REACHABLE(CONFIG_FB)
+       return remove_conflicting_framebuffers(a, name, primary);
+#else
+       return 0;
+#endif
+}
+
 #endif
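
A hedged sketch of how the new drm_fb_helper_set_suspend_unlocked() and
@resume_work are meant to be used from system PM hooks (the "foo_" device
type is hypothetical):

    static int foo_pm_suspend(struct device *dev)
    {
            struct foo_device *foo = dev_get_drvdata(dev);

            /* Takes the console lock itself; safe from PM callbacks. */
            drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, true);
            /* ... put the hardware to sleep ... */
            return 0;
    }

    static int foo_pm_resume(struct device *dev)
    {
            struct foo_device *foo = dev_get_drvdata(dev);

            /* ... wake the hardware ... */
            /* If the console lock is contended, the resume is deferred to
             * the new @resume_work worker instead of blocking here. */
            drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, false);
            return 0;
    }
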
index 7f90a396cf2b468e2d2df6838ae4d92d245bbfe0..30c30fa87ee8892177ecd9179b035dda8fc6ae2d 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <uapi/drm/drm_fourcc.h>
 
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
 int drm_format_num_planes(uint32_t format);
 int drm_format_plane_cpp(uint32_t format, int plane);
@@ -32,6 +33,6 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
 int drm_format_vert_chroma_subsampling(uint32_t format);
 int drm_format_plane_width(int width, uint32_t format, int plane);
 int drm_format_plane_height(int height, uint32_t format, int plane);
-const char *drm_get_format_name(uint32_t format);
+char *drm_get_format_name(uint32_t format) __malloc;
 
 #endif /* __DRM_FOURCC_H__ */
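
Note the behavioural change hidden in the __malloc annotation: the format
name is now returned in a buffer allocated for the caller, who must free
it. A minimal sketch:

    /* drm_get_format_name() now kmalloc()s the string; kfree() it when
     * done (needs <linux/slab.h>). */
    char *name = drm_get_format_name(fb->pixel_format);

    DRM_DEBUG_KMS("framebuffer format %s\n", name);
    kfree(name);
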
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
new file mode 100644 (file)
index 0000000..f5ae1f4
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_FRAMEBUFFER_H__
+#define __DRM_FRAMEBUFFER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_framebuffer;
+struct drm_file;
+struct drm_device;
+
+/**
+ * struct drm_framebuffer_funcs - framebuffer hooks
+ */
+struct drm_framebuffer_funcs {
+       /**
+        * @destroy:
+        *
+        * Clean up framebuffer resources, specifically also unreference the
+        * backing storage. The core guarantees to call this function for every
+        * framebuffer successfully created by ->fb_create() in
+        * &drm_mode_config_funcs. Drivers must also call
+        * drm_framebuffer_cleanup() to release DRM core resources for this
+        * framebuffer.
+        */
+       void (*destroy)(struct drm_framebuffer *framebuffer);
+
+       /**
+        * @create_handle:
+        *
+        * Create a buffer handle in the driver-specific buffer manager (either
+        * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
+        * the core to implement the GETFB IOCTL, which returns (for
+        * sufficiently privileged users) also a native buffer handle. This can
+        * be used for seamless transitions between modesetting clients by
+        * copying the current screen contents to a private buffer and blending
+        * between that and the new contents.
+        *
+        * GEM based drivers should call drm_gem_handle_create() to create the
+        * handle.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*create_handle)(struct drm_framebuffer *fb,
+                            struct drm_file *file_priv,
+                            unsigned int *handle);
+       /**
+        * @dirty:
+        *
+        * Optional callback for the dirty fb IOCTL.
+        *
+        * Userspace can notify the driver via this callback that an area of the
+        * framebuffer has changed and should be flushed to the display
+        * hardware. This can also be used internally, e.g. by the fbdev
+        * emulation, though that's not the case currently.
+        *
+        * See the documentation for struct drm_mode_fb_dirty_cmd in drm_mode.h
+        * for more information, as the semantics and arguments map one-to-one
+        * onto this function.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*dirty)(struct drm_framebuffer *framebuffer,
+                    struct drm_file *file_priv, unsigned flags,
+                    unsigned color, struct drm_clip_rect *clips,
+                    unsigned num_clips);
+};
+
+/**
+ * struct drm_framebuffer - frame buffer object
+ *
+ * Note that the fb is refcounted for the benefit of driver internals,
+ * for example on some hardware disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank.  So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred.  In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from the
+ * userspace perspective. See drm_framebuffer_reference() and
+ * drm_framebuffer_unreference().
+ *
+ * The refcount is stored inside the mode object @base.
+ */
+struct drm_framebuffer {
+       /**
+        * @dev: DRM device this framebuffer belongs to
+        */
+       struct drm_device *dev;
+       /**
+        * @head: Place on the dev->mode_config.fb_list, access protected by
+        * dev->mode_config.fb_lock.
+        */
+       struct list_head head;
+
+       /**
+        * @base: base modeset object structure, contains the reference count.
+        */
+       struct drm_mode_object base;
+       /**
+        * @funcs: framebuffer vfunc table
+        */
+       const struct drm_framebuffer_funcs *funcs;
+       /**
+        * @pitches: Line stride per buffer. For userspace-created objects this
+        * is copied from drm_mode_fb_cmd2.
+        */
+       unsigned int pitches[4];
+       /**
+        * @offsets: Offset from buffer start to the actual pixel data in bytes,
+        * per buffer. For userspace-created objects this is copied from
+        * drm_mode_fb_cmd2.
+        *
+        * Note that this is a linear offset and does not take into account
+        * tiling or buffer layout per @modifier. It is meant to be used when
+        * the actual pixel data for this framebuffer plane starts at an offset,
+        * e.g. when multiple planes are allocated within the same backing
+        * storage buffer object. For tiled layouts this generally means
+        * @offsets must be at least tile-size aligned, but hardware often has
+        * stricter requirements.
+        *
+        * This should not be used to specify x/y pixel offsets into the buffer
+        * data (even for linear buffers). Specifying an x/y pixel offset is
+        * instead done through the source rectangle in struct &drm_plane_state.
+        */
+       unsigned int offsets[4];
+       /**
+        * @modifier: Data layout modifier, per buffer. This is used to describe
+        * tiling, or special layouts (like compression) of auxiliary buffers.
+        * For userspace-created objects this is copied from drm_mode_fb_cmd2.
+        */
+       uint64_t modifier[4];
+       /**
+        * @width: Logical width of the visible area of the framebuffer, in
+        * pixels.
+        */
+       unsigned int width;
+       /**
+        * @height: Logical height of the visible area of the framebuffer, in
+        * pixels.
+        */
+       unsigned int height;
+       /**
+        * @depth: Depth in bits per pixel for RGB formats. 0 for everything
+        * else. This is legacy information derived from @pixel_format; use the
+        * DRM FOURCC codes and helper functions directly instead.
+        */
+       unsigned int depth;
+       /**
+        * @bits_per_pixel: Storage bits used per pixel for RGB formats. 0 for
+        * everything else. This is legacy information derived from
+        * @pixel_format; use the DRM FOURCC codes and helper functions directly
+        * instead.
+        */
+       int bits_per_pixel;
+       /**
+        * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
+        * DRM_MODE_FB_MODIFIERS.
+        */
+       int flags;
+       /**
+        * @pixel_format: DRM FOURCC code describing the pixel format.
+        */
+       uint32_t pixel_format; /* fourcc format */
+       /**
+        * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
+        * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+        * universal plane.
+        */
+       int hot_x;
+       /**
+        * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
+        * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+        * universal plane.
+        */
+       int hot_y;
+       /**
+        * @filp_head: Placed on struct &drm_file fbs list_head, protected by
+        * fbs_lock in the same structure.
+        */
+       struct list_head filp_head;
+};
+
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+
+int drm_framebuffer_init(struct drm_device *dev,
+                        struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs);
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+                                              uint32_t id);
+void drm_framebuffer_remove(struct drm_framebuffer *fb);
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+       drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+       drm_mode_object_unreference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This function returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+       return atomic_read(&fb->base.refcount.refcount);
+}
+
+/**
+ * drm_for_each_fb - iterate over all framebuffers
+ * @fb: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all framebuffers of @dev. The caller must hold the fb_lock
+ * from &drm_mode_config.
+ */
+#define drm_for_each_fb(fb, dev) \
+       for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),            \
+            fb = list_first_entry(&(dev)->mode_config.fb_list, \
+                                         struct drm_framebuffer, head);        \
+            &fb->head != (&(dev)->mode_config.fb_list);                        \
+            fb = list_next_entry(fb, head))
+#endif
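
A short sketch of the iterator and refcount helpers above, e.g. for a
hypothetical debugfs dump (the seq_file "m" is assumed):

    mutex_lock(&dev->mode_config.fb_lock);
    drm_for_each_fb(fb, dev)
            seq_printf(m, "fb %u: %ux%u, refcount %u\n",
                       fb->base.id, fb->width, fb->height,
                       drm_framebuffer_read_refcount(fb));
    mutex_unlock(&dev->mode_config.fb_lock);
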
index fca1cd1b9c26805c9459166f49060bdf988c0e68..9f63736e616348cda860aa63e20331d5a90787f8 100644 (file)
@@ -210,8 +210,8 @@ drm_gem_object_reference(struct drm_gem_object *obj)
  * drm_gem_object_unreference_unlocked().
  *
  * Drivers should never call this directly in their code. Instead they should
- * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
- * *obj) wrapper function, and use that. Shared code should never call this, to
+ * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
+ * *obj)`` wrapper function, and use that. Shared code should never call this, to
  * avoid breaking drivers by accident which still depend upon dev->struct_mutex
  * locking.
  */
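
The wrapper pattern the comment above asks for, as a hypothetical sketch
(the "foo_" types are illustrative):

    static inline void
    foo_gem_object_unreference(struct foo_gem_object *obj)
    {
            /* dev->struct_mutex must be held, as documented above. */
            drm_gem_object_unreference(&obj->base);
    }
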
index 47ac92584d766d8dddb2b0526ea255911d7361c3..4fef19064b0f12cad6b70614112493fd54e51729 100644 (file)
@@ -265,11 +265,15 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
                                    u16 end);
 int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
                                  u16 end);
-int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
 int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
 int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
                             enum mipi_dsi_dcs_tear_mode mode);
 int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+                                       u16 brightness);
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+                                       u16 *brightness);
 
 /**
  * struct mipi_dsi_driver - DSI driver
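
A hedged usage sketch for the new DCS brightness helpers (the panel's
value range is assumed to fit in 16 bits; "foo_" names are illustrative):

    static int foo_panel_set_brightness(struct mipi_dsi_device *dsi, u16 level)
    {
            u16 readback;
            int ret;

            ret = mipi_dsi_dcs_set_display_brightness(dsi, level);
            if (ret < 0)
                    return ret;

            /* Optional readback to verify the panel accepted the value. */
            ret = mipi_dsi_dcs_get_display_brightness(dsi, &readback);
            if (ret < 0)
                    return ret;

            return readback == level ? 0 : -EIO;
    }
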
index fc65118e5077811c69f7a5bb6af6b33419a3bb0a..205ddcf6d55de7c574f3442785433265d4c492e0 100644 (file)
@@ -37,6 +37,7 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
+#include <linux/rbtree.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
 struct drm_mm_node {
        struct list_head node_list;
        struct list_head hole_stack;
+       struct rb_node rb;
        unsigned hole_follows : 1;
        unsigned scanned_block : 1;
        unsigned scanned_prev_free : 1;
@@ -70,6 +72,7 @@ struct drm_mm_node {
        unsigned long color;
        u64 start;
        u64 size;
+       u64 __subtree_last;
        struct drm_mm *mm;
 };
 
@@ -79,6 +82,9 @@ struct drm_mm {
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
+       /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+       struct rb_root interval_tree;
+
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
@@ -295,6 +301,12 @@ void drm_mm_init(struct drm_mm *mm,
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
 void drm_mm_init_scan(struct drm_mm *mm,
                      u64 size,
                      unsigned alignment,
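
The two new lookup helpers walk the interval tree added above; a minimal
sketch of scanning all nodes overlapping [start, last]:

    struct drm_mm_node *node;

    for (node = drm_mm_interval_first(mm, start, last);
         node;
         node = drm_mm_interval_next(node, start, last)) {
            /* node->start and node->size describe each allocated range
             * that overlaps the queried interval. */
    }
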
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
new file mode 100644 (file)
index 0000000..43460b2
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODESET_H__
+#define __DRM_MODESET_H__
+
+#include <linux/kref.h>
+struct drm_object_properties;
+struct drm_property;
+struct drm_device;
+
+/**
+ * struct drm_mode_object - base structure for modeset objects
+ * @id: userspace visible identifier
+ * @type: type of the object, one of DRM_MODE_OBJECT\_\*
+ * @properties: properties attached to this object, including values
+ * @refcount: reference count for objects with a dynamic lifetime
+ * @free_cb: free function callback, only set for objects with dynamic lifetime
+ *
+ * Base structure for modeset objects visible to userspace. Objects can be
+ * looked up using drm_mode_object_find(). Besides basic uapi interface
+ * properties like @id and @type it provides two services:
+ *
+ * - It tracks attached properties and their values. This is used by &drm_crtc,
+ *   &drm_plane and &drm_connector. Properties are attached by calling
+ *   drm_object_attach_property() before the object is visible to userspace.
+ *
+ * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it
+ *   provides reference counting through drm_mode_object_reference() and
+ *   drm_mode_object_unreference(). This is used by &drm_framebuffer,
+ *   &drm_connector and &drm_property_blob. These objects provide specialized
+ *   reference counting wrappers.
+ */
+struct drm_mode_object {
+       uint32_t id;
+       uint32_t type;
+       struct drm_object_properties *properties;
+       struct kref refcount;
+       void (*free_cb)(struct kref *kref);
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+/**
+ * struct drm_object_properties - property tracking for &drm_mode_object
+ */
+struct drm_object_properties {
+       /**
+        * @count: number of valid properties, must be less than or equal to
+        * DRM_OBJECT_MAX_PROPERTY.
+        */
+       int count;
+       /**
+        * @properties: Array of pointers to &drm_property.
+        *
+        * NOTE: if we ever start dynamically destroying properties (i.e.
+        * not at drm_mode_config_cleanup() time), then we'd have to do
+        * a better job of detaching properties from mode objects to avoid
+        * dangling property pointers:
+        */
+       struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+
+       /**
+        * @values: Array to store the property values, matching @properties. Do
+        * not read/write values directly, but use
+        * drm_object_property_get_value() and drm_object_property_set_value().
+        *
+        * Note that atomic drivers do not store mutable properties in this
+        * array, but only the decoded values in the corresponding state
+        * structure. The decoding is done using the ->atomic_get_property and
+        * ->atomic_set_property hooks of the corresponding object. Hence atomic
+        * drivers should not use drm_object_property_set_value() and
+        * drm_object_property_get_value() on mutable objects, i.e. those
+        * without the DRM_MODE_PROP_IMMUTABLE flag set.
+        */
+       uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)                         \
+       const char *fnname(int val)                             \
+       {                                                       \
+               int i;                                          \
+               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
+                       if (list[i].type == val)                \
+                               return list[i].name;            \
+               }                                               \
+               return "(unknown)";                             \
+       }
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+                                            uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+                                 struct drm_property *property,
+                                 uint64_t val);
+int drm_object_property_get_value(struct drm_mode_object *obj,
+                                 struct drm_property *property,
+                                 uint64_t *value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+                               struct drm_property *property,
+                               uint64_t init_val);
+#endif
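
A hypothetical sketch of the property-attachment flow described above
(the "foo-sharpness" property is purely illustrative):

    struct drm_property *prop;

    prop = drm_property_create_range(dev, 0, "foo-sharpness", 0, 100);
    if (!prop)
            return -ENOMEM;

    /* Attach before the object becomes visible to userspace. */
    drm_object_attach_property(&plane->base, prop, 50);
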
index ff481770d76bbccd6b34a6b9791d60f22ec3e5d7..9934d91619c1823c80d277a50cc025873903b3a1 100644 (file)
 #ifndef __DRM_MODES_H__
 #define __DRM_MODES_H__
 
+#include <linux/hdmi.h>
+
+#include <drm/drm_mode_object.h>
+#include <drm/drm_connector.h>
+
+struct videomode;
+
 /*
  * Note on terminology:  here, for brevity and convenience, we refer to connector
  * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
@@ -400,20 +407,7 @@ struct drm_display_mode {
        enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
-/* mode specified on the command line */
-struct drm_cmdline_mode {
-       bool specified;
-       bool refresh_specified;
-       bool bpp_specified;
-       int xres, yres;
-       int bpp;
-       int refresh;
-       bool rb;
-       bool interlace;
-       bool cvt;
-       bool margins;
-       enum drm_connector_force force;
-};
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
 
 /**
  * drm_mode_is_stereo - check for stereo mode flags
@@ -434,7 +428,7 @@ struct drm_cmdline_mode;
 struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
 void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
-                               const struct drm_display_mode *in);
+                              const struct drm_display_mode *in);
 int drm_mode_convert_umode(struct drm_display_mode *out,
                           const struct drm_mode_modeinfo *in);
 void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -457,8 +451,9 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
                                     struct drm_display_mode *dmode);
 void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
                                   struct videomode *vm);
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
 int of_get_drm_display_mode(struct device_node *np,
-                           struct drm_display_mode *dmode,
+                           struct drm_display_mode *dmode, u32 *bus_flags,
                            int index);
 
 void drm_mode_set_name(struct drm_display_mode *mode);
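
With the new bus_flags parameter, callers of of_get_drm_display_mode() now
also receive the bus flags decoded from the display-timings node; a hedged
sketch (OF_USE_NATIVE_MODE comes from <video/of_display_timing.h>):

    struct drm_display_mode mode;
    u32 bus_flags = 0;
    int ret;

    ret = of_get_drm_display_mode(np, &mode, &bus_flags,
                                  OF_USE_NATIVE_MODE);
    if (ret)
            return ret;
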
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
new file mode 100644 (file)
index 0000000..b8051d5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_KMS_HELPER_H__
+#define __DRM_KMS_HELPER_H__
+
+#include <drm/drmP.h>
+
+void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+                                   const struct drm_mode_fb_cmd2 *mode_cmd);
+
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                 const struct drm_crtc_funcs *funcs);
+
+#endif
index b55f21857a986611f937a152a7caaa7103a3207f..10e449c86dbd6bf26a7570a44da2b4b82dafa7c2 100644 (file)
@@ -266,6 +266,8 @@ struct drm_crtc_helper_funcs {
         * disable anything at the CRTC level. To ensure that runtime PM
         * handling (using either DPMS or the new "ACTIVE" property) works
         * @disable must be the inverse of @enable for atomic drivers.
+        * Atomic drivers should consider using @atomic_disable instead of
+        * this hook.
         *
         * NOTE:
         *
@@ -391,6 +393,28 @@ struct drm_crtc_helper_funcs {
         */
        void (*atomic_flush)(struct drm_crtc *crtc,
                             struct drm_crtc_state *old_crtc_state);
+
+       /**
+        * @atomic_disable:
+        *
+        * This callback should be used to disable the CRTC. With the atomic
+        * drivers it is called after all encoders connected to this CRTC have
+        * been shut off already using their own ->disable hook. If that
+        * sequence is too simple drivers can just add their own hooks and call
+        * it from this CRTC callback here by looping over all encoders
+        * connected to it using for_each_encoder_on_crtc().
+        *
+        * This hook is used only by atomic helpers. Atomic drivers don't
+        * need to implement it if there's no need to disable anything at the
+        * CRTC level.
+        *
+        * Compared to @disable, this hook provides the additional input
+        * parameter @old_crtc_state, which can be used to access the old
+        * state. Atomic drivers should consider using this hook instead
+        * of @disable.
+        */
+       void (*atomic_disable)(struct drm_crtc *crtc,
+                              struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -523,11 +547,40 @@ struct drm_encoder_helper_funcs {
         *
         * This callback is used both by the legacy CRTC helpers and the atomic
         * modeset helpers. It is optional in the atomic helpers.
+        *
+        * NOTE:
+        *
+        * If the driver uses the atomic modeset helpers and needs to inspect
+        * the connector state or connector display info during mode setting,
+        * @atomic_mode_set can be used instead.
         */
        void (*mode_set)(struct drm_encoder *encoder,
                         struct drm_display_mode *mode,
                         struct drm_display_mode *adjusted_mode);
 
+       /**
+        * @atomic_mode_set:
+        *
+        * This callback is used to update the display mode of an encoder.
+        *
+        * Note that the display pipe is completely off when this function is
+        * called. Drivers which need hardware to be running before they program
+        * the new display mode (because they implement runtime PM) should not
+        * use this hook, because the helper library calls it only once and not
+        * every time the display pipeline is suspended using either DPMS or the
+        * new "ACTIVE" property. Such drivers should instead move all their
+        * encoder setup into the ->enable() callback.
+        *
+        * This callback is used by the atomic modeset helpers in place of the
+        * @mode_set callback, if set by the driver. It is optional and should
+        * be used instead of @mode_set if the driver needs to inspect the
+        * connector state or display info, since there is no direct way to
+        * go from the encoder to the current connector.
+        */
+       void (*atomic_mode_set)(struct drm_encoder *encoder,
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_connector_state *conn_state);
+
        /**
         * @get_crtc:
         *
@@ -826,7 +879,7 @@ struct drm_plane_helper_funcs {
         * everything else must complete successfully.
         */
        int (*prepare_fb)(struct drm_plane *plane,
-                         const struct drm_plane_state *new_state);
+                         struct drm_plane_state *new_state);
        /**
         * @cleanup_fb:
         *
@@ -837,7 +890,7 @@ struct drm_plane_helper_funcs {
         * transitional plane helpers, but it is optional.
         */
        void (*cleanup_fb)(struct drm_plane *plane,
-                          const struct drm_plane_state *old_state);
+                          struct drm_plane_state *old_state);
 
        /**
         * @atomic_check:
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
new file mode 100644 (file)
index 0000000..43cf193
--- /dev/null
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PLANE_H__
+#define __DRM_PLANE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
+ * @zpos: priority of the given plane on crtc (optional)
+ * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
+ *     where N is the number of active planes for given crtc
+ * @src: clipped source coordinates of the plane (in 16.16)
+ * @dst: clipped destination coordinates of the plane
+ * @visible: visibility of the plane
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+       struct drm_plane *plane;
+
+       struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+       struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
+       struct fence *fence;
+
+       /* Signed dest location allows it to be partially off screen */
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w, crtc_h;
+
+       /* Source values are 16.16 fixed point */
+       uint32_t src_x, src_y;
+       uint32_t src_h, src_w;
+
+       /* Plane rotation */
+       unsigned int rotation;
+
+       /* Plane zpos */
+       unsigned int zpos;
+       unsigned int normalized_zpos;
+
+       /* Clipped coordinates */
+       struct drm_rect src, dst;
+
+       /*
+        * Is the plane actually visible? Can be false even
+        * if fb!=NULL and crtc!=NULL, due to clipping.
+        */
+       bool visible;
+
+       struct drm_atomic_state *state;
+};
+
+
+/**
+ * struct drm_plane_funcs - driver plane control functions
+ */
+struct drm_plane_funcs {
+       /**
+        * @update_plane:
+        *
+        * This is the legacy entry point to enable and configure the plane for
+        * the given CRTC and framebuffer. It is never called to disable the
+        * plane, i.e. the passed-in crtc and fb parameters are never NULL.
+        *
+        * The source rectangle in frame buffer memory coordinates is given by
+        * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
+        * values). Devices that don't support subpixel plane coordinates can
+        * ignore the fractional part.
+        *
+        * The destination rectangle in CRTC coordinates is given by the
+        * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
+        * Devices scale the source rectangle to the destination rectangle. If
+        * scaling is not supported, and the source rectangle size doesn't match
+        * the destination rectangle size, the driver must return a -EINVAL
+        * error.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_update_plane() to implement this hook.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*update_plane)(struct drm_plane *plane,
+                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h);
+
+       /**
+        * @disable_plane:
+        *
+        * This is the legacy entry point to disable the plane. The DRM core
+        * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
+        * with the frame buffer ID set to 0.  Disabled planes must not be
+        * processed by the CRTC.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_disable_plane() to implement this hook.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*disable_plane)(struct drm_plane *plane);
+
+       /**
+        * @destroy:
+        *
+        * Clean up plane resources. This is only called at driver unload time
+        * through drm_mode_config_cleanup() since a plane cannot be hotplugged
+        * in DRM.
+        */
+       void (*destroy)(struct drm_plane *plane);
+
+       /**
+        * @reset:
+        *
+        * Reset plane hardware and software state to off. This function isn't
+        * called by the core directly, only through drm_mode_config_reset().
+        * It's not a helper hook only for historical reasons.
+        *
+        * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
+        * atomic state using this hook.
+        */
+       void (*reset)(struct drm_plane *plane);
+
+       /**
+        * @set_property:
+        *
+        * This is the legacy entry point to update a property attached to the
+        * plane.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_plane_set_property() to implement this hook.
+        *
+        * This callback is optional if the driver does not support any legacy
+        * driver-private properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*set_property)(struct drm_plane *plane,
+                           struct drm_property *property, uint64_t val);
+
+       /**
+        * @atomic_duplicate_state:
+        *
+        * Duplicate the current atomic state for this plane and return it.
+        * The core and helpers guarantee that any atomic state duplicated with
+        * this hook and still owned by the caller (i.e. not transferred to the
+        * driver by calling ->atomic_commit() from struct
+        * &drm_mode_config_funcs) will be cleaned up by calling the
+        * @atomic_destroy_state hook in this structure.
+        *
+        * Atomic drivers which don't subclass struct &drm_plane_state should use
+        * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
+        * state structure to extend it with driver-private state should use
+        * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
+        * duplicated in a consistent fashion across drivers.
+        *
+        * It is an error to call this hook before plane->state has been
+        * initialized correctly.
+        *
+        * NOTE:
+        *
+        * If the duplicate state references refcounted resources this hook must
+        * acquire a reference for each of them. The driver must release these
+        * references again in @atomic_destroy_state.
+        *
+        * RETURNS:
+        *
+        * Duplicated atomic state or NULL when the allocation failed.
+        */
+       struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+
+       /**
+        * @atomic_destroy_state:
+        *
+        * Destroy a state duplicated with @atomic_duplicate_state and release
+        * or unreference all resources it references.
+        */
+       void (*atomic_destroy_state)(struct drm_plane *plane,
+                                    struct drm_plane_state *state);
+
+       /**
+        * @atomic_set_property:
+        *
+        * Decode a driver-private property value and store the decoded value
+        * into the passed-in state structure. Since the atomic core decodes all
+        * standardized properties (even for extensions beyond the core set of
+        * properties which might not be implemented by all drivers) this
+        * requires drivers to subclass the state structure.
+        *
+        * Such driver-private properties should really only be implemented for
+        * truly hardware/vendor specific state. Instead it is preferred to
+        * standardize the atomic extension and decode the properties used to expose
+        * such an extension in the core.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_plane_set_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * NOTE:
+        *
+        * This function is called in the state assembly phase of atomic
+        * modesets, which can be aborted for any reason (including on
+        * userspace's request to just check whether a configuration would be
+        * possible). Drivers MUST NOT touch any persistent state (hardware or
+        * software) or data structures except the passed in @state parameter.
+        *
+        * Also since userspace controls in which order properties are set this
+        * function must not do any input validation (since the state update is
+        * incomplete and hence likely inconsistent). Instead any such input
+        * validation must be done in the various atomic_check callbacks.
+        *
+        * RETURNS:
+        *
+        * 0 if the property has been found, -EINVAL if the property isn't
+        * implemented by the driver (which shouldn't ever happen, the core only
+        * asks for properties attached to this plane). No other validation is
+        * allowed by the driver. The core already checks that the property
+        * value is within the range (integer, valid enum value, ...) the driver
+        * set when registering the property.
+        */
+       int (*atomic_set_property)(struct drm_plane *plane,
+                                  struct drm_plane_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
+
+       /**
+        * @atomic_get_property:
+        *
+        * Reads out the decoded driver-private property. This is used to
+        * implement the GETPLANE IOCTL.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_plane_get_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success, -EINVAL if the property isn't implemented by the
+        * driver (which should never happen, the core only asks for
+        * properties attached to this plane).
+        */
+       int (*atomic_get_property)(struct drm_plane *plane,
+                                  const struct drm_plane_state *state,
+                                  struct drm_property *property,
+                                  uint64_t *val);
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the plane like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_plane *plane);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the plane from
+        * @late_register. It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_plane *plane);
+};
+
+/**
+ * enum drm_plane_type - uapi plane type enumeration
+ *
+ * For historical reasons not all planes are made the same. This enumeration is
+ * used to tell the different types of planes apart to implement the different
+ * uapi semantics for them. For userspace which is universal plane aware and
+ * which is using the atomic IOCTL there's no difference between these planes
+ * (beyond what the driver and hardware can support, of course).
+ *
+ * For compatibility with legacy userspace, only overlay planes are made
+ * available to userspace by default. Userspace clients may set the
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * wish to receive a universal plane list containing all plane types. See also
+ * drm_for_each_legacy_plane().
+ *
+ * WARNING: The values of this enum are UABI since they're exposed in the
+ * property.
+ */
+enum drm_plane_type {
+       /**
+        * @DRM_PLANE_TYPE_OVERLAY:
+        *
+        * Overlay planes represent all non-primary, non-cursor planes. Some
+        * drivers refer to these types of planes as "sprites" internally.
+        */
+       DRM_PLANE_TYPE_OVERLAY,
+
+       /**
+        * @DRM_PLANE_TYPE_PRIMARY:
+        *
+        * Primary planes represent a "main" plane for a CRTC.  Primary planes
+        * are the planes operated upon by CRTC modesetting and flipping
+        * operations described in the page_flip and set_config hooks in struct
+        * &drm_crtc_funcs.
+        */
+       DRM_PLANE_TYPE_PRIMARY,
+
+       /**
+        * @DRM_PLANE_TYPE_CURSOR:
+        *
+        * Cursor planes represent a "cursor" plane for a CRTC.  Cursor planes
+        * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
+        * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+        */
+       DRM_PLANE_TYPE_CURSOR,
+};
+
+
+/**
+ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @name: human readable name, can be overwritten by the driver
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ *     drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
+ * @zpos_property: zpos property for this plane
+ * @helper_private: mid-layer private data
+ */
+struct drm_plane {
+       struct drm_device *dev;
+       struct list_head head;
+
+       char *name;
+
+       /**
+        * @mutex:
+        *
+        * Protects modeset plane state, together with the mutex of &drm_crtc
+        * this plane is linked to (when active, getting activated or getting
+        * disabled).
+        */
+       struct drm_modeset_lock mutex;
+
+       struct drm_mode_object base;
+
+       uint32_t possible_crtcs;
+       uint32_t *format_types;
+       unsigned int format_count;
+       bool format_default;
+
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+
+       struct drm_framebuffer *old_fb;
+
+       const struct drm_plane_funcs *funcs;
+
+       struct drm_object_properties properties;
+
+       enum drm_plane_type type;
+
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the plane.
+        */
+       unsigned index;
+
+       const struct drm_plane_helper_funcs *helper_private;
+
+       struct drm_plane_state *state;
+
+       struct drm_property *zpos_property;
+};
+
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+extern __printf(8, 9)
+int drm_universal_plane_init(struct drm_device *dev,
+                            struct drm_plane *plane,
+                            unsigned long possible_crtcs,
+                            const struct drm_plane_funcs *funcs,
+                            const uint32_t *formats,
+                            unsigned int format_count,
+                            enum drm_plane_type type,
+                            const char *name, ...);
+extern int drm_plane_init(struct drm_device *dev,
+                         struct drm_plane *plane,
+                         unsigned long possible_crtcs,
+                         const struct drm_plane_funcs *funcs,
+                         const uint32_t *formats, unsigned int format_count,
+                         bool is_primary);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+       return plane->index;
+}
+extern struct drm_plane *drm_plane_from_index(struct drm_device *dev, int idx);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                                      struct drm_property *property,
+                                      uint64_t value);
+
+/**
+ * drm_plane_find - find a &drm_plane
+ * @dev: DRM device
+ * @id: plane id
+ *
+ * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+               uint32_t id)
+{
+       struct drm_mode_object *mo;
+
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+
+       return mo ? obj_to_plane(mo) : NULL;
+}
+
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+       list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+               for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+/**
+ * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all legacy planes of @dev, excluding primary and cursor planes.
+ * This is useful for implementing userspace APIs when userspace is not
+ * universal plane aware. See also enum &drm_plane_type.
+ */
+#define drm_for_each_legacy_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
+               for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+
+/**
+ * drm_for_each_plane - iterate over all planes
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all planes of @dev, including primary and cursor planes.
+ */
+#define drm_for_each_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+
+#endif
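
A minimal, hypothetical registration sketch for the plane API above
(the "foo_" names and the format list are illustrative):

    static const uint32_t foo_formats[] = {
            DRM_FORMAT_XRGB8888,
            DRM_FORMAT_RGB565,
    };

    ret = drm_universal_plane_init(dev, &foo->primary,
                                   1 << drm_crtc_index(crtc),
                                   &foo_plane_funcs, foo_formats,
                                   ARRAY_SIZE(foo_formats),
                                   DRM_PLANE_TYPE_PRIMARY, NULL);
    if (ret)
            return ret;
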
index 0e0c3573cce0552ea8a9b1b7d82c33c8079d5f9e..c18959685c06c301ffae57084a4a1f0a9805d98d 100644 (file)
@@ -27,6 +27,7 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 /*
  * Drivers that don't allow primary plane scaling may pass this macro in place
  * of the min/max scale factors
  */
 #define DRM_PLANE_HELPER_NO_SCALING (1<<16)
 
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-                 const struct drm_crtc_funcs *funcs);
-
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+                                const struct drm_rect *clip,
+                                int min_scale, int max_scale,
+                                bool can_position,
+                                bool can_update_disabled);
 int drm_plane_helper_check_update(struct drm_plane *plane,
                                  struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
new file mode 100644 (file)
index 0000000..43c4b6a
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PROPERTY_H__
+#define __DRM_PROPERTY_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_property_enum - symbolic values for enumerations
+ * @value: numeric property value for this enum entry
+ * @head: list of enum values, linked to enum_list in &drm_property
+ * @name: symbolic name for the enum
+ *
+ * For enumeration and bitmask properties this structure stores the symbolic
+ * decoding for each value. This is used for example for the rotation property.
+ */
+struct drm_property_enum {
+       uint64_t value;
+       struct list_head head;
+       char name[DRM_PROP_NAME_LEN];
+};
+
+/**
+ * struct drm_property - modeset object property
+ *
+ * This structure represents a modeset object property. It combines both the
+ * name of the property and the set of permissible values. This means that
+ * when a driver wants to use a property with the same name on different
+ * objects, but with different value ranges, it must create a property for
+ * each one. An example would be rotation of &drm_plane, when e.g. the
+ * primary plane cannot be rotated. But if both the name and the value range
+ * match, then the same
+ * property structure can be instantiated multiple times for the same object.
+ * Userspace must be able to cope with this and cannot assume that the same
+ * symbolic property will have the same modeset object ID on all modeset
+ * objects.
+ *
+ * Properties are created by one of the special functions, as explained in
+ * detail in the @flags structure member.
+ *
+ * To actually expose a property it must be attached to each object using
+ * drm_object_attach_property(). Currently properties can only be attached to
+ * &drm_connector, &drm_crtc and &drm_plane.
+ *
+ * Properties are also used as the generic metadata transport for the atomic
+ * IOCTL. Everything that was set directly in structures in the legacy modeset
+ * IOCTLs (like the plane source or destination windows, or e.g. the links to
+ * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set.
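+ *
+ * A minimal sketch of creating and attaching a range property (the "zpos"
+ * name and the plane variable are illustrative assumptions):
+ *
+ *     struct drm_property *prop;
+ *
+ *     prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
+ *     if (prop)
+ *             drm_object_attach_property(&plane->base, prop, 0);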
+ */
+struct drm_property {
+       /**
+        * @head: per-device list of properties, for cleanup.
+        */
+       struct list_head head;
+
+       /**
+        * @base: base KMS object
+        */
+       struct drm_mode_object base;
+
+       /**
+        * @flags:
+        *
+        * Property flags and type. A property needs to be one of the following
+        * types:
+        *
+        * DRM_MODE_PROP_RANGE
+        *     Range properties report their minimum and maximum admissible unsigned values.
+        *     The KMS core verifies that values set by the application fit in that
+        *     range. The range is unsigned. Range properties are created using
+        *     drm_property_create_range().
+        *
+        * DRM_MODE_PROP_SIGNED_RANGE
+        *     Range properties report their minimum and maximum admissible signed values.
+        *     The KMS core verifies that values set by the application fit in that
+        *     range. The range is signed. Range properties are created using
+        *     drm_property_create_signed_range().
+        *
+        * DRM_MODE_PROP_ENUM
+        *     Enumerated properties take a numerical value that ranges from 0 to
+        *     the number of enumerated values defined by the property minus one,
+        *     and associate a free-formed string name to each value. Applications
+        *     can retrieve the list of defined value-name pairs and use the
+        *     numerical value to get and set property instance values. Enum
+        *     properties are created using drm_property_create_enum().
+        *
+        * DRM_MODE_PROP_BITMASK
+        *     Bitmask properties are enumeration properties that additionally
+        *     restrict all enumerated values to the 0..63 range. Bitmask property
+        *     instance values combine one or more of the enumerated bits defined
+        *     by the property. Bitmask properties are created using
+        *     drm_property_create_bitmask().
+        *
+        * DRM_MODE_PROP_OBJECT
+        *     Object properties are used to link modeset objects. This is used
+        *     extensively in the atomic support to create the display pipeline,
+        *     by linking &drm_framebuffer to &drm_plane, &drm_plane to
+        *     &drm_crtc and &drm_connector to &drm_crtc. An object property can
+        *     only link to a specific type of &drm_mode_object, this limit is
+        *     enforced by the core. Object properties are created using
+        *     drm_property_create_object().
+        *
+        *     Object properties work like blob properties, but in a more
+        *     general fashion. They are limited to atomic drivers and must have
+        *     the DRM_MODE_PROP_ATOMIC flag set.
+        *
+        * DRM_MODE_PROP_BLOB
+        *     Blob properties store a binary blob without any format restriction.
+        *     The binary blobs are created as KMS standalone objects, and blob
+        *     property instance values store the ID of their associated blob
+        *     object. Blob properties are created by calling
+        *     drm_property_create() with DRM_MODE_PROP_BLOB as the type.
+        *
+        *     Actual blob objects that contain blob data are created using
+        *     drm_property_create_blob(), or through the corresponding IOCTL.
+        *
+        *     Besides the built-in limit to only accept blob objects, blob
+        *     properties work exactly like object properties. The only reason
+        *     blob properties exist is backwards compatibility with existing
+        *     userspace.
+        *
+        * In addition a property can have any combination of the below flags:
+        *
+        * DRM_MODE_PROP_ATOMIC
+        *     Set for properties which encode atomic modeset state. Such
+        *     properties are not exposed to legacy userspace.
+        *
+        * DRM_MODE_PROP_IMMUTABLE
+        *     Set for properties whose values cannot be changed by
+        *     userspace. The kernel is still allowed to update the value of
+        *     these properties. This is generally used to expose probe state to
+        *     userspace, e.g. the EDID, or the connector path property on DP
+        *     MST sinks.
+        */
+       uint32_t flags;
+
+       /**
+        * @name: symbolic name of the property
+        */
+       char name[DRM_PROP_NAME_LEN];
+
+       /**
+        * @num_values: size of the @values array.
+        */
+       uint32_t num_values;
+
+       /**
+        * @values:
+        *
+        * Array with limits and values for the property. The
+        * interpretation of these limits is dependent upon the type per @flags.
+        */
+       uint64_t *values;
+
+       /**
+        * @dev: DRM device
+        */
+       struct drm_device *dev;
+
+       /**
+        * @enum_list:
+        *
+        * List of &drm_prop_enum_list structures with the symbolic names for
+        * enum and bitmask values.
+        */
+       struct list_head enum_list;
+};
+
+/**
+ * struct drm_property_blob - Blob data for &drm_property
+ * @base: base KMS object
+ * @dev: DRM device
+ * @head_global: entry on the global blob list in &drm_mode_config
+ *     property_blob_list.
+ * @head_file: entry on the per-file blob list in &drm_file blobs list.
+ * @length: size of the blob in bytes, invariant over the lifetime of the object
+ * @data: actual data, embedded at the end of this structure
+ *
+ * Blobs are used to store bigger values than what fits directly into the 64
+ * bits available for a &drm_property.
+ *
+ * Blobs are reference counted using drm_property_reference_blob() and
+ * drm_property_unreference_blob(). They are created using
+ * drm_property_create_blob().
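+ *
+ * A rough usage sketch (illustrative; assumes drm_property_create_blob()
+ * returns an ERR_PTR() on failure, and len/data are caller-provided):
+ *
+ *     struct drm_property_blob *blob;
+ *
+ *     blob = drm_property_create_blob(dev, len, data);
+ *     if (!IS_ERR(blob))
+ *             drm_property_unreference_blob(blob);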
+ */
+struct drm_property_blob {
+       struct drm_mode_object base;
+       struct drm_device *dev;
+       struct list_head head_global;
+       struct list_head head_file;
+       size_t length;
+       unsigned char data[];
+};
+
+struct drm_prop_enum_list {
+       int type;
+       char *name;
+};
+
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+
+/**
+ * drm_property_type_is - check the type of a property
+ * @property: property to check
+ * @type: property type to compare with
+ *
+ * This is a helper function because the uapi encoding of property types is
+ * a bit special for historical reasons.
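+ *
+ * Example (illustrative; validate_enum_value() is a hypothetical helper):
+ *
+ *     if (drm_property_type_is(property, DRM_MODE_PROP_ENUM))
+ *             validate_enum_value(property, value);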
+ */
+static inline bool drm_property_type_is(struct drm_property *property,
+                                       uint32_t type)
+{
+       /* instanceof for props.. handles extended type vs original types: */
+       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+               return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+       return property->flags & type;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+                                             const char *name,
+                                             const struct drm_prop_enum_list *props,
+                                             int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+                                                int flags, const char *name,
+                                                const struct drm_prop_enum_list *props,
+                                                int num_props,
+                                                uint64_t supported_bits);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+                                              const char *name,
+                                              uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+                                                     int flags, const char *name,
+                                                     int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+                                               int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+                                             const char *name);
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name);
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+
+struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
+                                                  size_t length,
+                                                  const void *data);
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+                                                  uint32_t id);
+int drm_property_replace_global_blob(struct drm_device *dev,
+                                    struct drm_property_blob **replace,
+                                    size_t length,
+                                    const void *data,
+                                    struct drm_mode_object *obj_holds_id,
+                                    struct drm_property *prop_holds_id);
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
+void drm_property_unreference_blob(struct drm_property_blob *blob);
+
+/**
+ * drm_property_find - find property object
+ * @dev: DRM device
+ * @id: property object id
+ *
+ * Returns the property with @id, or NULL if it doesn't exist.
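+ *
+ * Example (illustrative; prop_id is an assumption):
+ *
+ *     struct drm_property *prop = drm_property_find(dev, prop_id);
+ *
+ *     if (!prop)
+ *             return -ENOENT;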
+ */
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+                                                    uint32_t id)
+{
+       struct drm_mode_object *mo;
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+       return mo ? obj_to_property(mo) : NULL;
+}
+
+#endif
index 269039722f91f458872b24db5a373d0871dd6d1c..5d112f75e04c0255e54b9c74ef8093ab46aa03d9 100644 (file)
@@ -60,6 +60,12 @@ struct drm_simple_display_pipe_funcs {
         *
         * This function is called when the underlying plane state is updated.
         * This hook is optional.
+        *
+        * This is the function from which drivers should submit the
+        * &drm_pending_vblank_event, using either
+        * drm_crtc_arm_vblank_event() when the driver supports vblank
+        * interrupt handling, or drm_crtc_send_vblank_event() directly when
+        * the hardware lacks vblank support entirely.
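+        *
+        * A rough sketch of the no-vblank case (illustrative only):
+        *
+        *      struct drm_crtc *crtc = &pipe->crtc;
+        *
+        *      if (crtc->state->event) {
+        *              spin_lock_irq(&crtc->dev->event_lock);
+        *              drm_crtc_send_vblank_event(crtc, crtc->state->event);
+        *              spin_unlock_irq(&crtc->dev->event_lock);
+        *              crtc->state->event = NULL;
+        *      }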
         */
        void (*update)(struct drm_simple_display_pipe *pipe,
                       struct drm_plane_state *plane_state);
@@ -85,6 +91,11 @@ struct drm_simple_display_pipe {
        const struct drm_simple_display_pipe_funcs *funcs;
 };
 
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+                                         struct drm_bridge *bridge);
+
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);
+
 int drm_simple_display_pipe_init(struct drm_device *dev,
                        struct drm_simple_display_pipe *pipe,
                        const struct drm_simple_display_pipe_funcs *funcs,
index 06ea8e077ec2f6435a9a34b38275db9408307fca..9c03895dc479dae56b84b8dd2f912bd499709eef 100644 (file)
  */
 
 #include <drm/drm_mm.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+struct drm_file;
+
 struct drm_vma_offset_file {
        struct rb_node vm_rb;
-       struct file *vm_filp;
+       struct drm_file *vm_tag;
        unsigned long vm_count;
 };
 
 struct drm_vma_offset_node {
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
-       struct rb_node vm_rb;
        struct rb_root vm_files;
 };
 
 struct drm_vma_offset_manager {
        rwlock_t vm_lock;
-       struct rb_root vm_addr_space_rb;
        struct drm_mm vm_addr_space_mm;
 };
 
@@ -62,10 +61,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);
 
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+                        struct drm_file *tag);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct file *filp);
+                            struct drm_file *tag);
 
 /**
  * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -216,9 +216,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
 /**
  * drm_vma_node_verify_access() - Access verification helper for TTM
  * @node: Offset node
- * @filp: Open-file
+ * @tag: Tag of file to check
  *
- * This checks whether @filp is granted access to @node. It is the same as
+ * This checks whether @tag is granted access to @node. It is the same as
  * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
  * verify_access() callbacks.
  *
@@ -226,9 +226,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
  * 0 if access is granted, -EACCES otherwise.
  */
 static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
-                                            struct file *filp)
+                                            struct drm_file *tag)
 {
-       return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+       return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
 }
 
 #endif /* __DRM_VMA_MANAGER_H__ */
index 3e419d92cf5ab7740b2ae4ac36da2522fcf9a021..a25483090cd57923d1e375238ff61e3e0ac4e7d6 100644 (file)
@@ -1,6 +1,24 @@
 #ifndef __DRM_I2C_TDA998X_H__
 #define __DRM_I2C_TDA998X_H__
 
+#include <linux/hdmi.h>
+#include <dt-bindings/display/tda998x.h>
+
+enum {
+       AFMT_UNUSED =   0,
+       AFMT_SPDIF =    TDA998x_SPDIF,
+       AFMT_I2S =      TDA998x_I2S,
+};
+
+struct tda998x_audio_params {
+       u8 config;
+       u8 format;
+       unsigned sample_width;
+       unsigned sample_rate;
+       struct hdmi_audio_infoframe cea;
+       u8 status[5];
+};
+
 struct tda998x_encoder_params {
        u8 swap_b:3;
        u8 mirr_b:1;
@@ -15,16 +33,7 @@ struct tda998x_encoder_params {
        u8 swap_e:3;
        u8 mirr_e:1;
 
-       u8 audio_cfg;
-       u8 audio_clk_cfg;
-       u8 audio_frame[6];
-
-       enum {
-               AFMT_SPDIF,
-               AFMT_I2S
-       } audio_format;
-
-       unsigned audio_sample_rate;
+       struct tda998x_audio_params audio_params;
 };
 
 #endif
index b1755f8db36b602e0883e4d5622db703edd76f2b..4e1b274e116431fa8431e540ec272bfe9c71aabe 100644 (file)
@@ -93,6 +93,6 @@ extern bool i915_gpu_turbo_disable(void);
 #define    I845_TSEG_SIZE_1M   (3 << 1)
 
 #define INTEL_BSM 0x5c
-#define   INTEL_BSM_MASK (0xFFFF << 20)
+#define   INTEL_BSM_MASK       (-(1u << 20))
 
 #endif                         /* _I915_DRM_H_ */
index 33466bfc6440363d4f36d1ca2f09d44098098e8c..0d5f4268d75f001ccf472360b74bbaa601d92681 100644 (file)
 #define INTEL_IVB_Q_IDS(info) \
        INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
 
-#define INTEL_HSW_D_IDS(info) \
+#define INTEL_HSW_IDS(info) \
        INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
        INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
        INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
        INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
        INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
-       INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
-
-#define INTEL_HSW_M_IDS(info) \
+       INTEL_VGA_DEVICE(0x0D2E, info),  /* CRW GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
 
-#define INTEL_VLV_M_IDS(info) \
+#define INTEL_VLV_IDS(info) \
        INTEL_VGA_DEVICE(0x0f30, info), \
        INTEL_VGA_DEVICE(0x0f31, info), \
        INTEL_VGA_DEVICE(0x0f32, info), \
        INTEL_VGA_DEVICE(0x0f33, info), \
-       INTEL_VGA_DEVICE(0x0157, info)
-
-#define INTEL_VLV_D_IDS(info) \
+       INTEL_VGA_DEVICE(0x0157, info), \
        INTEL_VGA_DEVICE(0x0155, info)
 
-#define INTEL_BDW_GT12M_IDS(info)  \
+#define INTEL_BDW_GT12_IDS(info)  \
        INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
        INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
        INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
        INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
        INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
        INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
-       INTEL_VGA_DEVICE(0x161E, info)  /* GT2 ULX */
-
-#define INTEL_BDW_GT12D_IDS(info) \
+       INTEL_VGA_DEVICE(0x161E, info),  /* GT2 ULX */ \
        INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
        INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
        INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
        INTEL_VGA_DEVICE(0x161D, info)  /* GT2 Workstation */
 
-#define INTEL_BDW_GT3M_IDS(info) \
+#define INTEL_BDW_GT3_IDS(info) \
        INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
        INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
        INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
-       INTEL_VGA_DEVICE(0x162E, info)  /* ULX */
-
-#define INTEL_BDW_GT3D_IDS(info) \
+       INTEL_VGA_DEVICE(0x162E, info),  /* ULX */\
        INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
        INTEL_VGA_DEVICE(0x162D, info)  /* Workstation */
 
        INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
        INTEL_VGA_DEVICE(0x163D, info)  /* Workstation */
 
-#define INTEL_BDW_M_IDS(info) \
-       INTEL_BDW_GT12M_IDS(info), \
-       INTEL_BDW_GT3M_IDS(info), \
-       INTEL_BDW_RSVDM_IDS(info)
-
-#define INTEL_BDW_D_IDS(info) \
-       INTEL_BDW_GT12D_IDS(info), \
-       INTEL_BDW_GT3D_IDS(info), \
+#define INTEL_BDW_IDS(info) \
+       INTEL_BDW_GT12_IDS(info), \
+       INTEL_BDW_GT3_IDS(info), \
+       INTEL_BDW_RSVDM_IDS(info), \
+       INTEL_BDW_GT12_IDS(info), \
+       INTEL_BDW_GT3_IDS(info), \
        INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
index 6f2c59887ba6d920527475416650976247a2794a..9eb940d6755feba2163526cefc2ef230a6d4ea69 100644 (file)
@@ -45,37 +45,7 @@ struct ttm_bo_device;
 
 struct drm_mm_node;
 
-/**
- * struct ttm_place
- *
- * @fpfn:      first valid page frame number to put the object
- * @lpfn:      last valid page frame number to put the object
- * @flags:     memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
-       unsigned        fpfn;
-       unsigned        lpfn;
-       uint32_t        flags;
-};
-
-/**
- * struct ttm_placement
- *
- * @num_placement:     number of preferred placements
- * @placement:         preferred placements
- * @num_busy_placement:        number of preferred placements when need to evict buffer
- * @busy_placement:    preferred placements when need to evict buffer
- *
- * Structure indicating the placement you request for an object.
- */
-struct ttm_placement {
-       unsigned                num_placement;
-       const struct ttm_place  *placement;
-       unsigned                num_busy_placement;
-       const struct ttm_place  *busy_placement;
-};
+struct ttm_placement;
 
 /**
  * struct ttm_bus_placement
index 4348d6d5877a213b95c4b4e546287c9032b44a1a..4f0a92185995db9b52d2062d2eecf1d1b80c3abb 100644 (file)
@@ -133,7 +133,6 @@ struct ttm_tt {
  * struct ttm_dma_tt
  *
  * @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
  * @dma_address: The DMA (bus) addresses of the pages
  * @pages_list: used by some page allocation backend
  *
@@ -143,7 +142,6 @@ struct ttm_tt {
  */
 struct ttm_dma_tt {
        struct ttm_tt ttm;
-       void **cpu_address;
        dma_addr_t *dma_address;
        struct list_head pages_list;
 };
@@ -961,7 +959,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  * ttm_bo_move_ttm
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -976,14 +974,13 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait_gpu,
+                          bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -999,8 +996,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict, bool interruptible,
-                             bool no_wait_gpu,
+                             bool interruptible, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem);
 
 /**
index 72dcbe81dd0706ee1e7e8c95c5f7d06ef38158da..c4520890f2677dd6af75e3d7aa026c2b88959c5e 100644 (file)
@@ -155,4 +155,5 @@ extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
                                     struct page *page);
 extern size_t ttm_round_pot(size_t size);
+extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
 #endif
index 8ed44f9bbdfba00412df22a7ba16b71126e209cf..932be0c8086e325dc985947d15e9b1da3385e81e 100644 (file)
@@ -30,6 +30,9 @@
 
 #ifndef _TTM_PLACEMENT_H_
 #define _TTM_PLACEMENT_H_
+
+#include <linux/types.h>
+
 /*
  * Memory regions for data placement.
  */
 #define TTM_PL_SYSTEM           0
 #define TTM_PL_TT               1
 #define TTM_PL_VRAM             2
-#define TTM_PL_PRIV0            3
-#define TTM_PL_PRIV1            4
-#define TTM_PL_PRIV2            5
-#define TTM_PL_PRIV3            6
-#define TTM_PL_PRIV4            7
-#define TTM_PL_PRIV5            8
-#define TTM_PL_SWAPPED          15
+#define TTM_PL_PRIV             3
 
 #define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
 #define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
 #define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
-#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
-#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
-#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
-#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
-#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
-#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_FLAG_PRIV        (1 << TTM_PL_PRIV)
 #define TTM_PL_MASK_MEM         0x0000FFFF
 
 /*
@@ -72,7 +63,6 @@
 #define TTM_PL_FLAG_CACHED      (1 << 16)
 #define TTM_PL_FLAG_UNCACHED    (1 << 17)
 #define TTM_PL_FLAG_WC          (1 << 18)
-#define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
 #define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
 
 #define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
 
-/*
- * Access flags to be used for CPU- and GPU- mappings.
- * The idea is that the TTM synchronization mechanism will
- * allow concurrent READ access and exclusive write access.
- * Currently GPU- and CPU accesses are exclusive.
+/**
+ * struct ttm_place
+ *
+ * @fpfn:      first valid page frame number to put the object
+ * @lpfn:      last valid page frame number to put the object
+ * @flags:     memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
  */
+struct ttm_place {
+       unsigned        fpfn;
+       unsigned        lpfn;
+       uint32_t        flags;
+};
 
-#define TTM_ACCESS_READ         (1 << 0)
-#define TTM_ACCESS_WRITE        (1 << 1)
+/**
+ * struct ttm_placement
+ *
+ * @num_placement:     number of preferred placements
+ * @placement:         preferred placements
+ * @num_busy_placement:        number of preferred placements when the buffer needs to be evicted
+ * @busy_placement:    preferred placements when the buffer needs to be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+       unsigned                num_placement;
+       const struct ttm_place  *placement;
+       unsigned                num_busy_placement;
+       const struct ttm_place  *busy_placement;
+};
 
 #endif
diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h
new file mode 100644 (file)
index 0000000..34757a3
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _DT_BINDINGS_TDA998X_H
+#define _DT_BINDINGS_TDA998X_H
+
+#define TDA998x_SPDIF  1
+#define TDA998x_I2S    2
+
+#endif /*_DT_BINDINGS_TDA998X_H */
index 4d8452c2384b6c23fbffa50352886e2b4ba04075..c5eaf2f80a4c4c76c9cd6029e8e67e0d8d0449c2 100644 (file)
@@ -1056,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
        return NULL;
 }
 
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
        static const void * __acpi_table_##name[]                       \
                __attribute__((unused))                                 \
                 = { (void *) table_id,                                 \
index 59ffaa68b11bbe1fda638fcf0d7128f3f5d2dc9d..23ddf4b46a9b016ef7adab062604c2b08f5563f7 100644 (file)
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio)
 {
        if (bio &&
            bio->bi_iter.bi_size &&
-           bio_op(bio) != REQ_OP_DISCARD)
+           bio_op(bio) != REQ_OP_DISCARD &&
+           bio_op(bio) != REQ_OP_SECURE_ERASE)
                return true;
 
        return false;
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio)
 
 static inline bool bio_no_advance_iter(struct bio *bio)
 {
-       return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+       return bio_op(bio) == REQ_OP_DISCARD ||
+              bio_op(bio) == REQ_OP_SECURE_ERASE ||
+              bio_op(bio) == REQ_OP_WRITE_SAME;
 }
 
 static inline bool bio_is_rw(struct bio *bio)
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio)
        if (bio_op(bio) == REQ_OP_DISCARD)
                return 1;
 
+       if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+               return 1;
+
        if (bio_op(bio) == REQ_OP_WRITE_SAME)
                return 1;
 
index 2c210b6a7bcf8ba3cd6a48571ff0fe8d84b21e91..e79055c8b577995c1a668f8ab228aa0e593c3432 100644 (file)
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
 {
-       if (unlikely(op == REQ_OP_DISCARD))
+       if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
                return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
        if (unlikely(op == REQ_OP_WRITE_SAME))
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
        if (unlikely(rq->cmd_type != REQ_TYPE_FS))
                return q->limits.max_hw_sectors;
 
-       if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+       if (!q->limits.chunk_sectors ||
+           req_op(rq) == REQ_OP_DISCARD ||
+           req_op(rq) == REQ_OP_SECURE_ERASE)
                return blk_queue_get_max_sectors(q, req_op(rq));
 
        return min(blk_max_size_offset(q, offset),
index 701b64a3b7c5e3e94b0487f0ec73801d06715817..89b65b82d98f5c5e77c34f967e856dcc6028dabe 100644 (file)
@@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
                  "Attempted to advance past end of bvec iter\n");
 
        while (bytes) {
-               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+               unsigned iter_len = bvec_iter_len(bv, *iter);
+               unsigned len = min(bytes, iter_len);
 
                bytes -= len;
                iter->bi_size -= len;
index 82c3d3b7269dda3c2fe864f24bc1dfca6982d168..138bbf721e70cae221544a4dbcd7a3f6175d54cd 100644 (file)
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
 
 
 /* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
 {
        msg->len = 2;
        msg->msg[1] = CEC_MSG_RECORD_OFF;
+       msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
 }
 
 struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
        if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
                *msg++ = (digital->channel.channel_number_fmt << 2) |
                         (digital->channel.major >> 8);
-               *msg++ = digital->channel.major && 0xff;
+               *msg++ = digital->channel.major & 0xff;
                *msg++ = digital->channel.minor >> 8;
                *msg++ = digital->channel.minor & 0xff;
                *msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
 }
 
 static inline void cec_msg_record_on(struct cec_msg *msg,
+                                    bool reply,
                                     const struct cec_op_record_src *rec_src)
 {
        switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
                                            rec_src->ext_phys_addr.phys_addr);
                break;
        }
+       msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
 }
 
 static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
        msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
 }
 
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+                                         __u8 size, const __u8 *vendor_cmd)
+{
+       if (size > 14)
+               size = 14;
+       msg->len = 2 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+       memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+                                         __u8 *size,
+                                         const __u8 **vendor_cmd)
+{
+       *size = msg->len - 2;
+
+       if (*size > 14)
+               *size = 14;
+       *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+                                                 __u32 vendor_id, __u8 size,
+                                                 const __u8 *vendor_cmd)
+{
+       if (size > 11)
+               size = 11;
+       msg->len = 5 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+       msg->msg[2] = vendor_id >> 16;
+       msg->msg[3] = (vendor_id >> 8) & 0xff;
+       msg->msg[4] = vendor_id & 0xff;
+       memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+                                                 __u32 *vendor_id,  __u8 *size,
+                                                 const __u8 **vendor_cmd)
+{
+       *size = msg->len - 5;
+
+       if (*size > 11)
+               *size = 11;
+       *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+       *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+                                                    __u8 size,
+                                                    const __u8 *rc_code)
+{
+       if (size > 14)
+               size = 14;
+       msg->len = 2 + size;
+       msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+       memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+                                                    __u8 *size,
+                                                    const __u8 **rc_code)
+{
+       *size = msg->len - 2;
+
+       if (*size > 14)
+               *size = 14;
+       *rc_code = msg->msg + 2;
+}
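+
+/*
+ * Illustrative example (not part of this patch): initializing and filling
+ * a vendor command message with a two-byte payload; cmd, initiator and
+ * destination are assumptions.
+ *
+ *     static const __u8 cmd[2] = { 0x01, 0x02 };
+ *     struct cec_msg msg;
+ *
+ *     cec_msg_init(&msg, initiator, destination);
+ *     cec_msg_vendor_command(&msg, 2, cmd);
+ */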
+
 static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
 {
        msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
                msg->len += 4;
                msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
                              (ui_cmd->channel_identifier.major >> 8);
-               msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+               msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
                msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
                msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
                break;
index b3e22893a002acc73d76b4825c653ca6aaafea1a..851968e803fa416b996bb32b8417b82c312a7ff2 100644 (file)
@@ -364,7 +364,7 @@ struct cec_caps {
  * @num_log_addrs: how many logical addresses should be claimed. Set by the
  *     caller.
  * @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: flags (CEC_LOG_ADDRS_FL_*). Set by the caller.
  * @osd_name: the OSD name of the device. Set by the caller.
  * @primary_device_type: the primary device type for each logical address.
  *     Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
        __u8 features[CEC_MAX_LOG_ADDRS][12];
 };
 
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK  (1 << 0)
+
 /* Events */
 
 /* Event that occurs when the adapter state changes */
index e2949397c19b0d58bbebf84d37028ae9b969546c..573c5a18908fd53970fefea291805c500fd1d7f9 100644 (file)
 #define __compiler_offsetof(a, b)                                      \
        __builtin_offsetof(a, b)
 
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 #endif
 
  */
 #define asm_volatile_goto(x...)        do { asm goto(x); asm (""); } while (0)
 
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
 #if GCC_VERSION >= 40400
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
 #if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
 #if GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
index 1bb95484272501bbc8d0603da489f56b4f87714a..668569844d37cef4d51c6c7652d5f34ded2a66d6 100644 (file)
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * object's lifetime is managed by something other than RCU.  That
  * "something other" might be reference counting or simple immortality.
  *
- * The seemingly unused void * variable is to validate @p is indeed a pointer
- * type. All pointer types silently cast to void *.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
  */
 #define lockless_dereference(p) \
 ({ \
        typeof(p) _________p1 = READ_ONCE(p); \
-       __maybe_unused const void * const _________p2 = _________p1; \
+       typeof(*(p)) *___typecheck_p __maybe_unused; \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
index 242bf530edfcfe5d720c0818dcb84c0b967b03ff..34bd80512a0c154a94abb11821ae2ec391663c18 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __CPUHOTPLUG_H
 #define __CPUHOTPLUG_H
 
+#include <linux/types.h>
+
 enum cpuhp_state {
        CPUHP_OFFLINE,
        CPUHP_CREATE_THREADS,
index 7f5a5822538539848450f7d55c94ceee50d6c0bc..0148a3046b4864733200d76a3a45ff26e38c1523 100644 (file)
@@ -118,6 +118,15 @@ typedef struct {
        u32 imagesize;
 } efi_capsule_header_t;
 
+struct efi_boot_memmap {
+       efi_memory_desc_t       **map;
+       unsigned long           *map_size;
+       unsigned long           *desc_size;
+       u32                     *desc_ver;
+       unsigned long           *key_ptr;
+       unsigned long           *buff_size;
+};
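+
+/*
+ * Illustrative use with the efi_get_memory_map() prototype updated later
+ * in this patch (all variable names are assumptions):
+ *
+ *     struct efi_boot_memmap map = {
+ *             .map            = &memory_map,
+ *             .map_size       = &map_size,
+ *             .desc_size      = &desc_size,
+ *             .desc_ver       = NULL,
+ *             .key_ptr        = NULL,
+ *             .buff_size      = &buff_size,
+ *     };
+ *
+ *     status = efi_get_memory_map(sys_table_arg, &map);
+ */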
+
 /*
  * EFI capsule flags
  */
@@ -946,7 +955,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
-            ((void *)(md) + (m)->desc_size) <= (m)->map_end;              \
+            (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;      \
             (md) = (void *)(md) + (m)->desc_size)
 
 /**
@@ -1371,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
                          efi_loaded_image_t *image, int *cmd_line_len);
 
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               efi_memory_desc_t **map,
-                               unsigned long *map_size,
-                               unsigned long *desc_size,
-                               u32 *desc_ver,
-                               unsigned long *key_ptr);
+                               struct efi_boot_memmap *map);
 
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
@@ -1457,4 +1462,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
        arch_efi_call_virt_teardown();                                  \
 })
 
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+       efi_system_table_t *sys_table_arg,
+       struct efi_boot_memmap *map,
+       void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+                                   void *handle,
+                                   struct efi_boot_memmap *map,
+                                   void *priv,
+                                   efi_exit_boot_map_processing priv_func);
 #endif /* _LINUX_EFI_H */
index 86baaa45567ce54d9499292bb2d0770c786e93ba..a44794e508df8cd3e72a8b3e37ad1a56a6107cd2 100644 (file)
@@ -51,6 +51,16 @@ struct fence_array {
 
 extern const struct fence_ops fence_array_ops;
 
+/**
+ * fence_is_array - check if a fence is from the array subclass
+ *
+ * Return true if it is a fence_array and false otherwise.
+ */
+static inline bool fence_is_array(struct fence *fence)
+{
+       return fence->ops == &fence_array_ops;
+}
+
 /**
  * to_fence_array - cast a fence to a fence_array
  * @fence: fence to cast to a fence_array
index 8cc719a637286d7262386d21c6ed40453b36a1e5..0d763053f97a2665b4571aa5d4710f362a900751 100644 (file)
@@ -49,8 +49,6 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -62,7 +60,7 @@ struct fence_cb;
  * implementer of the fence for its own purposes. Can be used in different
  * ways by different fence implementers, so do not rely on this.
  *
- * *) Since atomic bitops are used, this is not guaranteed to be the case.
+ * Since atomic bitops are used, this is not guaranteed to be the case.
  * Particularly, if the bit was set, but fence_signal was called right
  * before this bit was set, it would have been able to set the
  * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
index 3523bf62f32869f6c936f93386fa04db5f6edbb9..901e25d495ccfb71d8fc91b158a19cfe4dd9fccf 100644 (file)
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
 
 static inline struct posix_acl *
 uncached_acl_sentinel(struct task_struct *task)
index cfa6cde25f8e8a3a594fd30bfd8e42de1360826c..76cff18bb0328dda26a424da5b36eb0385272846 100644 (file)
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
 extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
                                                unsigned int);
 /* policy.c */
-extern int fscrypt_process_policy(struct inode *,
-                                       const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
 extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
 extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
 }
 
 /* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
                                const struct fscrypt_policy *p)
 {
        return -EOPNOTSUPP;
index 58205f33af023ee30cc9c18f52aa6f45fea08f59..7268ed076be8e46d5fd182042f41556c54a38737 100644 (file)
@@ -148,6 +148,7 @@ struct fsnotify_group {
        #define FS_PRIO_1       1 /* fanotify content based access control */
        #define FS_PRIO_2       2 /* fanotify pre-content access */
        unsigned int priority;
+       bool shutdown;          /* group is being shut down, don't queue more events */
 
        /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
        struct mutex mark_mutex;        /* protect marks_list */
@@ -179,7 +180,6 @@ struct fsnotify_group {
                        spinlock_t access_lock;
                        struct list_head access_list;
                        wait_queue_head_t access_waitq;
-                       atomic_t bypass_perm;
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
                        int f_flags;
                        unsigned int max_marks;
@@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
 extern void fsnotify_get_group(struct fsnotify_group *group);
 /* drop reference on a group from fsnotify_alloc_group */
 extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
 /* destroy group */
 extern void fsnotify_destroy_group(struct fsnotify_group *group);
 /* fasync handler function */
@@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
                              struct fsnotify_event *event,
                              int (*merge)(struct list_head *,
                                           struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
index 5198f8ed08a464f8da3294efe814f79e2978a009..c97eab67558f6c33877217d7f0a22df6e2bbdcca 100644 (file)
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
                                  const char *name,
                                  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
        config_group_init_type_name(&t->group, name, type);
 #endif
 }
index 645ad06b5d52134b877322f86184c2729f487517..58df02bd93c9f04d0546a32de278503918d3e4f8 100644 (file)
  * See Documentation/io-mapping.txt
  */
 
-#ifdef CONFIG_HAVE_ATOMIC_IOMAP
-
-#include <asm/iomap.h>
-
 struct io_mapping {
        resource_size_t base;
        unsigned long size;
        pgprot_t prot;
+       void __iomem *iomem;
 };
 
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+#include <asm/iomap.h>
 /*
  * For small address space machines, mapping large objects
  * into the kernel virtual space isn't practical. Where
@@ -49,34 +49,25 @@ struct io_mapping {
  */
 
 static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+                  resource_size_t base,
+                  unsigned long size)
 {
-       struct io_mapping *iomap;
        pgprot_t prot;
 
-       iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
-       if (!iomap)
-               goto out_err;
-
        if (iomap_create_wc(base, size, &prot))
-               goto out_free;
+               return NULL;
 
        iomap->base = base;
        iomap->size = size;
        iomap->prot = prot;
        return iomap;
-
-out_free:
-       kfree(iomap);
-out_err:
-       return NULL;
 }
 
 static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
 {
        iomap_free(mapping->base, mapping->size);
-       kfree(mapping);
 }
 
 /* Atomic map/unmap */
@@ -121,21 +112,46 @@ io_mapping_unmap(void __iomem *vaddr)
 #else
 
 #include <linux/uaccess.h>
-
-/* this struct isn't actually defined anywhere */
-struct io_mapping;
+#include <asm/pgtable.h>
 
 /* Create the io_mapping object*/
 static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+                  resource_size_t base,
+                  unsigned long size)
 {
-       return (struct io_mapping __force *) ioremap_wc(base, size);
+       iomap->base = base;
+       iomap->size = size;
+       iomap->iomem = ioremap_wc(base, size);
+#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
+       iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
+#elif defined(pgprot_writecombine)
+       iomap->prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+       iomap->prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+       return iomap;
 }
 
 static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
+{
+       iounmap(mapping->iomem);
+}
+
+/* Non-atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping,
+                 unsigned long offset,
+                 unsigned long size)
+{
+       return mapping->iomem + offset;
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
 {
-       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
@@ -145,30 +161,42 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 {
        preempt_disable();
        pagefault_disable();
-       return ((char __force __iomem *) mapping) + offset;
+       return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
+       io_mapping_unmap(vaddr);
        pagefault_enable();
        preempt_enable();
 }
 
-/* Non-atomic map/unmap */
-static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping,
-                 unsigned long offset,
-                 unsigned long size)
+#endif /* HAVE_ATOMIC_IOMAP */
+
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base,
+                    unsigned long size)
 {
-       return ((char __force __iomem *) mapping) + offset;
+       struct io_mapping *iomap;
+
+       iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+       if (!iomap)
+               return NULL;
+
+       if (!io_mapping_init_wc(iomap, base, size)) {
+               kfree(iomap);
+               return NULL;
+       }
+
+       return iomap;
 }
 
 static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_free(struct io_mapping *iomap)
 {
+       io_mapping_fini(iomap);
+       kfree(iomap);
 }
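+
+/*
+ * Usage sketch (illustrative; dev_priv and its gtt_mapping member are
+ * assumptions): embed a struct io_mapping and initialize it in place,
+ * or use the allocating create/free pair above.
+ *
+ *     if (!io_mapping_init_wc(&dev_priv->gtt_mapping, base, size))
+ *             return -EIO;
+ *     ...
+ *     io_mapping_fini(&dev_priv->gtt_mapping);
+ */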
 
-#endif /* HAVE_ATOMIC_IOMAP */
-
 #endif /* _LINUX_IO_MAPPING_H */
index 3267df4610125600f7e441a53a7a0fe583d2ddc1..3d70ece1031377c41933712782268eaabaee7fe5 100644 (file)
@@ -18,6 +18,11 @@ struct vm_fault;
 #define IOMAP_MAPPED   0x03    /* blocks allocated @blkno */
 #define IOMAP_UNWRITTEN        0x04    /* blocks allocated @blkno in unwritten state */
 
+/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01    /* contains multiple blocks/extents */
+
 /*
  * Magic value for blkno:
  */
@@ -27,7 +32,8 @@ struct iomap {
        sector_t                blkno;  /* 1st sector of mapping, 512b units */
        loff_t                  offset; /* file offset of mapping, bytes */
        u64                     length; /* length of mapping, bytes */
-       int                     type;   /* type of mapping */
+       u16                     type;   /* type of mapping */
+       u16                     flags;  /* flags for mapping */
        struct block_device     *bdev;  /* block device for I/O */
 };
 
index b52424eaa0ed38e932d174f6a950a488d823399b..0ac26c892fe25c11030415ec48d5580b3d3fefa2 100644 (file)
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
 #endif
 
+/*
+ * The irqsave variants are for use in non-interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+       raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags)    \
+       raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
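+/*
+ * Illustrative use from non-interrupt code (gc is a hypothetical
+ * struct irq_chip_generic pointer):
+ *
+ *     unsigned long flags;
+ *
+ *     irq_gc_lock_irqsave(gc, flags);
+ *     ...update generic chip state...
+ *     irq_gc_unlock_irqrestore(gc, flags);
+ */
+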
 static inline void irq_reg_writel(struct irq_chip_generic *gc,
                                  u32 val, int reg_offset)
 {
index 56b0b7ec66aacd6bb895deb21d818ef9b068e801..99ac022edc60633be531e5df89d252d26f1b0766 100644 (file)
  */
 #define E_ITS_MOVI_UNMAPPED_INTERRUPT          0x010107
 #define E_ITS_MOVI_UNMAPPED_COLLECTION         0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT           0x010307
 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT         0x010507
 #define E_ITS_MAPD_DEVICE_OOR                  0x010801
 #define E_ITS_MAPC_PROCNUM_OOR                 0x010902
index 01e908ac4a39a7ed65a68a59f097b302a80c3b57..9c28b4d4c90b137ac72acad323ae2785e3d4dcce 100644 (file)
@@ -1113,8 +1113,20 @@ struct kvm_device {
 /* create, destroy, and name are mandatory */
 struct kvm_device_ops {
        const char *name;
+
+       /*
+        * create is called holding kvm->lock and any operations not suitable
+        * to do while holding the lock should be deferred to init (see
+        * below).
+        */
        int (*create)(struct kvm_device *dev, u32 type);
 
+       /*
+        * init is called after create if create is successful and is called
+        * outside of holding kvm->lock.
+        */
+       void (*init)(struct kvm_device *dev);
+
        /*
         * Destroy is responsible for freeing dev.
         *
index 4429d255c8ab6c7524436d2ee36fdcea57304447..5e5b2969d93167a8337823dcc74b15ce53870307 100644 (file)
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
 
 #else
 
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
        return -1; /* no node preference */
 }
 
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644 (file)
index 0000000..304985e
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n)                             ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK               BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n)               ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK             CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16               CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32               CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64               CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n)               ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK             CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16               CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32               CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64               CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n)                    ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK                  CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN              CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX             CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX             CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX            CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX            CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC          CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX            CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX            CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC          CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX            CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX            CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC          CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n)                    ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK                  CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN              CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX             CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX             CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX            CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX            CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC          CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX            CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX            CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC          CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX            CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX            CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC          CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n)                    ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK                  CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN              CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX             CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX             CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX            CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX            CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC          CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX            CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX            CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC          CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX            CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX            CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC          CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD                     BIT(16)
+#define CFGCHIP1_HPIENA                                BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n)               ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK             CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16               CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32               CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64               CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC                     BIT(12)
+#define CFGCHIP1_AMUTESEL0(n)                  ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK                        CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW                 CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0              CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1              CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2              CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3              CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4              CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5              CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6              CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7              CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD                      BIT(17)
+#define CFGCHIP2_VBUSSENSE                     BIT(16)
+#define CFGCHIP2_RESET                         BIT(15)
+#define CFGCHIP2_OTGMODE(n)                    ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK                  CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE           CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST            CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE          CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW   CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX                 BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX                 BIT(11)
+#define CFGCHIP2_PHYPWRDN                      BIT(10)
+#define CFGCHIP2_OTGPWRDN                      BIT(9)
+#define CFGCHIP2_DATPOL                                BIT(8)
+#define CFGCHIP2_USB1SUSPENDM                  BIT(7)
+#define CFGCHIP2_PHY_PLLON                     BIT(6)
+#define CFGCHIP2_SESENDEN                      BIT(5)
+#define CFGCHIP2_VBDTCTEN                      BIT(4)
+#define CFGCHIP2_REFFREQ(n)                    ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK                  CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ                 CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ                 CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ                 CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ               CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ               CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ                 CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ                 CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ                 CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ                 CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL                      BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC                 BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK              BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC                 BIT(4)
+#define CFGCHIP3_PRUEVTSEL                     BIT(3)
+#define CFGCHIP3_DIV45PENA                     BIT(2)
+#define CFGCHIP3_EMA_CLKSRC                    BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0                     BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
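
A hypothetical consumer sketch for the header above: look up the CFGCHIP
syscon regmap and force USB OTG host mode through the CFGCHIP2 bits. The
compatible string and the calling context are assumptions for
illustration only.

#include <linux/err.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int demo_force_usb_host(void)
{
        struct regmap *cfgchip;

        cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
        if (IS_ERR(cfgchip))
                return PTR_ERR(cfgchip);

        /* CFGCHIP(2) is the byte offset of the CFGCHIP2 register */
        return regmap_update_bits(cfgchip, CFGCHIP(2),
                                  CFGCHIP2_OTGMODE_MASK,
                                  CFGCHIP2_OTGMODE_FORCE_HOST);
}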
index 2567a87872b0afbf0a5db0716871aea88626b3d7..7f55b8b410328f764bbed1949711d588406fe29c 100644 (file)
 /*
  * time in us for processing a single channel, calculated as follows:
  *
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
  *
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
  *
  * clock frequency: 26MHz / 8 = 3.25MHz
  * clock period: 1 / 3.25MHz = 308ns
  *
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = ~82 ms (rounded up to 83 ms)
  */
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
 
 #define TSCADC_CELLS           2
 
index 21bc4557b67ad9b7aec4f81b4fd02ef9714ac7eb..d1f9a581aca876978a32e18f73841c5cdc24058c 100644 (file)
@@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
 };
 
 struct mlx5_ifc_ptys_reg_bits {
-       u8         an_disable_cap[0x1];
+       u8         reserved_at_0[0x1];
        u8         an_disable_admin[0x1];
-       u8         reserved_at_2[0x6];
+       u8         an_disable_cap[0x1];
+       u8         reserved_at_3[0x5];
        u8         local_port[0x8];
        u8         reserved_at_10[0xd];
        u8         proto_mask[0x3];
index 08ed53eeedd5fd203a961310367ef8a206c76696..ef815b9cd42696bc70db1e9f35e39a9c295afacd 100644 (file)
@@ -2014,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
 
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
 
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
index f2e4e90621ec25c237703bc5b1ecba814d1afce9..7f2ae99e5daf39406fa5b166f6a1a75c464d1fcd 100644 (file)
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 #  define is_migrate_cma(migratetype) false
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -826,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  */
 #define zone_idx(zone)         ((zone) - (zone)->zone_pgdat->node_zones)
 
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+       return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
 {
-       return (!!zone->present_pages);
+       return zone->present_pages;
 }
 
 extern int movable_zone;
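
A hypothetical sketch of the usage pattern the comment above describes:
reclaim-side code walks a node's zones and skips any zone without
buddy-managed pages, even though populated_zone() may still be true for
it.

static void demo_scan_node(pg_data_t *pgdat)
{
        int i;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = &pgdat->node_zones[i];

                if (!managed_zone(zone))
                        continue;       /* fully reserved: nothing to reclaim */
                /* ... scan/reclaim this zone ... */
        }
}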
index 4f0bfe5912b2f1eb9be7a5c25f4e1ec64c2fafbe..e8c81fbd5f9cd11d4ac01adb929c7d1eee86e69e 100644 (file)
@@ -270,6 +270,8 @@ enum {
        MSI_FLAG_MULTI_PCI_MSI          = (1 << 2),
        /* Support PCI MSIX interrupts */
        MSI_FLAG_PCI_MSIX               = (1 << 3),
+       /* Needs early activation, required for PCI */
+       MSI_FLAG_ACTIVATE_EARLY         = (1 << 4),
 };
 
 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
index 076df5360ba50544b0d835b1290038cf85be30e4..e8d79d4ebcfe4468c62340379ac7b2583e7fb724 100644 (file)
@@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
+bool netdev_is_rx_handler_busy(struct net_device *dev);
 int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);
@@ -3891,8 +3892,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev,
-                      bool (*type_check)(const struct net_device *dev));
+int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
index 80ca889b164e3eab5b42c7249d264f55650e44bf..664da00486257519acadedde60b5de1571a8d540 100644 (file)
@@ -15,6 +15,6 @@ struct nf_acct;
 struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-extern int nfnl_acct_overquota(const struct sk_buff *skb,
-                             struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+                       struct nf_acct *nfacct);
 #endif /* _NFNL_ACCT_H */
index d8b37bab2887e75ed0c7f3cc58a57f2594109179..7676557ce357d682c3c47f0599e66bdd8a42225f 100644 (file)
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       uuid_le         hostid;
+       uuid_be         hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
index 66a1260b33de9a48fe0dcb2c1d4460c44950c457..01e84436cddfec1da8b871b6343d72a2420ba070 100644 (file)
@@ -571,56 +571,57 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-       int ret = 0;
        char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return ret;
+               return 0;
 
+       if (unlikely(uaddr > end))
+               return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
-       while (uaddr <= end) {
-               ret = __put_user(0, uaddr);
-               if (ret != 0)
-                       return ret;
+       do {
+               if (unlikely(__put_user(0, uaddr) != 0))
+                       return -EFAULT;
                uaddr += PAGE_SIZE;
-       }
+       } while (uaddr <= end);
 
        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
-               ret = __put_user(0, end);
+               return __put_user(0, end);
 
-       return ret;
+       return 0;
 }
 
 static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
 {
        volatile char c;
-       int ret = 0;
        const char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return ret;
+               return 0;
 
-       while (uaddr <= end) {
-               ret = __get_user(c, uaddr);
-               if (ret != 0)
-                       return ret;
+       if (unlikely(uaddr > end))
+               return -EFAULT;
+
+       do {
+               if (unlikely(__get_user(c, uaddr) != 0))
+                       return -EFAULT;
                uaddr += PAGE_SIZE;
-       }
+       } while (uaddr <= end);
 
        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
-               ret = __get_user(c, end);
-               (void)c;
+               return __get_user(c, end);
        }
 
-       return ret;
+       (void)c;
+       return 0;
 }
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
index 2599a980340f44f2550bacded406cb34f38808dc..0ab8359656696d6e5d35a89fd1b22c7dc08145b4 100644 (file)
@@ -682,15 +682,6 @@ struct pci_driver {
 
 #define        to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
-/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
-       const struct pci_device_id _table[]
-
 /**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID
@@ -1251,10 +1242,12 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
                      unsigned int command_bits, u32 flags);
 
-#define PCI_IRQ_NOLEGACY       (1 << 0) /* don't use legacy interrupts */
-#define PCI_IRQ_NOMSI          (1 << 1) /* don't use MSI interrupts */
-#define PCI_IRQ_NOMSIX         (1 << 2) /* don't use MSI-X interrupts */
-#define PCI_IRQ_NOAFFINITY     (1 << 3) /* don't auto-assign affinity */
+#define PCI_IRQ_LEGACY         (1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI            (1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX           (1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY       (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+       (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
 
 /* kmem_cache style wrapper around pci_alloc_consistent() */
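
A hypothetical usage sketch for the new positive-polarity flags: a driver
asks for one to four vectors of any type and lets the PCI core fall back
from MSI-X to MSI to legacy as needed. The demo_* name is invented for
illustration.

static int demo_setup_irqs(struct pci_dev *pdev)
{
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
        if (nvec < 0)
                return nvec;

        /* then: request_irq(pci_irq_vector(pdev, 0), ...) and so on */
        return 0;
}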
 
index 8ed4326164cc843b41da6fbfe69d85cee2d61232..2b6b43cc0dd5121d8d4f6024f8ec67f862ff3328 100644 (file)
@@ -743,7 +743,9 @@ struct perf_event_context {
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
+#ifdef CONFIG_CGROUP_PERF
        int                             nr_cgroups;      /* cgroup evts */
+#endif
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
 };
@@ -769,7 +771,9 @@ struct perf_cpu_context {
        unsigned int                    hrtimer_active;
 
        struct pmu                      *unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp;
+#endif
 };
 
 struct perf_output_handle {
index 8dc155dab3ed4950ef7f2ce6bfb737b3617b0e68..696a56be7d3e2ba0f54d92ac059941470b228ccc 100644 (file)
@@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold;
  * and other debug macros are compiled out unless either DEBUG is defined
  * or CONFIG_DYNAMIC_DEBUG is set.
  */
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...)     __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     __pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      __pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       __pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      __pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    __pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      __pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...)     printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+       printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+       printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+       printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+       printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+       printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /*
  * Like KERN_CONT, pr_cont() should only be used when continuing
  * a line with no newline ('\n') enclosed. Otherwise it defaults
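
A hypothetical usage sketch: pr_fmt() is expanded inside the pr_* macros
above, so defining it before any includes prefixes every message a file
emits. The "demo: " prefix is illustrative.

#define pr_fmt(fmt) "demo: " fmt

#include <linux/printk.h>

static void demo_report(int mv)
{
        /* prints "demo: voltage out of range: <mv> mV" at KERN_WARNING */
        pr_warn("voltage out of range: %d mV\n", mv);
}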
index b1e3c57c7117c936954d55a1c635709bcb9c9bf9..d6c4177df7cb690537384d8626fe41690579146a 100644 (file)
@@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params {
        u8 max_tc;
 };
 
+enum qed_dcbx_sf_ieee_type {
+       QED_DCBX_SF_IEEE_ETHTYPE,
+       QED_DCBX_SF_IEEE_TCP_PORT,
+       QED_DCBX_SF_IEEE_UDP_PORT,
+       QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
 struct qed_app_entry {
        bool ethtype;
+       enum qed_dcbx_sf_ieee_type sf_ieee;
        bool enabled;
        u8 prio;
        u16 proto_id;
index de1f64318fc4ec02aa5f2b449bbbd152bc1a0144..fcb4c364617329f10257c5cfe6d86d75d42c7132 100644 (file)
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
        sctp_authhdr_t auth_hdr;
 } __packed sctp_auth_chunk_t;
 
-struct sctp_info {
-       __u32   sctpi_tag;
-       __u32   sctpi_state;
-       __u32   sctpi_rwnd;
-       __u16   sctpi_unackdata;
-       __u16   sctpi_penddata;
-       __u16   sctpi_instrms;
-       __u16   sctpi_outstrms;
-       __u32   sctpi_fragmentation_point;
-       __u32   sctpi_inqueue;
-       __u32   sctpi_outqueue;
-       __u32   sctpi_overall_error;
-       __u32   sctpi_max_burst;
-       __u32   sctpi_maxseg;
-       __u32   sctpi_peer_rwnd;
-       __u32   sctpi_peer_tag;
-       __u8    sctpi_peer_capable;
-       __u8    sctpi_peer_sack;
-       __u16   __reserved1;
-
-       /* assoc status info */
-       __u64   sctpi_isacks;
-       __u64   sctpi_osacks;
-       __u64   sctpi_opackets;
-       __u64   sctpi_ipackets;
-       __u64   sctpi_rtxchunks;
-       __u64   sctpi_outofseqtsns;
-       __u64   sctpi_idupchunks;
-       __u64   sctpi_gapcnt;
-       __u64   sctpi_ouodchunks;
-       __u64   sctpi_iuodchunks;
-       __u64   sctpi_oodchunks;
-       __u64   sctpi_iodchunks;
-       __u64   sctpi_octrlchunks;
-       __u64   sctpi_ictrlchunks;
-
-       /* primary transport info */
-       struct sockaddr_storage sctpi_p_address;
-       __s32   sctpi_p_state;
-       __u32   sctpi_p_cwnd;
-       __u32   sctpi_p_srtt;
-       __u32   sctpi_p_rto;
-       __u32   sctpi_p_hbinterval;
-       __u32   sctpi_p_pathmaxrxt;
-       __u32   sctpi_p_sackdelay;
-       __u32   sctpi_p_sackfreq;
-       __u32   sctpi_p_ssthresh;
-       __u32   sctpi_p_partial_bytes_acked;
-       __u32   sctpi_p_flight_size;
-       __u16   sctpi_p_error;
-       __u16   __reserved2;
-
-       /* sctp sock info */
-       __u32   sctpi_s_autoclose;
-       __u32   sctpi_s_adaptation_ind;
-       __u32   sctpi_s_pd_point;
-       __u8    sctpi_s_nodelay;
-       __u8    sctpi_s_disable_fragments;
-       __u8    sctpi_s_v4mapped;
-       __u8    sctpi_s_frag_interleave;
-       __u32   sctpi_s_type;
-       __u32   __reserved3;
-};
-
 struct sctp_infox {
        struct sctp_info *sctpinfo;
        struct sctp_association *asoc;
index 923266cd294a33c8d98cbe274df0991f6647dafa..48ec7651989b093fc015e44b59657c7000e11012 100644 (file)
@@ -111,7 +111,6 @@ struct uart_8250_port {
                                                 *   if no_console_suspend
                                                 */
        unsigned char           probe;
-       struct mctrl_gpios      *gpios;
 #define UART_PROBE_RSA (1 << 0)
 
        /*
index 6f0b3e0adc73674f56449f49e61a7264463efd5f..0f665cb26b505729fad04be94ead98ff99e79359 100644 (file)
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
               __skb_linearize(skb) : 0;
 }
 
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+                    unsigned int off)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_block_sub(skb->csum,
+                                          csum_partial(start, len, 0), off);
+       else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                skb_checksum_start_offset(skb) < 0)
+               skb->ip_summed = CHECKSUM_NONE;
+}
+
 /**
  *     skb_postpull_rcsum - update checksum for received skb after pull
  *     @skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *     update the CHECKSUM_COMPLETE checksum, or set ip_summed to
  *     CHECKSUM_NONE so that it can be recomputed from scratch.
  */
-
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
 {
-       if (skb->ip_summed == CHECKSUM_COMPLETE)
-               skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
-       else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                skb_checksum_start_offset(skb) < 0)
-               skb->ip_summed = CHECKSUM_NONE;
+       __skb_postpull_rcsum(skb, start, len, 0);
 }
 
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+                    unsigned int off)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_block_add(skb->csum,
+                                          csum_partial(start, len, 0), off);
+}
 
+/**
+ *     skb_postpush_rcsum - update checksum for received skb after push
+ *     @skb: buffer to update
+ *     @start: start of data after push
+ *     @len: length of data pushed
+ *
+ *     After doing a push on a received packet, you need to call this to
+ *     update the CHECKSUM_COMPLETE checksum.
+ */
 static inline void skb_postpush_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
 {
-       /* For performing the reverse operation to skb_postpull_rcsum(),
-        * we can instead of ...
-        *
-        *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-        *
-        * ... just use this equivalent version here to save a few
-        * instructions. Feeding csum of 0 in csum_partial() and later
-        * on adding skb->csum is equivalent to feed skb->csum in the
-        * first place.
-        */
-       if (skb->ip_summed == CHECKSUM_COMPLETE)
-               skb->csum = csum_partial(start, len, skb->csum);
+       __skb_postpush_rcsum(skb, start, len, 0);
 }
 
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
 /**
  *     skb_push_rcsum - push skb and update receive checksum
  *     @skb: buffer to update
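
A hypothetical caller sketch mirroring what the helpers above do: after
pushing bytes onto a received skb, fold their checksum back into
skb->csum so a CHECKSUM_COMPLETE value stays valid. demo_push_hdr() is
invented for illustration.

static void demo_push_hdr(struct sk_buff *skb, const void *hdr,
                          unsigned int len)
{
        memcpy(skb_push(skb, len), hdr, len);
        skb_postpush_rcsum(skb, skb->data, len);
}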
index 1a4ea551aae51c08fd70e4dd4693e97ca56f01de..4293808d8cfb5d4a473f220735e53da87390998b 100644 (file)
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+                                             unsigned long n,
+                                             struct page *page)
+{
+       return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
index 76199b75d5845d21cbcfb67786dfec757e30a446..e302c447e057a98b0ba47b5bc73acd816b9dc7f7 100644 (file)
@@ -1,6 +1,16 @@
 #ifndef __SMC91X_H__
 #define __SMC91X_H__
 
+/*
+ * These bits define which access sizes a platform can support, rather
+ * than the maximal access size.  So, if your platform can do 16-bit
+ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
+ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
+ *
+ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
+ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
+ * an invalid configuration.
+ */
 #define SMC91X_USE_8BIT (1 << 0)
 #define SMC91X_USE_16BIT (1 << 1)
 #define SMC91X_USE_32BIT (1 << 2)
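
A hypothetical board-file sketch applying the rule documented above: a
platform that supports 16- and 32-bit accesses, but not 8-bit, sets both
wider flags.

#include <linux/smc91x.h>

static struct smc91x_platdata demo_smc91x_pdata = {
        .flags = SMC91X_USE_16BIT | SMC91X_USE_32BIT,
};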
index b6810c92b8bb14d9ef8c9d1036a0b25d40530ac3..5c02b0691587797e303758eb74a4cad7d5e577ff 100644 (file)
@@ -195,6 +195,8 @@ int         rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
                                struct rpc_xprt *,
                                void *),
                        void *data);
+void           rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+                       unsigned long timeo);
 
 const char *rpc_proc_name(const struct rpc_task *task);
 #endif /* __KERNEL__ */
index 5e3e1b63dbb3c97f0145bd15a23dba7d4e916528..a16070dd03eefe9281476183ff4b5a0692523a09 100644 (file)
@@ -218,7 +218,8 @@ struct rpc_xprt {
        struct work_struct      task_cleanup;
        struct timer_list       timer;
        unsigned long           last_used,
-                               idle_timeout;
+                               idle_timeout,
+                               max_reconnect_timeout;
 
        /*
         * Send stuff
index c6ffe8b0725c63a9931ea60c9ec65383100ed257..aa17ccfc2f573c01f2f0505fc92d7c207f655a8d 100644 (file)
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/fence.h>
-
-struct sync_file_cb {
-       struct fence_cb cb;
-       struct fence *fence;
-       struct sync_file *sync_file;
-};
+#include <linux/fence-array.h>
 
 /**
  * struct sync_file - sync file to export to the userspace
@@ -32,10 +27,9 @@ struct sync_file_cb {
  * @kref:              reference count on fence.
  * @name:              name of sync_file.  Useful for debugging
  * @sync_file_list:    membership in global file list
- * @num_fences:                number of sync_pts in the fence
  * @wq:                        wait queue for fence signaling
- * @status:            0: signaled, >0:active, <0: error
- * @cbs:               sync_pts callback information
+ * @fence:             fence (possibly a fence array) holding all fences of the sync_file
+ * @cb:                        fence callback information
  */
 struct sync_file {
        struct file             *file;
@@ -44,14 +38,16 @@ struct sync_file {
 #ifdef CONFIG_DEBUG_FS
        struct list_head        sync_file_list;
 #endif
-       int num_fences;
 
        wait_queue_head_t       wq;
-       atomic_t                status;
 
-       struct sync_file_cb     cbs[];
+       struct fence            *fence;
+       struct fence_cb cb;
 };
 
+#define POLL_ENABLED FENCE_FLAG_USER_BITS
+
 struct sync_file *sync_file_create(struct fence *fence);
+struct fence *sync_file_get_fence(int fd);
 
 #endif /* _LINUX_SYNC_H */
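
A hypothetical in-kernel consumer sketch for the new helper declared
above: resolve a sync_file fd from userspace to its backing fence, wait
on it, and drop the reference.

static int demo_wait_on_sync_file(int fd)
{
        struct fence *fence = sync_file_get_fence(fd);
        long ret;

        if (!fence)
                return -EINVAL;

        ret = fence_wait(fence, true);  /* interruptible wait */
        fence_put(fence);
        return ret < 0 ? ret : 0;
}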
index 697e160c78d0d7afd65dcf9ea07afaccd431669c..a4f7203a9017040ca166cdb06aacf1df30113d38 100644 (file)
@@ -42,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
                                void __user *, size_t *, loff_t *);
 extern int proc_dointvec_jiffies(struct ctl_table *, int,
index 352b1542f5cc21953c037f499d9b48f03ee98314..2b5b10eed74ff52fc5b11fe911be7d84b08eda77 100644 (file)
@@ -105,6 +105,31 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+                                       bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+                                             bool to_user)
+{
+       if (!__builtin_constant_p(n))
+               __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
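
A hypothetical sketch of the intended call site: an architecture's
uaccess path checks the kernel-side object before the raw copy, so only
non-constant sizes pay for the hardened-usercopy walk.
arch_raw_copy_to_user() stands in for the arch's copy primitive and is an
assumption here.

static inline unsigned long
demo_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);       /* hardened-usercopy check */
        return arch_raw_copy_to_user(to, from, n);
}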
index 349557825428e9b3d815e0bc2781d5d79b172d47..f30c187ed785366231e318a7beab151e0bba64b6 100644 (file)
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin() do { } while (0)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
 #endif
 
 #endif         /* __LINUX_UACCESS_H__ */
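
A hypothetical usage sketch for the new three-argument form: the error
path is now a goto label rather than a returned value, which lets the
fast path run without per-access branching on architectures that
implement user_access_begin()/user_access_end().

static int demo_read_pair(u32 __user *uptr, u32 *a, u32 *b)
{
        user_access_begin();
        unsafe_get_user(*a, &uptr[0], efault);
        unsafe_get_user(*b, &uptr[1], efault);
        user_access_end();
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}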
index 1b5d1cd796e2b753bdd1ff586427a35c033a3ae9..75b4aaf31a9da419e9415f621cc930b02e871047 100644 (file)
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
index 8c3b412d84df38e37465e841996eee50ecfc50b7..ee162e3e879b97889776015c2b9678304f8a61fe 100644 (file)
@@ -73,34 +73,6 @@ static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
                                           unsigned int decodes) { };
 #endif
 
-/**
- *     vga_get         - acquire & locks VGA resources
- *
- *     @pdev: pci device of the VGA card or NULL for the system default
- *     @rsrc: bit mask of resources to acquire and lock
- *     @interruptible: blocking should be interruptible by signals ?
- *
- *     This function acquires VGA resources for the given
- *     card and mark those resources locked. If the resource requested
- *     are "normal" (and not legacy) resources, the arbiter will first check
- *     whether the card is doing legacy decoding for that type of resource. If
- *     yes, the lock is "converted" into a legacy resource lock.
- *     The arbiter will first look for all VGA cards that might conflict
- *     and disable their IOs and/or Memory access, including VGA forwarding
- *     on P2P bridges if necessary, so that the requested resources can
- *     be used. Then, the card is marked as locking these resources and
- *     the IO and/or Memory accesse are enabled on the card (including
- *     VGA forwarding on parent P2P bridges if any).
- *     This function will block if some conflicting card is already locking
- *     one of the required resources (or any resource on a different bus
- *     segment, since P2P bridges don't differenciate VGA memory and IO
- *     afaik). You can indicate whether this blocking should be interruptible
- *     by a signal (for userland interface) or not.
- *     Must not be called at interrupt time or in atomic context.
- *     If the card already owns the resources, the function succeeds.
- *     Nested calls are supported (a per-resource counter is maintained)
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
 #else
@@ -108,11 +80,14 @@ static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interrupt
 #endif
 
 /**
- *     vga_get_interruptible
+ * vga_get_interruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get() with interruptible set to true.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_interruptible(struct pci_dev *pdev,
                                        unsigned int rsrc)
 {
@@ -120,47 +95,26 @@ static inline int vga_get_interruptible(struct pci_dev *pdev,
 }
 
 /**
- *     vga_get_uninterruptible
+ * vga_get_uninterruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get() with interruptible set to false.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_uninterruptible(struct pci_dev *pdev,
                                          unsigned int rsrc)
 {
        return vga_get(pdev, rsrc, 0);
 }
 
-/**
- *     vga_tryget      - try to acquire & lock legacy VGA resources
- *
- *     @pdev: pci devivce of VGA card or NULL for system default
- *     @rsrc: bit mask of resources to acquire and lock
- *
- *     This function performs the same operation as vga_get(), but
- *     will return an error (-EBUSY) instead of blocking if the resources
- *     are already locked by another card. It can be called in any context
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
 #else
 static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
 #endif
 
-/**
- *     vga_put         - release lock on legacy VGA resources
- *
- *     @pdev: pci device of VGA card or NULL for system default
- *     @rsrc: but mask of resource to release
- *
- *     This function releases resources previously locked by vga_get()
- *     or vga_tryget(). The resources aren't disabled right away, so
- *     that a subsequence vga_get() on the same card will succeed
- *     immediately. Resources have a counter, so locks are only
- *     released if the counter reaches 0.
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
 #else
@@ -168,25 +122,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
 #endif
 
 
-/**
- *     vga_default_device
- *
- *     This can be defined by the platform. The default implementation
- *     is rather dumb and will probably only work properly on single
- *     vga card setups and/or x86 platforms.
- *
- *     If your VGA default device is not PCI, you'll have to return
- *     NULL here. In this case, I assume it will not conflict with
- *     any PCI card. If this is not true, I'll have to define two archs
- *     hooks for enabling/disabling the VGA default device if that is
- *     possible. This may be a problem with real _ISA_ VGA cards, in
- *     addition to a PCI one. I don't know at this point how to deal
- *     with that card. Can theirs IOs be disabled at all ? If not, then
- *     I suppose it's a matter of having the proper arch hook telling
- *     us about it, so we basically never allow anybody to succeed a
- *     vga_get()...
- */
-
 #ifdef CONFIG_VGA_ARB
 extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
@@ -195,14 +130,11 @@ static inline struct pci_dev *vga_default_device(void) { return NULL; };
 static inline void vga_set_default_device(struct pci_dev *pdev) { };
 #endif
 
-/**
- *     vga_conflicts
- *
- *     Architectures should define this if they have several
- *     independent PCI domains that can afford concurrent VGA
- *     decoding
+/*
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
  */
-
 #ifndef __ARCH_HAS_VGA_CONFLICT
 static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
 {
@@ -210,34 +142,6 @@ static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
 }
 #endif
 
-/**
- *     vga_client_register
- *
- *     @pdev: pci device of the VGA client
- *     @cookie: client cookie to be used in callbacks
- *     @irq_set_state: irq state change callback
- *     @set_vga_decode: vga decode change callback
- *
- *     return value: 0 on success, -1 on failure
- *     Register a client with the VGA arbitration logic
- *
- *     Clients have two callback mechanisms they can use.
- *     irq enable/disable callback -
- *             If a client can't disable its GPUs VGA resources, then we
- *             need to be able to ask it to turn off its irqs when we
- *             turn off its mem and io decoding.
- *     set_vga_decode
- *             If a client can disable its GPU VGA resource, it will
- *             get a callback from this to set the encode/decode state
- *
- * Rationale: we cannot disable VGA decode resources unconditionally
- * some single GPU laptops seem to require ACPI or BIOS access to the
- * VGA registers to control things like backlights etc.
- * Hopefully newer multi-GPU laptops do something saner, and desktops
- * won't have any special ACPI for this.
- * They driver will get a callback when VGA arbitration is first used
- * by userspace since we some older X servers have issues.
- */
 #if defined(CONFIG_VGA_ARB)
 int vga_client_register(struct pci_dev *pdev, void *cookie,
                        void (*irq_set_state)(void *cookie, bool state),
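
The kerneldoc blocks removed above described the acquire/release pattern;
a minimal hypothetical sketch of that pattern, with demo_* invented for
illustration:

static int demo_poke_legacy_vga(struct pci_dev *pdev)
{
        int rc = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);

        if (rc)
                return rc;      /* interrupted while waiting for the lock */

        /* ... access the 0x3c0-0x3df legacy VGA registers ... */

        vga_put(pdev, VGA_RSRC_LEGACY_IO);
        return 0;
}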
index dc7854b855f378adf5a6187fae1c2eab389a0726..fdb5d600e4bb98a5c09c42aef4bed3715641f6fc 100644 (file)
@@ -57,8 +57,8 @@ struct cec_devnode {
        int minor;
        bool registered;
        bool unregistered;
-       struct mutex fhs_lock;
        struct list_head fhs;
+       struct mutex lock;
 };
 
 struct cec_adapter;
index 41e6a24a44b9b11413b33d8fe717992dd2a2d6db..82f3c912a5b176d5f2d57361765c9bccbdded367 100644 (file)
@@ -176,8 +176,8 @@ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
                          struct pernet_operations *ops);
 int tcf_action_destroy(struct list_head *actions, int bind);
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
-                   struct tcf_result *res);
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+                   int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct nlattr *nla,
                                  struct nlattr *est, char *n, int ovr,
                                  int bind, struct list_head *);
@@ -189,30 +189,17 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
-#define tc_no_actions(_exts) \
-       (list_empty(&(_exts)->actions))
-
-#define tc_for_each_action(_a, _exts) \
-       list_for_each_entry(a, &(_exts)->actions, list)
-
-#define tc_single_action(_exts) \
-       (list_is_singular(&(_exts)->actions))
+#endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
                                           u64 packets, u64 lastuse)
 {
+#ifdef CONFIG_NET_CLS_ACT
        if (!a->ops->stats_update)
                return;
 
        a->ops->stats_update(a, bytes, packets, lastuse);
+#endif
 }
 
-#else /* CONFIG_NET_CLS_ACT */
-
-#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
-#define tc_single_action(_exts) false
-#define tcf_action_stats_update(a, bytes, packets, lastuse)
-
-#endif /* CONFIG_NET_CLS_ACT */
 #endif
index ac1bc3c49fbdf9832fdb4f895657c0336bb61926..7b0f88699b25eafceeca6a52da908ee03aa70d95 100644 (file)
@@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
                                           unsigned long,
                                           gfp_t);
 int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
 void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
 void rxrpc_kernel_end_call(struct rxrpc_call *);
 bool rxrpc_kernel_is_data_last(struct sk_buff *);
 u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
 int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_data_delivered(struct sk_buff *);
 void rxrpc_kernel_free_skb(struct sk_buff *);
 struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
 int rxrpc_kernel_reject_call(struct socket *);
index 9b4c418bebd84ae0a7debfbcc697ee92b136ec99..fd60eccb59a67969eba53416a376a3c912d62d81 100644 (file)
@@ -52,7 +52,7 @@ struct unix_sock {
        struct sock             sk;
        struct unix_address     *addr;
        struct path             path;
-       struct mutex            readlock;
+       struct mutex            iolock, bindlock;
        struct sock             *peer;
        struct list_head        link;
        atomic_long_t           inflight;
index 9c23f4d33e06c1ad1dab246b822d5dcc76c854ac..beb7610d64e936b02d959d97cdf5a85fa5b29132 100644 (file)
@@ -1102,6 +1102,7 @@ struct station_info {
        struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
 };
 
+#if IS_ENABLED(CONFIG_CFG80211)
 /**
  * cfg80211_get_station - retrieve information about a given station
  * @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
  */
 int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
                         struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+                                      const u8 *mac_addr,
+                                      struct station_info *sinfo)
+{
+       return -ENOENT;
+}
+#endif
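
With the new !CONFIG_CFG80211 stub above, callers no longer need their
own ifdefs; a hypothetical sketch (reading expected_throughput is an
illustrative choice):

static u32 demo_peer_throughput(struct net_device *dev, const u8 *mac)
{
        struct station_info sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        if (cfg80211_get_station(dev, mac, &sinfo))
                return 0;       /* stubbed -ENOENT or a real lookup failure */

        return sinfo.expected_throughput;
}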
 
 /**
  * enum monitor_flags - monitor flags
index 7a54a31d1d4cf7988ff7bd3bf3015b4a8f23e359..73ea256eb7d79f15429caf12760444e6e8753f10 100644 (file)
@@ -104,6 +104,7 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
 
        skb_push(skb, hdr_len);
 
+       skb_set_inner_protocol(skb, proto);
        skb_reset_transport_header(skb);
        greh = (struct gre_base_hdr *)skb->data;
        greh->flags = gre_tnl_flags_to_gre_flags(flags);
index 0dc0a51da38faacab2ea275681f5f70e09a6c79e..dce2d586d9cecb9e9de381aa0926f3e3d3ec9568 100644 (file)
@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
        to = from | htonl(INET_ECN_CE << 20);
        *(__be32 *)iph = to;
        if (skb->ip_summed == CHECKSUM_COMPLETE)
-               skb->csum = csum_add(csum_sub(skb->csum, from), to);
+               skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+                                    (__force __wsum)to);
        return 1;
 }
 
index 4079fc18ffe4643522178b394ed6d1b54d5a6093..7d4a72e75f334d300f9d55cd53bdd1d1e82c08f9 100644 (file)
@@ -111,6 +111,7 @@ struct fib_info {
        unsigned char           fib_scope;
        unsigned char           fib_type;
        __be32                  fib_prefsrc;
+       u32                     fib_tb_id;
        u32                     fib_priority;
        u32                     *fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
-int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 
 extern u32 fib_multipath_secret __read_mostly;
index b4faadbb4e01f9ca14b19ebce355823f5716d129..cca510a585c3d55895da6a1f593474c15d666bca 100644 (file)
@@ -3620,7 +3620,8 @@ struct ieee80211_ops {
 
        int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
        void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-       u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
+       u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
+                                      struct ieee80211_sta *sta);
        int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           int *dbm);
 
index 6793614e6502a0eb51ca7efabc593e7bb6866576..e6937318546ceee3b32a63bd791932c1ea1e12de 100644 (file)
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
 #endif
 }
 
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+                                     const struct nf_conn *tmpl)
+{
+       if (tmpl && nfct_synproxy(tmpl)) {
+               if (!nfct_seqadj_ext_add(ct))
+                       return false;
+
+               if (!nfct_synproxy_ext_add(ct))
+                       return false;
+       }
+
+       return true;
+}
+
 struct synproxy_stats {
        unsigned int                    syn_received;
        unsigned int                    cookie_invalid;
index d27588c8dbd9f2a01f65cec2c8587f33a7eba1ce..1139cde0fdc590c08de507a66eee1b6de82eea1b 100644 (file)
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 void nft_meta_set_destroy(const struct nft_ctx *ctx,
                          const struct nft_expr *expr);
 
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data);
+
 #endif
index 60fa1530006b45bc229dbf46bfe93ff690bc7b42..02e28c529b29bcb77d24d950af6873d66995ff31 100644 (file)
@@ -8,6 +8,10 @@ struct nft_reject {
 
 extern const struct nla_policy nft_reject_policy[];
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[]);
index 6f8d65342d3adb86ea636bec00c750ad72015eb2..c99508d426ccfe8220c992209a35f84589d09906 100644 (file)
@@ -59,7 +59,8 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
 struct tcf_exts {
 #ifdef CONFIG_NET_CLS_ACT
        __u32   type; /* for backward compat(TCA_OLD_COMPAT) */
-       struct list_head actions;
+       int nr_actions;
+       struct tc_action **actions;
 #endif
        /* Map to export classifier specific extension TLV types to the
         * generic extensions API. Unsupported extensions must be set to 0.
@@ -72,7 +73,10 @@ static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
 {
 #ifdef CONFIG_NET_CLS_ACT
        exts->type = 0;
-       INIT_LIST_HEAD(&exts->actions);
+       exts->nr_actions = 0;
+       exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
+                               GFP_KERNEL);
+       WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
 #endif
        exts->action = action;
        exts->police = police;
@@ -89,7 +93,7 @@ static inline int
 tcf_exts_is_predicative(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       return !list_empty(&exts->actions);
+       return exts->nr_actions;
 #else
        return 0;
 #endif
@@ -108,6 +112,20 @@ tcf_exts_is_available(struct tcf_exts *exts)
        return tcf_exts_is_predicative(exts);
 }
 
+static inline void tcf_exts_to_list(const struct tcf_exts *exts,
+                                   struct list_head *actions)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       int i;
+
+       for (i = 0; i < exts->nr_actions; i++) {
+               struct tc_action *a = exts->actions[i];
+
+               list_add(&a->list, actions);
+       }
+#endif
+}
+
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
@@ -124,12 +142,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
               struct tcf_result *res)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       if (!list_empty(&exts->actions))
-               return tcf_action_exec(skb, &exts->actions, res);
+       if (exts->nr_actions)
+               return tcf_action_exec(skb, exts->actions, exts->nr_actions,
+                                      res);
 #endif
        return 0;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+
+#define tc_no_actions(_exts)  ((_exts)->nr_actions == 0)
+#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
+
+#else /* CONFIG_NET_CLS_ACT */
+
+#define tc_no_actions(_exts) true
+#define tc_single_action(_exts) false
+
+#endif /* CONFIG_NET_CLS_ACT */
+
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
                      struct nlattr **tb, struct nlattr *rate_tlv,
                      struct tcf_exts *exts, bool ovr);
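
A hypothetical sketch of the bridge between the new array-backed
tcf_exts and older list-based consumers, using the tcf_exts_to_list()
helper added above (printing a->order is an assumption about the
tc_action layout):

static void demo_dump_actions(const struct tcf_exts *exts)
{
        LIST_HEAD(actions);
        struct tc_action *a;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list)
                pr_info("action order %u\n", a->order);
}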
index efc01743b9d641bf6b16a37780ee0df34b4ec698..bafe2a0ab9085f24e17038516c55c00cfddd02f4 100644 (file)
@@ -382,7 +382,7 @@ enum {
        ADDIP_SERIAL_SIGN_BIT = (1<<31)
 };
 
-static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
+static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
 {
        return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
 }
index ff5be7e8ddeae6f9d2f9eac889d7abbfbd396bbd..8741988e688015da6a7beb58e03b69791ea81df9 100644 (file)
@@ -1332,6 +1332,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
        if (!sk_has_account(sk))
                return;
        sk->sk_forward_alloc += size;
+
+       /* Avoid a possible overflow.
+        * TCP send queues can make this happen, if sk_mem_reclaim()
+        * is not called and more than 2 GBytes are released at once.
+        *
+        * If we reach 2 MBytes, reclaim 1 MByte right now; there is
+        * no need to hold that much forward allocation anyway.
+        */
+       if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+               __sk_mem_reclaim(sk, 1 << 20);
 }
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
index c00e7d51bb18d77f47ef9fb016ef432f5db4f7be..7717302cab91e1bb18cc42c3f2d4b51f98f3c148 100644 (file)
@@ -1523,6 +1523,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
 {
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
+       if (tcp_sk(sk)->highest_sack == skb_unlinked)
+               tcp_sk(sk)->highest_sack = NULL;
 }
 
 static inline void tcp_init_send_head(struct sock *sk)
index adfebd6f243c14d18b23988aa27c23c2a88f985e..17934312eecbc7ec114036f54efcfbbd322aec0b 100644 (file)
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 int xfrm6_extract_header(struct sk_buff *skb);
 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+                 struct ip6_tnl *t);
 int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
 int xfrm6_rcv(struct sk_buff *skb);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto);
index 8e90dd28bb7536d16058d711b096f8125bd42874..e1f96737c2a17fc51cd1222da7fd8b6b4c0bdeb4 100644 (file)
@@ -2115,22 +2115,17 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
                                       size_t len)
 {
        const void __user *p = udata->inbuf + offset;
-       bool ret = false;
+       bool ret;
        u8 *buf;
 
        if (len > USHRT_MAX)
                return false;
 
-       buf = kmalloc(len, GFP_KERNEL);
-       if (!buf)
+       buf = memdup_user(p, len);
+       if (IS_ERR(buf))
                return false;
 
-       if (copy_from_user(buf, p, len))
-               goto free;
-
        ret = !memchr_inv(buf, 0, len);
-
-free:
        kfree(buf);
        return ret;
 }
index 13c0b2ba1b6c0942ef2c0dd07d77a78719438d1e..73d870918939e082295a90bd9a092814dd97131b 100644 (file)
@@ -11,12 +11,12 @@ struct sas_rphy;
 struct request;
 
 #if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
 {
        return 0;
 }
 #else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
 #endif
 
 static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_remove(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
 extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
 
 struct sas_port *sas_port_alloc(struct device *, int);
 struct sas_port *sas_port_alloc_num(struct device *);
index 51440131d3372feb1f09b7139362db66119f0e2a..28c5da6fdfac762e64f3d7ae26edcc9adbefe9ab 100644 (file)
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
 #ifdef CONFIG_NO_HZ_COMMON
 
 #define TICK_DEP_NAMES                                 \
-               tick_dep_name(NONE)                     \
+               tick_dep_mask_name(NONE)                \
                tick_dep_name(POSIX_TIMER)              \
                tick_dep_name(PERF_EVENTS)              \
                tick_dep_name(SCHED)                    \
                tick_dep_name_end(CLOCK_UNSTABLE)
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASKs will be converted to their bits, and those need to be processed too */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
 
 TICK_DEP_NAMES
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
 #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
 #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
 
 #define show_tick_dep_name(val)                                \
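
The TICK_DEP_NAMES construct above is an X-macro: a single list expanded several times under different per-entry definitions, first into TRACE_DEFINE_ENUM() registrations and then into a name/value print table. A minimal standalone illustration of the pattern (COLOR_* names are invented for the example):

#include <stdio.h>

#define COLOR_LIST \
	color_name(RED) \
	color_name(GREEN) \
	color_name_end(BLUE)

enum {
#define color_name(c)     COLOR_##c,
#define color_name_end(c) COLOR_##c
	COLOR_LIST
#undef color_name
#undef color_name_end
};

static const char * const color_strings[] = {
#define color_name(c)     #c,
#define color_name_end(c) #c
	COLOR_LIST
#undef color_name
#undef color_name_end
};

int main(void)
{
	printf("%d %s\n", COLOR_GREEN, color_strings[COLOR_GREEN]);
	return 0;
}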
index 462246aa200e1db1890de7d6640c81e859da250d..d6b5a21f3d3c9f9c228ae0291b3ae9b19f486bcc 100644 (file)
@@ -77,6 +77,10 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS                (1 << 1)
 /* Flag that USWC attributes should be used for GTT */
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC         (1 << 2)
+/* Flag that the memory should be in VRAM and cleared */
+#define AMDGPU_GEM_CREATE_VRAM_CLEARED         (1 << 3)
+/* Flag to create a shadow BO (in GTT) while allocating the VRAM BO */
+#define AMDGPU_GEM_CREATE_SHADOW               (1 << 4)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
@@ -481,6 +485,8 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_DEV_INFO                   0x16
 /* visible vram usage */
 #define AMDGPU_INFO_VIS_VRAM_USAGE             0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS              0x18
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK  0xff
@@ -643,6 +649,7 @@ struct drm_amdgpu_info_hw_ip {
  * Supported GPU families
  */
 #define AMDGPU_FAMILY_UNKNOWN                  0
+#define AMDGPU_FAMILY_SI                       110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
 #define AMDGPU_FAMILY_CI                       120 /* Bonaire, Hawaii */
 #define AMDGPU_FAMILY_KV                       125 /* Kaveri, Kabini, Mullins */
 #define AMDGPU_FAMILY_VI                       130 /* Iceland, Tonga */
index 452675fb55d9c429506048ee7ff240fdaf7f7db0..b2c52843bc7025b97d6e35961379a29ea29e5d33 100644 (file)
@@ -646,6 +646,7 @@ struct drm_gem_open {
 #define DRM_CAP_CURSOR_WIDTH           0x8
 #define DRM_CAP_CURSOR_HEIGHT          0x9
 #define DRM_CAP_ADDFB2_MODIFIERS       0x10
+#define DRM_CAP_PAGE_FLIP_TARGET       0x11
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
index 49a72659b801dd65b39b24e63b666575afb43363..df0e3504c349a950bf41540fbcd6cd944cf11d2f 100644 (file)
@@ -520,7 +520,13 @@ struct drm_color_lut {
 
 #define DRM_MODE_PAGE_FLIP_EVENT 0x01
 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02
-#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+                                  DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+                                 DRM_MODE_PAGE_FLIP_ASYNC | \
+                                 DRM_MODE_PAGE_FLIP_TARGET)
 
 /*
  * Request a page flip on the specified crtc.
@@ -543,8 +549,7 @@ struct drm_color_lut {
  * 'as soon as possible', meaning that it does not delay waiting for vblank.
  * This may cause tearing on the screen.
  *
- * The reserved field must be zero until we figure out something
- * clever to use it for.
+ * The reserved field must be zero.
  */
 
 struct drm_mode_crtc_page_flip {
@@ -555,6 +560,34 @@ struct drm_mode_crtc_page_flip {
        __u64 user_data;
 };
 
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+       __u32 crtc_id;
+       __u32 fb_id;
+       __u32 flags;
+       __u32 sequence;
+       __u64 user_data;
+};
+
 /* create a dumb scanout buffer */
 struct drm_mode_create_dumb {
        __u32 height;
index d7e81a3886fdc41e63ec00cd7b1f0c50c7f24ebb..03725fe89859746391f63e23af622d10b5d5d0b0 100644 (file)
@@ -62,6 +62,30 @@ extern "C" {
 #define I915_ERROR_UEVENT              "ERROR"
 #define I915_RESET_UEVENT              "RESET"
 
+/*
+ * MOCS indexes used for GPU surfaces, defining the cacheability of the
+ * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
+ */
+enum i915_mocs_table_index {
+       /*
+        * Not cached anywhere, coherency between CPU and GPU accesses is
+        * guaranteed.
+        */
+       I915_MOCS_UNCACHED,
+       /*
+        * Cacheability and coherency controlled by the kernel automatically
+        * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
+        * usage of the surface (used for display scanout or not).
+        */
+       I915_MOCS_PTE,
+       /*
+        * Cached in all GPU caches available on the platform.
+        * Coherency between CPU and GPU accesses to the surface is not
+        * guaranteed without extra synchronization.
+        */
+       I915_MOCS_CACHED,
+};
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255        /* table size 2k - maximum due to use
@@ -363,6 +387,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_SOFTPIN     37
 #define I915_PARAM_HAS_POOLED_EU        38
 #define I915_PARAM_MIN_EU_IN_POOL       39
+#define I915_PARAM_MMAP_GTT_VERSION     40
 
 typedef struct drm_i915_getparam {
        __s32 param;
@@ -698,15 +723,20 @@ struct drm_i915_gem_exec_object2 {
         */
        __u64 offset;
 
-#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
-#define EXEC_OBJECT_NEEDS_GTT  (1<<1)
-#define EXEC_OBJECT_WRITE      (1<<2)
+#define EXEC_OBJECT_NEEDS_FENCE                 (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT           (1<<1)
+#define EXEC_OBJECT_WRITE               (1<<2)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
-#define EXEC_OBJECT_PINNED     (1<<4)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
+#define EXEC_OBJECT_PINNED              (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE                 (1<<5)
+/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
        __u64 flags;
 
-       __u64 rsvd1;
+       union {
+               __u64 rsvd1;
+               __u64 pad_to_size;
+       };
        __u64 rsvd2;
 };
 
@@ -826,7 +856,16 @@ struct drm_i915_gem_busy {
         * having flushed any pending activity), and a non-zero return that
         * the object is still in-flight on the GPU. (The GPU has not yet
         * signaled completion for all pending requests that reference the
-        * object.)
+        * object.) An object is guaranteed to become idle eventually (so
+        * long as no new GPU commands are executed upon it). Due to the
+        * asynchronous nature of the hardware, an object reported
+        * as busy may become idle before the ioctl is completed.
+        *
+        * Furthermore, if the object is busy, which engine is busy is only
+        * provided as a guide. There are race conditions which prevent the
+        * report of which engines are busy from being always accurate.
+        * However, the converse is not true. If the object is idle, the
+        * result of the ioctl, that all engines are idle, is accurate.
         *
         * The returned dword is split into two fields to indicate both
         * the engines on which the object is being read, and the
@@ -849,6 +888,11 @@ struct drm_i915_gem_busy {
         * execution engines, e.g. multiple media engines, which are
         * mapped to the same identifier in the EXECBUFFER2 ioctl and
         * so are not separately reported for busyness.
+        *
+        * Caveat emptor:
+        * Only the boolean result of this query is reliable; that is whether
+        * the object is idle or busy. The report of which engines are busy
+        * should be only used as a heuristic.
         */
        __u32 busy;
 };
@@ -897,6 +941,7 @@ struct drm_i915_gem_caching {
 #define I915_TILING_NONE       0
 #define I915_TILING_X          1
 #define I915_TILING_Y          2
+#define I915_TILING_LAST       I915_TILING_Y
 
 #define I915_BIT_6_SWIZZLE_NONE                0
 #define I915_BIT_6_SWIZZLE_9           1
index 49f778de8e06868c1a12b2e81c47fa73c51c12ec..8c51e8a0df891e4583c6e18718e64265d33ce1dc 100644 (file)
@@ -42,6 +42,15 @@ extern "C" {
 #define MSM_PIPE_2D1         0x02
 #define MSM_PIPE_3D0         0x10
 
+/* The pipe-id just uses the lower bits, so it can be OR'd with flags in
+ * the upper 16 bits (which could be extended further if needed; maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK     0xffff
+#define MSM_PIPE_ID(x)       ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x)    ((x) & ~MSM_PIPE_ID_MASK)
+
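
A quick standalone check of the packing described above, with the flag values mirrored from this header (in real code include <drm/msm_drm.h> instead of redefining them):

#include <assert.h>
#include <stdint.h>

#define MSM_PIPE_ID_MASK        0xffff
#define MSM_PIPE_ID(x)          ((x) & MSM_PIPE_ID_MASK)
#define MSM_PIPE_FLAGS(x)       ((x) & ~MSM_PIPE_ID_MASK)
#define MSM_PIPE_3D0            0x10
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000

int main(void)
{
	uint32_t flags = MSM_PIPE_3D0 | MSM_SUBMIT_FENCE_FD_OUT;

	assert(MSM_PIPE_ID(flags) == MSM_PIPE_3D0);
	assert(MSM_PIPE_FLAGS(flags) == MSM_SUBMIT_FENCE_FD_OUT);
	return 0;
}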
 /* timeouts are specified in clock-monotonic absolute times (to simplify
  * restarting interrupted ioctls).  The following struct is logically the
  * same as 'struct timespec' but 32/64b ABI safe.
@@ -175,17 +184,28 @@ struct drm_msm_gem_submit_bo {
        __u64 presumed;       /* in/out, presumed buffer address */
 };
 
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_NO_IMPLICIT   0x80000000 /* disable implicit sync */
+#define MSM_SUBMIT_FENCE_FD_IN   0x40000000 /* enable input fence_fd */
+#define MSM_SUBMIT_FENCE_FD_OUT  0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_FLAGS                ( \
+               MSM_SUBMIT_NO_IMPLICIT   | \
+               MSM_SUBMIT_FENCE_FD_IN   | \
+               MSM_SUBMIT_FENCE_FD_OUT  | \
+               0)
+
 /* Each cmdstream submit consists of a table of buffers involved, and
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
 struct drm_msm_gem_submit {
-       __u32 pipe;           /* in, MSM_PIPE_x */
+       __u32 flags;          /* MSM_PIPE_x | MSM_SUBMIT_x */
        __u32 fence;          /* out */
        __u32 nr_bos;         /* in, number of submit_bo's */
        __u32 nr_cmds;        /* in, number of submit_cmd's */
        __u64 __user bos;     /* in, ptr to array of submit_bo's */
        __u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+       __s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
index 9c9c6ad55f1487b9b4cead653fc996930603b259..5cd4d4d2dd1d226ba6e5b024a697e71dfde9ac7d 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/atmapi.h>
 #include <linux/atmioc.h>
+#include <linux/time.h>
 
 #define ZATM_GETPOOL   _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
                                                /* get pool statistics */
index da218fec605657ee415f8ad71a95d8851330a9de..9e5fc168c8a3d8cb3d8ef424eed6d67a74f2e8ac 100644 (file)
@@ -339,7 +339,7 @@ enum bpf_func_id {
        BPF_FUNC_skb_change_type,
 
        /**
-        * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+        * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
         * @skb: pointer to skb
         * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
         * @index: index of the cgroup in the bpf_map
@@ -348,7 +348,7 @@ enum bpf_func_id {
         *   == 1 skb succeeded the cgroup2 descendant test
         *    < 0 error
         */
-       BPF_FUNC_skb_in_cgroup,
+       BPF_FUNC_skb_under_cgroup,
 
        /**
         * bpf_get_hash_recalc(skb)
index 163e8adac2d664515a36341778209be057ab012d..4bd1f55d63773287e66644b48cea7fceae8f3a53 100644 (file)
@@ -16,7 +16,8 @@
 #define _UAPI__LINUX_IF_PPPOL2TP_H
 
 #include <linux/types.h>
-
+#include <linux/in.h>
+#include <linux/in6.h>
 
 /* Structure used to connect() the socket to a particular tunnel UDP
  * socket over IPv4.
index e128769331b5a7c6be4a10cf44c262be68a65772..d37bbb17a007c108f093ac517e4f0d567f3920de 100644 (file)
 #include <asm/byteorder.h>
 
 #include <linux/socket.h>
+#include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
index 1046f5515174907f0779dac4add7cf0f14a51a68..777b6cdb1b7be001812f51b6fe9f9522af85d0ca 100644 (file)
@@ -2,6 +2,9 @@
 #define _UAPI_IF_TUNNEL_H_
 
 #include <linux/types.h>
+#include <linux/if.h>
+#include <linux/ip.h>
+#include <linux/in6.h>
 #include <asm/byteorder.h>
 
 
index 3d48014cdd71d0d120693594e50a4b1cbc3155cc..30f031db12f60c9c5f94a2194b09663f1b20e552 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IPX_H_
 #define _IPX_H_
+#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
 #include <linux/types.h>
 #include <linux/sockios.h>
 #include <linux/socket.h>
 #define IPX_NODE_LEN   6
 #define IPX_MTU                576
 
+#if __UAPI_DEF_SOCKADDR_IPX
 struct sockaddr_ipx {
        __kernel_sa_family_t sipx_family;
        __be16          sipx_port;
@@ -14,6 +16,7 @@ struct sockaddr_ipx {
        __u8            sipx_type;
        unsigned char   sipx_zero;      /* 16 byte fill */
 };
+#endif /* __UAPI_DEF_SOCKADDR_IPX */
 
 /*
  * So we can fit the extra info for SIOCSIFADDR into the address nicely
@@ -23,12 +26,15 @@ struct sockaddr_ipx {
 #define IPX_DLTITF     0
 #define IPX_CRTITF     1
 
+#if __UAPI_DEF_IPX_ROUTE_DEFINITION
 struct ipx_route_definition {
        __be32        ipx_network;
        __be32        ipx_router_network;
        unsigned char ipx_router_node[IPX_NODE_LEN];
 };
+#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
 
+#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
 struct ipx_interface_definition {
        __be32        ipx_network;
        unsigned char ipx_device[16];
@@ -45,16 +51,20 @@ struct ipx_interface_definition {
 #define IPX_INTERNAL           2
        unsigned char ipx_node[IPX_NODE_LEN];
 };
-       
+#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
+
+#if __UAPI_DEF_IPX_CONFIG_DATA
 struct ipx_config_data {
        unsigned char   ipxcfg_auto_select_primary;
        unsigned char   ipxcfg_auto_create_interfaces;
 };
+#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
 
 /*
  * OLD Route Definition for backward compatibility.
  */
 
+#if __UAPI_DEF_IPX_ROUTE_DEF
 struct ipx_route_def {
        __be32          ipx_network;
        __be32          ipx_router_network;
@@ -67,6 +77,7 @@ struct ipx_route_def {
 #define IPX_RT_BLUEBOOK                2
 #define IPX_RT_ROUTED          1
 };
+#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
 
 #define SIOCAIPXITFCRT         (SIOCPROTOPRIVATE)
 #define SIOCAIPXPRISLT         (SIOCPROTOPRIVATE + 1)
index e4f048ee7043d196a0948c9dfc520b78082c6e54..44b8a6bd5fe1128e8c18061e14b9a63486d26e0d 100644 (file)
 
 #endif /* _NETINET_IN_H */
 
+/* Coordinate with glibc netipx/ipx.h header. */
+#if defined(__NETIPX_IPX_H)
+
+#define __UAPI_DEF_SOCKADDR_IPX                        0
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                0
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    0
+#define __UAPI_DEF_IPX_CONFIG_DATA             0
+#define __UAPI_DEF_IPX_ROUTE_DEF               0
+
+#else /* defined(__NETIPX_IPX_H) */
+
+#define __UAPI_DEF_SOCKADDR_IPX                        1
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#define __UAPI_DEF_IPX_CONFIG_DATA             1
+#define __UAPI_DEF_IPX_ROUTE_DEF               1
+
+#endif /* defined(__NETIPX_IPX_H) */
+
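
With these guards, the kernel header yields to glibc's netipx/ipx.h whenever the latter's include guard (__NETIPX_IPX_H) is already defined, so a program that pulls in both headers should compile without struct redefinition errors:

#include <netipx/ipx.h>	/* glibc definitions; sets __NETIPX_IPX_H */
#include <linux/ipx.h>	/* kernel UAPI now suppresses its duplicates */

int main(void)
{
	struct sockaddr_ipx sipx = { 0 };

	(void)sipx;
	return 0;
}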
 /* Definitions for xattr.h */
 #if defined(_SYS_XATTR_H)
 #define __UAPI_DEF_XATTR               0
 #define __UAPI_DEF_IN6_PKTINFO         1
 #define __UAPI_DEF_IP6_MTUINFO         1
 
+/* Definitions for ipx.h */
+#define __UAPI_DEF_SOCKADDR_IPX                        1
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#define __UAPI_DEF_IPX_CONFIG_DATA             1
+#define __UAPI_DEF_IPX_ROUTE_DEF               1
+
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR               1
 
index 01751faccaf87661adb96e63cba35a2df653bceb..c674ba2563b7df6e692161402fe84594f83a3c77 100644 (file)
@@ -24,7 +24,7 @@ enum nft_registers {
        __NFT_REG_MAX,
 
        NFT_REG32_00    = 8,
-       MFT_REG32_01,
+       NFT_REG32_01,
        NFT_REG32_02,
        NFT_REG32_03,
        NFT_REG32_04,
index d95a3018f6a1e7018f2b8774c4513387a270b3fd..54c3b4f4aceb4f7cdf5f23319f5c707751e3e7a7 100644 (file)
@@ -583,7 +583,7 @@ enum ovs_userspace_attr {
 #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
 
 struct ovs_action_trunc {
-       uint32_t max_len; /* Max packet size in bytes. */
+       __u32 max_len; /* Max packet size in bytes. */
 };
 
 /**
@@ -632,8 +632,8 @@ enum ovs_hash_alg {
  * @hash_basis: basis used for computing hash.
  */
 struct ovs_action_hash {
-       uint32_t  hash_alg;     /* One of ovs_hash_alg. */
-       uint32_t  hash_basis;
+       __u32  hash_alg;     /* One of ovs_hash_alg. */
+       __u32  hash_basis;
 };
 
 /**
index d304f4c9792c4b83d641701cbb589d09bcca499b..a406adcc0793e0f3a09706cb693867b49d0ef914 100644 (file)
@@ -944,4 +944,68 @@ struct sctp_default_prinfo {
        __u16 pr_policy;
 };
 
+struct sctp_info {
+       __u32   sctpi_tag;
+       __u32   sctpi_state;
+       __u32   sctpi_rwnd;
+       __u16   sctpi_unackdata;
+       __u16   sctpi_penddata;
+       __u16   sctpi_instrms;
+       __u16   sctpi_outstrms;
+       __u32   sctpi_fragmentation_point;
+       __u32   sctpi_inqueue;
+       __u32   sctpi_outqueue;
+       __u32   sctpi_overall_error;
+       __u32   sctpi_max_burst;
+       __u32   sctpi_maxseg;
+       __u32   sctpi_peer_rwnd;
+       __u32   sctpi_peer_tag;
+       __u8    sctpi_peer_capable;
+       __u8    sctpi_peer_sack;
+       __u16   __reserved1;
+
+       /* assoc status info */
+       __u64   sctpi_isacks;
+       __u64   sctpi_osacks;
+       __u64   sctpi_opackets;
+       __u64   sctpi_ipackets;
+       __u64   sctpi_rtxchunks;
+       __u64   sctpi_outofseqtsns;
+       __u64   sctpi_idupchunks;
+       __u64   sctpi_gapcnt;
+       __u64   sctpi_ouodchunks;
+       __u64   sctpi_iuodchunks;
+       __u64   sctpi_oodchunks;
+       __u64   sctpi_iodchunks;
+       __u64   sctpi_octrlchunks;
+       __u64   sctpi_ictrlchunks;
+
+       /* primary transport info */
+       struct sockaddr_storage sctpi_p_address;
+       __s32   sctpi_p_state;
+       __u32   sctpi_p_cwnd;
+       __u32   sctpi_p_srtt;
+       __u32   sctpi_p_rto;
+       __u32   sctpi_p_hbinterval;
+       __u32   sctpi_p_pathmaxrxt;
+       __u32   sctpi_p_sackdelay;
+       __u32   sctpi_p_sackfreq;
+       __u32   sctpi_p_ssthresh;
+       __u32   sctpi_p_partial_bytes_acked;
+       __u32   sctpi_p_flight_size;
+       __u16   sctpi_p_error;
+       __u16   __reserved2;
+
+       /* sctp sock info */
+       __u32   sctpi_s_autoclose;
+       __u32   sctpi_s_adaptation_ind;
+       __u32   sctpi_s_pd_point;
+       __u8    sctpi_s_nodelay;
+       __u8    sctpi_s_disable_fragments;
+       __u8    sctpi_s_v4mapped;
+       __u8    sctpi_s_frag_interleave;
+       __u32   sctpi_s_type;
+       __u32   __reserved3;
+};
+
 #endif /* _UAPI_SCTP_H */
index 413303d37b56b554384cc59585db8fcf9654b639..5b287d6970b3a06677b8f55cfbd0bdc162c61181 100644 (file)
@@ -85,15 +85,12 @@ struct sync_file_info {
 #define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
 
 /**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file
  *
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * Takes a struct sync_file_info. If num_fences is 0, the field is updated
+ * with the actual number of fences. If num_fences is > 0, the system will
+ * use the pointer provided on sync_fence_info to return up to num_fences of
+ * struct sync_fence_info, with detailed fence information.
  */
 #define SYNC_IOC_FILE_INFO     _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
 
index 6b011c19b50f969d66d3ec0b26d7036d6bac7525..1d57ed3d84d2c3d10d02dce56d728b9cf0ece17b 100644 (file)
@@ -32,7 +32,7 @@
  */
 
 #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
-#define _UAPI_LINUX_VIRTIO_VOSCK_H
+#define _UAPI_LINUX_VIRTIO_VSOCK_H
 
 #include <linux/types.h>
 #include <linux/virtio_ids.h>
index cbae529b7ce0999684c42d7906df3de3febdc209..180d526a55c3ab5b54befbb264279e61d7e5a519 100644 (file)
@@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved {
         *
         * Of course the contents will be ABI, but that's up to the AFU driver.
         */
-       size_t data_size;
-       u8 data[];
+       __u32 data_size;
+       __u8 data[];
 };
 
 struct cxl_event {
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644 (file)
index 0000000..7b87efc
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * i.MX Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IMX_IPU_IMAGE_CONVERT_H__
+#define __IMX_IPU_IMAGE_CONVERT_H__
+
+#include <video/imx-ipu-v3.h>
+
+struct ipu_image_convert_ctx;
+
+/**
+ * struct ipu_image_convert_run - image conversion run request struct
+ *
+ * @ctx:       the conversion context
+ * @in_phys:   dma addr of input image buffer for this run
+ * @out_phys:  dma addr of output image buffer for this run
+ * @status:    completion status of this run
+ */
+struct ipu_image_convert_run {
+       struct ipu_image_convert_ctx *ctx;
+
+       dma_addr_t in_phys;
+       dma_addr_t out_phys;
+
+       int status;
+
+       /* internal to image converter, callers don't touch */
+       struct list_head list;
+};
+
+/**
+ * ipu_image_convert_cb_t - conversion callback function prototype
+ *
+ * @run:       the completed conversion run pointer
+ * @ctx:       a private context pointer for the callback
+ */
+typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
+                                      void *ctx);
+
+/**
+ * ipu_image_convert_enum_format() - enumerate the image converter's
+ *     supported input and output pixel formats.
+ *
+ * @index:     pixel format index
+ * @fourcc:    v4l2 fourcc for this index
+ *
+ * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
+ *
+ * In V4L2, drivers can call ipu_image_convert_enum_format() in .enum_fmt.
+ */
+int ipu_image_convert_enum_format(int index, u32 *fourcc);
+
+/**
+ * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
+ *
+ * @in:                input image format, adjusted on return
+ * @out:       output image format, adjusted on return
+ * @rot_mode:  rotation mode
+ *
+ * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
+ */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+                             enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_verify() - verify that input/output image formats
+ *         and rotation mode meet IPU restrictions.
+ *
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ *
+ * Returns 0 if the formats and rotation mode meet IPU restrictions,
+ * -EINVAL otherwise.
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+                            enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_prepare() - prepare a conversion context.
+ *
+ * @ipu:       the IPU handle to use for the conversions
+ * @ic_task:   the IC task to use for the conversions
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ * @complete:  run completion callback
+ * @complete_context:  a context pointer for the completion callback
+ *
+ * Returns an opaque conversion context pointer on success, error pointer
+ * on failure. The input/output formats and rotation mode must already meet
+ * IPU restrictions.
+ *
+ * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                         struct ipu_image *in, struct ipu_image *out,
+                         enum ipu_rotate_mode rot_mode,
+                         ipu_image_convert_cb_t complete,
+                         void *complete_context);
+
+/**
+ * ipu_image_convert_unprepare() - unprepare a conversion context.
+ *
+ * @ctx: the conversion context pointer to unprepare
+ *
+ * Aborts any active or pending conversions for this context and
+ * frees the context. Any currently active or pending runs belonging
+ * to this context are returned via the completion callback with an
+ * error run status.
+ *
+ * In V4L2, drivers should call ipu_image_convert_unprepare() at
+ * streamoff.
+ */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert_queue() - queue a conversion run
+ *
+ * @run: the run request pointer
+ *
+ * ipu_image_convert_run must be dynamically allocated (_not_ as a local
+ * var) by callers and filled in with a previously prepared conversion
+ * context handle and the DMA addresses of the input and output image buffers
+ * for this conversion run.
+ *
+ * When this conversion completes, the run pointer is returned via the
+ * completion callback. The caller is responsible for freeing the run
+ * object after it completes.
+ *
+ * In V4L2, drivers should call ipu_image_convert_queue() while
+ * streaming to queue the conversion of a received input buffer.
+ * For example, for mem2mem devices this would be called in .device_run.
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run);
+
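
A hedged kernel-side sketch of the allocate-and-queue sequence described in the comment above; it is not standalone-runnable, and example_queue_one() is a hypothetical helper, with 'ctx' assumed to come from a prior ipu_image_convert_prepare():

#include <linux/slab.h>
#include <video/imx-ipu-image-convert.h>

static int example_queue_one(struct ipu_image_convert_ctx *ctx,
			     dma_addr_t in, dma_addr_t out)
{
	struct ipu_image_convert_run *run;

	run = kzalloc(sizeof(*run), GFP_KERNEL); /* must not be on the stack */
	if (!run)
		return -ENOMEM;

	run->ctx = ctx;
	run->in_phys = in;
	run->out_phys = out;

	/* On completion, the callback passed to ipu_image_convert_prepare()
	 * receives 'run' back and is responsible for freeing it. */
	return ipu_image_convert_queue(run);
}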
+/**
+ * ipu_image_convert_abort() - abort conversions
+ *
+ * @ctx: the conversion context pointer
+ *
+ * This will abort any active or pending conversions for this context.
+ * Any currently active or pending runs belonging to this context are
+ * returned via the completion callback with an error run status.
+ */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert() - asynchronous image conversion request
+ *
+ * @ipu:       the IPU handle to use for the conversion
+ * @ic_task:   the IC task to use for the conversion
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ * @complete:  run completion callback
+ * @complete_context:  a context pointer for the completion callback
+ *
+ * Request a single image conversion. Returns the run that has been queued.
+ * A conversion context is automatically created and is available in run->ctx.
+ * As with ipu_image_convert_prepare(), the input/output formats and rotation
+ * mode must already meet IPU restrictions.
+ *
+ * On successful return the caller can queue more run requests if needed, using
+ * the prepared context in run->ctx. The caller is responsible for unpreparing
+ * the context when no more conversion requests are needed.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                 struct ipu_image *in, struct ipu_image *out,
+                 enum ipu_rotate_mode rot_mode,
+                 ipu_image_convert_cb_t complete,
+                 void *complete_context);
+
+/**
+ * ipu_image_convert_sync() - synchronous single image conversion request
+ *
+ * @ipu:       the IPU handle to use for the conversion
+ * @ic_task:   the IC task to use for the conversion
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ *
+ * Carry out a single image conversion. Returns when the conversion
+ * completes. The input/output formats and rotation mode must already
+ * meet IPU restrictions. The created context is automatically unprepared
+ * and the run freed on return.
+ */
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                          struct ipu_image *in, struct ipu_image *out,
+                          enum ipu_rotate_mode rot_mode);
+
+
+#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
index 7adeaae0696109c0088016bd4ad5c340d6455dd1..173073eb6aaf4350e10ac732cf9f482bb46f1639 100644 (file)
@@ -63,23 +63,41 @@ enum ipu_csi_dest {
 /*
  * Enumeration of IPU rotation modes
  */
+#define IPU_ROT_BIT_VFLIP (1 << 0)
+#define IPU_ROT_BIT_HFLIP (1 << 1)
+#define IPU_ROT_BIT_90    (1 << 2)
+
 enum ipu_rotate_mode {
        IPU_ROTATE_NONE = 0,
-       IPU_ROTATE_VERT_FLIP,
-       IPU_ROTATE_HORIZ_FLIP,
-       IPU_ROTATE_180,
-       IPU_ROTATE_90_RIGHT,
-       IPU_ROTATE_90_RIGHT_VFLIP,
-       IPU_ROTATE_90_RIGHT_HFLIP,
-       IPU_ROTATE_90_LEFT,
+       IPU_ROTATE_VERT_FLIP = IPU_ROT_BIT_VFLIP,
+       IPU_ROTATE_HORIZ_FLIP = IPU_ROT_BIT_HFLIP,
+       IPU_ROTATE_180 = (IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
+       IPU_ROTATE_90_RIGHT = IPU_ROT_BIT_90,
+       IPU_ROTATE_90_RIGHT_VFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_VFLIP),
+       IPU_ROTATE_90_RIGHT_HFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_HFLIP),
+       IPU_ROTATE_90_LEFT = (IPU_ROT_BIT_90 |
+                             IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
 };
 
+/* 90-degree rotations require the IRT unit */
+#define ipu_rot_mode_is_irt(m) (((m) & IPU_ROT_BIT_90) != 0)
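
A small standalone check of the bit encoding introduced above (bit values mirrored from this header; IPU_ROT_* names shortened for the example):

#include <assert.h>

#define VFLIP (1 << 0)
#define HFLIP (1 << 1)
#define ROT90 (1 << 2)

int main(void)
{
	int rotate_180 = VFLIP | HFLIP;           /* IPU_ROTATE_180 */
	int rotate_90_left = ROT90 | VFLIP | HFLIP;

	assert(!(rotate_180 & ROT90));            /* no IRT needed */
	assert(rotate_90_left & ROT90);           /* 90 degrees: needs IRT */
	return 0;
}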
+
 enum ipu_color_space {
        IPUV3_COLORSPACE_RGB,
        IPUV3_COLORSPACE_YUV,
        IPUV3_COLORSPACE_UNKNOWN,
 };
 
+/*
+ * Enumeration of VDI MOTION select
+ */
+enum ipu_motion_sel {
+       MOTION_NONE = 0,
+       LOW_MOTION,
+       MED_MOTION,
+       HIGH_MOTION,
+};
+
 struct ipuv3_channel;
 
 enum ipu_channel_irq {
@@ -97,20 +115,42 @@ enum ipu_channel_irq {
 #define IPUV3_CHANNEL_CSI2                      2
 #define IPUV3_CHANNEL_CSI3                      3
 #define IPUV3_CHANNEL_VDI_MEM_IC_VF             5
+/*
+ * NOTE: channels 6,7 are unused in the IPU and are not IDMAC channels,
+ * but the direct CSI->VDI linking is handled the same way as IDMAC
+ * channel linking in the FSU via the IPU_FS_PROC_FLOW registers, so
+ * these channel names are used to support the direct CSI->VDI link.
+ */
+#define IPUV3_CHANNEL_CSI_DIRECT                6
+#define IPUV3_CHANNEL_CSI_VDI_PREV              7
+#define IPUV3_CHANNEL_MEM_VDI_PREV              8
+#define IPUV3_CHANNEL_MEM_VDI_CUR               9
+#define IPUV3_CHANNEL_MEM_VDI_NEXT             10
 #define IPUV3_CHANNEL_MEM_IC_PP                        11
 #define IPUV3_CHANNEL_MEM_IC_PRP_VF            12
+#define IPUV3_CHANNEL_VDI_MEM_RECENT           13
 #define IPUV3_CHANNEL_G_MEM_IC_PRP_VF          14
 #define IPUV3_CHANNEL_G_MEM_IC_PP              15
+#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF_ALPHA    17
+#define IPUV3_CHANNEL_G_MEM_IC_PP_ALPHA                18
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB_ALPHA        19
 #define IPUV3_CHANNEL_IC_PRP_ENC_MEM           20
 #define IPUV3_CHANNEL_IC_PRP_VF_MEM            21
 #define IPUV3_CHANNEL_IC_PP_MEM                        22
 #define IPUV3_CHANNEL_MEM_BG_SYNC              23
 #define IPUV3_CHANNEL_MEM_BG_ASYNC             24
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB      25
+#define IPUV3_CHANNEL_MEM_VDI_PLANE3_COMB      26
 #define IPUV3_CHANNEL_MEM_FG_SYNC              27
 #define IPUV3_CHANNEL_MEM_DC_SYNC              28
 #define IPUV3_CHANNEL_MEM_FG_ASYNC             29
 #define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA                31
+#define IPUV3_CHANNEL_MEM_FG_ASYNC_ALPHA       33
+#define IPUV3_CHANNEL_DC_MEM_READ              40
 #define IPUV3_CHANNEL_MEM_DC_ASYNC             41
+#define IPUV3_CHANNEL_MEM_DC_COMMAND           42
+#define IPUV3_CHANNEL_MEM_DC_COMMAND2          43
+#define IPUV3_CHANNEL_MEM_DC_OUTPUT_MASK       44
 #define IPUV3_CHANNEL_MEM_ROT_ENC              45
 #define IPUV3_CHANNEL_MEM_ROT_VF               46
 #define IPUV3_CHANNEL_MEM_ROT_PP               47
@@ -118,6 +158,8 @@ enum ipu_channel_irq {
 #define IPUV3_CHANNEL_ROT_VF_MEM               49
 #define IPUV3_CHANNEL_ROT_PP_MEM               50
 #define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA                51
+#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA       52
+#define IPUV3_NUM_CHANNELS                     64
 
 int ipu_map_irq(struct ipu_soc *ipu, int irq);
 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
@@ -138,6 +180,7 @@ int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
 /*
  * IPU Common functions
  */
+int ipu_get_num(struct ipu_soc *ipu);
 void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2);
 void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi);
 void ipu_dump(struct ipu_soc *ipu);
@@ -160,6 +203,10 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
 bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink);
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink);
 
 /*
  * IPU Channel Parameter Memory (cpmem) functions
@@ -184,8 +231,10 @@ void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off);
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch);
 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
 void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
 void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
@@ -316,6 +365,19 @@ struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
 void ipu_ic_put(struct ipu_ic *ic);
 void ipu_ic_dump(struct ipu_ic *ic);
 
+/*
+ * IPU Video De-Interlacer (vdi) functions
+ */
+struct ipu_vdi;
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
+void ipu_vdi_unsetup(struct ipu_vdi *vdi);
+int ipu_vdi_enable(struct ipu_vdi *vdi);
+int ipu_vdi_disable(struct ipu_vdi *vdi);
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
+void ipu_vdi_put(struct ipu_vdi *vdi);
+
 /*
  * IPU Sensor Multiple FIFO Controller (SMFC) functions
  */
index 9a37c541822f6093aa86188484e5a6463e214e96..b5486e64860759aed11763ac983b3b4aa12988b3 100644 (file)
@@ -9,8 +9,8 @@
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
-DECLARE_PER_CPU(int, xen_vcpu_id);
-static inline int xen_vcpu_nr(int cpu)
+DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
+static inline uint32_t xen_vcpu_nr(int cpu)
 {
        return per_cpu(xen_vcpu_id, cpu);
 }
index 69886493ff1e3d31daa02d909447c744352b38ff..cac3f096050d5b27a9bd5de8600469f3c5aa0ada 100644 (file)
@@ -1761,6 +1761,7 @@ choice
 
 config SLAB
        bool "SLAB"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
          The regular slab allocator that is established and known to work
          well in all environments. It organizes cache hot objects in
@@ -1768,6 +1769,7 @@ config SLAB
 
 config SLUB
        bool "SLUB (Unqueued Allocator)"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
           SLUB is a slab allocator that minimizes cache line usage
           instead of managing queues of cached objects (SLAB approach).
index d6709eb70970eb764bd14af1d1874fe5144b9053..0d302a87f21b58ac711aaf5720cca12640bfc0ef 100644 (file)
@@ -19,6 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/file.h>
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
        unsigned long ino;
        dev_t dev;
 
-       rcu_read_lock();
-       exe_file = rcu_dereference(tsk->mm->exe_file);
+       exe_file = get_task_exe_file(tsk);
+       if (!exe_file)
+               return 0;
        ino = exe_file->f_inode->i_ino;
        dev = exe_file->f_inode->i_sb->s_dev;
-       rcu_read_unlock();
+       fput(exe_file);
        return audit_mark_compare(mark, ino, dev);
 }
index fff3650d52fc774f7efdf2d289e879a916f44c60..570eeca7bdfa79ce16d18ee94fac64c40aa11d38 100644 (file)
@@ -26,11 +26,18 @@ struct bpf_htab {
        struct bucket *buckets;
        void *elems;
        struct pcpu_freelist freelist;
+       void __percpu *extra_elems;
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
 };
 
+enum extra_elem_state {
+       HTAB_NOT_AN_EXTRA_ELEM = 0,
+       HTAB_EXTRA_ELEM_FREE,
+       HTAB_EXTRA_ELEM_USED
+};
+
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
        union {
@@ -38,7 +45,10 @@ struct htab_elem {
                struct bpf_htab *htab;
                struct pcpu_freelist_node fnode;
        };
-       struct rcu_head rcu;
+       union {
+               struct rcu_head rcu;
+               enum extra_elem_state state;
+       };
        u32 hash;
        char key[0] __aligned(8);
 };
@@ -113,6 +123,23 @@ free_elems:
        return err;
 }
 
+static int alloc_extra_elems(struct bpf_htab *htab)
+{
+       void __percpu *pptr;
+       int cpu;
+
+       pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+       if (!pptr)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
+                       HTAB_EXTRA_ELEM_FREE;
+       }
+       htab->extra_elems = pptr;
+       return 0;
+}
+
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
@@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        if (percpu)
                cost += (u64) round_up(htab->map.value_size, 8) *
                        num_possible_cpus() * htab->map.max_entries;
+       else
+              cost += (u64) htab->elem_size * num_possible_cpus();
 
        if (cost >= U32_MAX - PAGE_SIZE)
                /* make sure page count doesn't overflow */
@@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                raw_spin_lock_init(&htab->buckets[i].lock);
        }
 
+       if (!percpu) {
+               err = alloc_extra_elems(htab);
+               if (err)
+                       goto free_buckets;
+       }
+
        if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
                err = prealloc_elems_and_freelist(htab);
                if (err)
-                       goto free_buckets;
+                       goto free_extra_elems;
        }
 
        return &htab->map;
 
+free_extra_elems:
+       free_percpu(htab->extra_elems);
 free_buckets:
        kvfree(htab->buckets);
 free_htab:
@@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
        if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
                free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
        kfree(l);
-
 }
 
 static void htab_elem_free_rcu(struct rcu_head *head)
@@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
+       if (l->state == HTAB_EXTRA_ELEM_USED) {
+               l->state = HTAB_EXTRA_ELEM_FREE;
+               return;
+       }
+
        if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
                pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
@@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
-                                        bool percpu, bool onallcpus)
+                                        bool percpu, bool onallcpus,
+                                        bool old_elem_exists)
 {
        u32 size = htab->map.value_size;
        bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
        struct htab_elem *l_new;
        void __percpu *pptr;
+       int err = 0;
 
        if (prealloc) {
                l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
                if (!l_new)
-                       return ERR_PTR(-E2BIG);
+                       err = -E2BIG;
        } else {
                if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
                        atomic_dec(&htab->count);
-                       return ERR_PTR(-E2BIG);
+                       err = -E2BIG;
+               } else {
+                       l_new = kmalloc(htab->elem_size,
+                                       GFP_ATOMIC | __GFP_NOWARN);
+                       if (!l_new)
+                               return ERR_PTR(-ENOMEM);
                }
-               l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-               if (!l_new)
-                       return ERR_PTR(-ENOMEM);
+       }
+
+       if (err) {
+               if (!old_elem_exists)
+                       return ERR_PTR(err);
+
+               /* if we're updating the existing element and the hash table
+                * is full, use per-cpu extra elems
+                */
+               l_new = this_cpu_ptr(htab->extra_elems);
+               if (l_new->state != HTAB_EXTRA_ELEM_FREE)
+                       return ERR_PTR(-E2BIG);
+               l_new->state = HTAB_EXTRA_ELEM_USED;
+       } else {
+               l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
        }
 
        memcpy(l_new->key, key, key_size);
@@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        if (ret)
                goto err;
 
-       l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+       l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
+                               !!l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
@@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                }
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
-                                       hash, true, onallcpus);
+                                       hash, true, onallcpus, false);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
@@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map)
                htab_free_elems(htab);
                pcpu_freelist_destroy(&htab->freelist);
        }
+       free_percpu(htab->extra_elems);
        kvfree(htab->buckets);
        kfree(htab);
 }
index f72f23b8fdab42b8e2c3add264bb2680f93bf940..daea765d72e6f536c31b14bb569208b0e975b75c 100644 (file)
@@ -194,6 +194,7 @@ struct verifier_env {
        struct verifier_state_list **explored_states; /* search pruning optimization */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
+       u32 id_gen;                     /* used to generate unique reg IDs */
        bool allow_ptr_leaks;
 };
 
@@ -1052,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
                        goto error;
                break;
        case BPF_MAP_TYPE_CGROUP_ARRAY:
-               if (func_id != BPF_FUNC_skb_in_cgroup)
+               if (func_id != BPF_FUNC_skb_under_cgroup)
                        goto error;
                break;
        default:
@@ -1074,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
                if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
                        goto error;
                break;
-       case BPF_FUNC_skb_in_cgroup:
+       case BPF_FUNC_skb_under_cgroup:
                if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
                        goto error;
                break;
@@ -1301,7 +1302,7 @@ add_imm:
                /* dst_reg stays as pkt_ptr type and since some positive
                 * integer value was added to the pointer, increment its 'id'
                 */
-               dst_reg->id++;
+               dst_reg->id = ++env->id_gen;
 
                /* something was added to pkt_ptr, set range and off to zero */
                dst_reg->off = 0;
index d1c51b7f5221fbfbb85ed3d12e4918a970713829..5e8dab5bf9adb93205781083a6cf5312da140fdc 100644 (file)
@@ -6270,6 +6270,12 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        if (cgroup_sk_alloc_disabled)
                return;
 
+       /* Socket clone path */
+       if (skcd->val) {
+               cgroup_get(sock_cgroup_ptr(skcd));
+               return;
+       }
+
        rcu_read_lock();
 
        while (true) {
index c2de56ab0fce8b2193a2414afc2d4eb743510c74..7fa0c4ae6394f028fa09694b219314dd3d7d8731 100644 (file)
@@ -1,4 +1,12 @@
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
 CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
 CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
 CONFIG_SLOB=y
index c7fd2778ed50edc52b7c0e5f1f651dcb141c9495..c27e53326befe9f33ffc605a75a48cf8e9ae6624 100644 (file)
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
        mutex_unlock(&cpuset_mutex);
 }
 
+/*
+ * Make sure the new task conforms to the current state of its parent,
+ * which could have been changed by cpuset just after it inherits the
+ * state from the parent and before it sits on the cgroup's task list.
+ */
+void cpuset_fork(struct task_struct *task)
+{
+       if (task_css_is_root(task, cpuset_cgrp_id))
+               return;
+
+       set_cpus_allowed_ptr(task, &current->cpus_allowed);
+       task->mems_allowed = current->mems_allowed;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
        .css_alloc      = cpuset_css_alloc,
        .css_online     = cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
        .attach         = cpuset_attach,
        .post_attach    = cpuset_post_attach,
        .bind           = cpuset_bind,
+       .fork           = cpuset_fork,
        .legacy_cftypes = files,
        .early_init     = true,
 };
index a19550d80ab1724d03ac1b0799aad54a6f1cf823..fc9bb22252913acd34cf35be26fe5777a43c4506 100644 (file)
@@ -242,18 +242,6 @@ unlock:
        return ret;
 }
 
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
-       struct event_function_struct efs = {
-               .event = event,
-               .func = func,
-               .data = data,
-       };
-
-       int ret = event_function(&efs);
-       WARN_ON_ONCE(ret);
-}
-
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
        struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+       struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct task_struct *task = READ_ONCE(ctx->task);
+       struct perf_event_context *task_ctx = NULL;
+
+       WARN_ON_ONCE(!irqs_disabled());
+
+       if (task) {
+               if (task == TASK_TOMBSTONE)
+                       return;
+
+               task_ctx = ctx;
+       }
+
+       perf_ctx_lock(cpuctx, task_ctx);
+
+       task = ctx->task;
+       if (task == TASK_TOMBSTONE)
+               goto unlock;
+
+       if (task) {
+               /*
+                * We must be either inactive or active and the right task,
+                * otherwise we're screwed, since we cannot IPI to somewhere
+                * else.
+                */
+               if (ctx->is_active) {
+                       if (WARN_ON_ONCE(task != current))
+                               goto unlock;
+
+                       if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+                               goto unlock;
+               }
+       } else {
+               WARN_ON_ONCE(&cpuctx->ctx != ctx);
+       }
+
+       func(event, cpuctx, ctx, data);
+unlock:
+       perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP |\
@@ -843,6 +879,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
                }
        }
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when the first cgroup event is added
+ * and cleared when the last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+                        struct perf_event_context *ctx, bool add)
+{
+       struct perf_cpu_context *cpuctx;
+
+       if (!is_cgroup_event(event))
+               return;
+
+       if (add && ctx->nr_cgroups++)
+               return;
+       else if (!add && --ctx->nr_cgroups)
+               return;
+       /*
+        * Because cgroup events are always per-cpu events,
+        * this will always be called from the right CPU.
+        */
+       cpuctx = __get_cpu_context(ctx);
+       cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
@@ -920,6 +982,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
                         struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+                        struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1461,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1482,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                list_add_tail(&event->group_entry, list);
        }
 
-       if (is_cgroup_event(event))
-               ctx->nr_cgroups++;
+       list_update_cgroup_event(event, ctx, true);
 
        list_add_rcu(&event->event_entry, &ctx->event_list);
        ctx->nr_events++;
@@ -1581,8 +1650,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-       struct perf_cpu_context *cpuctx;
-
        WARN_ON_ONCE(event->ctx != ctx);
        lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1661,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
        event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-       if (is_cgroup_event(event)) {
-               ctx->nr_cgroups--;
-               /*
-                * Because cgroup events are always per-cpu events, this will
-                * always be called from the right CPU.
-                */
-               cpuctx = __get_cpu_context(ctx);
-               /*
-                * If there are no more cgroup events then clear cgrp to avoid
-                * stale pointer in update_cgrp_time_from_cpuctx().
-                */
-               if (!ctx->nr_cgroups)
-                       cpuctx->cgrp = NULL;
-       }
+       list_update_cgroup_event(event, ctx, false);
 
        ctx->nr_events--;
        if (event->attr.inherit_stat)
@@ -1716,8 +1770,8 @@ static inline int pmu_filter_match(struct perf_event *event)
 static inline int
 event_filter_match(struct perf_event *event)
 {
-       return (event->cpu == -1 || event->cpu == smp_processor_id())
-           && perf_cgroup_match(event) && pmu_filter_match(event);
+       return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+              perf_cgroup_match(event) && pmu_filter_match(event);
 }
 
 static void
@@ -1737,8 +1791,8 @@ event_sched_out(struct perf_event *event,
         * maintained, otherwise bogus information is returned
         * via read() for time_enabled, time_running:
         */
-       if (event->state == PERF_EVENT_STATE_INACTIVE
-           && !event_filter_match(event)) {
+       if (event->state == PERF_EVENT_STATE_INACTIVE &&
+           !event_filter_match(event)) {
                delta = tstamp - event->tstamp_stopped;
                event->tstamp_running += delta;
                event->tstamp_stopped = tstamp;
@@ -2236,10 +2290,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 
        lockdep_assert_held(&ctx->mutex);
 
-       event->ctx = ctx;
        if (event->cpu != -1)
                event->cpu = cpu;
 
+       /*
+        * Ensures that if we can observe event->ctx, both the event and ctx
+        * will be 'complete'. See perf_iterate_sb_cpu().
+        */
+       smp_store_release(&event->ctx, ctx);
+
        if (!task) {
                cpu_function_call(cpu, __perf_install_in_context, event);
                return;
@@ -2437,11 +2496,11 @@ static int __perf_event_stop(void *info)
        return 0;
 }
 
-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
 {
        struct stop_event_data sd = {
                .event          = event,
-               .restart        = 1,
+               .restart        = restart,
        };
        int ret = 0;
 
@@ -3490,8 +3549,17 @@ static int perf_event_read(struct perf_event *event, bool group)
                        .group = group,
                        .ret = 0,
                };
-               smp_call_function_single(event->oncpu,
-                                        __perf_event_read, &data, 1);
+               /*
+                * Purposely ignore the smp_call_function_single() return
+                * value.
+                *
+                * If event->oncpu isn't a valid CPU it means the event got
+                * scheduled out and that will have updated the event count.
+                *
+                * Therefore, either way, we'll have an up-to-date event count
+                * after this.
+                */
+               (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
                ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
@@ -3861,7 +3929,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
 {
-       if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
+       if ((e1->pmu == e2->pmu) &&
            (e1->cpu == e2->cpu ||
             e1->cpu == -1 ||
             e2->cpu == -1))
@@ -4777,6 +4845,19 @@ static void ring_buffer_attach(struct perf_event *event,
                spin_unlock_irqrestore(&rb->event_lock, flags);
        }
 
+       /*
+        * Avoid racing with perf_mmap_close(AUX): stop the event
+        * before swizzling the event::rb pointer; if it's getting
+        * unmapped, its aux_mmap_count will be 0 and it won't
+        * restart. See the comment in __perf_pmu_output_stop().
+        *
+        * Data will inevitably be lost when set_output is done in
+        * mid-air, but then again, whoever does it like this is
+        * not in for the data anyway.
+        */
+       if (has_aux(event))
+               perf_event_stop(event, 0);
+
        rcu_assign_pointer(event->rb, rb);
 
        if (old_rb) {
@@ -5969,6 +6050,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
        struct perf_event *event;
 
        list_for_each_entry_rcu(event, &pel->list, sb_list) {
+               /*
+                * Skip events that are not fully formed yet; ensure that
+                * if we observe event->ctx, both event and ctx will be
+                * complete enough. See perf_install_in_context().
+                */
+               if (!smp_load_acquire(&event->ctx))
+                       continue;
+
                if (event->state < PERF_EVENT_STATE_INACTIVE)
                        continue;
                if (!event_filter_match(event))
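
This acquire-side check pairs with the smp_store_release() in
perf_install_in_context() above: the publisher fully initializes the event
before release-storing event->ctx, and the iterator acquire-loads the pointer
and skips the event while it is still NULL. A standalone C11 sketch of the same
publication pattern (names illustrative, not kernel API):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct ctx { int id; };

struct event {
        int payload;                    /* initialized before publication */
        _Atomic(struct ctx *) ctx;      /* NULL until the event is complete */
};

static struct ctx global_ctx = { .id = 42 };
static struct event ev;

/* Publisher: initialize everything, then release-store the pointer. */
static void publish(void)
{
        ev.payload = 1234;
        atomic_store_explicit(&ev.ctx, &global_ctx, memory_order_release);
}

/* Consumer: acquire-load the pointer; a non-NULL result guarantees the
 * payload written before the release store is visible. */
static void consume(void)
{
        struct ctx *c = atomic_load_explicit(&ev.ctx, memory_order_acquire);

        if (!c)
                return;         /* not fully formed yet: skip, as the iterator does */
        printf("ctx %d, payload %d\n", c->id, ev.payload);
}

int main(void)
{
        consume();      /* before publication: skipped */
        publish();
        consume();      /* after publication: sees complete data */
        return 0;
}
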
@@ -6044,7 +6133,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }
 
 void perf_event_exec(void)
@@ -6088,7 +6177,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 
        /*
         * In case of inheritance, it will be the parent that links to the
-        * ring-buffer, but it will be the child that's actually using it:
+        * ring-buffer, but it will be the child that's actually using it.
+        *
+        * We are using event::rb to determine if the event should be stopped,
+        * however this may race with ring_buffer_attach() (through set_output),
+        * which will make us skip the event that actually needs to be stopped.
+        * So ring_buffer_attach() has to stop an aux event before re-assigning
+        * its rb pointer.
         */
        if (rcu_dereference(parent->rb) == rb)
                ro->err = __perf_event_stop(&sd);
@@ -6098,7 +6193,7 @@ static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
        struct pmu *pmu = event->pmu;
-       struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
        };
@@ -6552,15 +6647,6 @@ got_name:
        kfree(buf);
 }
 
-/*
- * Whether this @filter depends on a dynamic object which is not loaded
- * yet or its load addresses are not known.
- */
-static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
-{
-       return filter->filter && filter->inode;
-}
-
 /*
  * Check whether inode and address range match filter criteria.
  */
@@ -6611,7 +6697,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }
 
 /*
@@ -6622,6 +6708,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
        struct perf_event_context *ctx;
        int ctxn;
 
+       /*
+        * Data tracing isn't supported yet and as such there is no need
+        * to keep track of anything that isn't related to executable code:
+        */
+       if (!(vma->vm_flags & VM_EXEC))
+               return;
+
        rcu_read_lock();
        for_each_task_context_nr(ctxn) {
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@ -7774,7 +7867,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        list_for_each_entry(filter, &ifh->list, entry) {
                event->addr_filters_offs[count] = 0;
 
-               if (perf_addr_filter_needs_mmap(filter))
+               /*
+                * Adjust base offset if the filter is associated with a binary
+                * that needs to be mapped:
+                */
+               if (filter->inode)
                        event->addr_filters_offs[count] =
                                perf_addr_filter_apply(filter, mm);
 
@@ -7789,7 +7886,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        mmput(mm);
 
 restart:
-       perf_event_restart(event);
+       perf_event_stop(event, 1);
 }
 
 /*
@@ -7905,8 +8002,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                        goto fail;
                        }
 
-                       if (token == IF_SRC_FILE) {
-                               filename = match_strdup(&args[2]);
+                       if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+                               int fpos = filter->range ? 2 : 1;
+
+                               filename = match_strdup(&args[fpos]);
                                if (!filename) {
                                        ret = -ENOMEM;
                                        goto fail;
index ae9b90dc9a5a66c74134d4464665ee05c125ea2b..257fa460b846032744e2da500d489d0995678571 100644 (file)
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        if (!rb)
                return NULL;
 
-       if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+       if (!rb_has_aux(rb))
                goto err;
 
        /*
-        * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
-        * the aux buffer is in perf_mmap_close(), about to get freed.
+        * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+        * about to get freed, so we leave immediately.
+        *
+        * Checking rb::aux_mmap_count and rb::refcount has to be done in
+        * the same order, see perf_mmap_close. Otherwise we end up freeing
+        * aux pages in this path, which is a bug, because this path can
+        * run in atomic context (in_atomic()).
         */
        if (!atomic_read(&rb->aux_mmap_count))
-               goto err_put;
+               goto err;
+
+       if (!atomic_inc_not_zero(&rb->aux_refcount))
+               goto err;
 
        /*
         * Nesting is not supported for AUX area, make sure nested
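
The reordering in perf_aux_output_begin() makes the aux_mmap_count liveness
check precede the reference grab, and the grab itself is the classic
inc-not-zero idiom: take a reference only while the count is still positive, so
a buffer already in teardown is never revived. A C11 userspace sketch of that
idiom (ref_get_not_zero() is an invented name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the count is still non-zero, i.e. only if
 * the object has not already started dying (compare-and-swap loop,
 * the userspace analogue of the kernel's atomic_inc_not_zero()). */
static bool ref_get_not_zero(atomic_int *refs)
{
        int old = atomic_load(refs);

        do {
                if (old == 0)
                        return false;   /* object is being torn down */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
}

int main(void)
{
        atomic_int refs = 1;

        printf("grab live object: %d\n", ref_get_not_zero(&refs));     /* 1 */
        atomic_store(&refs, 0);                 /* last reference dropped */
        printf("grab dead object: %d\n", ref_get_not_zero(&refs));     /* 0 */
        return 0;
}
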
index b7a525ab2083708f0203db32059dd66551d11d9b..8c50276b60d1c7fb75da997f85819f5b9a313c27 100644 (file)
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        err = -EAGAIN;
        ptep = page_check_address(page, mm, addr, &ptl, 0);
-       if (!ptep)
+       if (!ptep) {
+               mem_cgroup_cancel_charge(kpage, memcg, false);
                goto unlock;
+       }
 
        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
        err = 0;
  unlock:
-       mem_cgroup_cancel_charge(kpage, memcg, false);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        unlock_page(page);
        return err;
index 2f974ae042a677a90c7d34a78b66793cbe8584c5..091a78be3b09d5669d9c10b98f6300e4171d2413 100644 (file)
@@ -848,12 +848,7 @@ void do_exit(long code)
        TASKS_RCU(preempt_enable());
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
-#ifdef CONFIG_NUMA
-       task_lock(tsk);
-       mpol_put(tsk->mempolicy);
-       tsk->mempolicy = NULL;
-       task_unlock(tsk);
-#endif
+       mpol_put_task_policy(tsk);
 #ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
index 52e725d4a866b4ac30f16b4db075b9b197e4e53d..beb31725f7e2746fb17cdd305c4193ab2c70e551 100644 (file)
@@ -798,6 +798,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(get_mm_exe_file);
 
+/**
+ * get_task_exe_file - acquire a reference to the task's executable file
+ *
+ * Returns %NULL if task's mm (if any) has no associated executable file or
+ * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
+ * User must release file via fput().
+ */
+struct file *get_task_exe_file(struct task_struct *task)
+{
+       struct file *exe_file = NULL;
+       struct mm_struct *mm;
+
+       task_lock(task);
+       mm = task->mm;
+       if (mm) {
+               if (!(task->flags & PF_KTHREAD))
+                       exe_file = get_mm_exe_file(mm);
+       }
+       task_unlock(task);
+       return exe_file;
+}
+EXPORT_SYMBOL(get_task_exe_file);
+
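
A caller of get_task_exe_file() owns a reference on the returned struct file
and must drop it with fput(). A hedged sketch of typical usage follows;
report_exe() is invented for illustration, only get_task_exe_file() and fput()
come from the kernel:

#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void report_exe(struct task_struct *task)
{
        struct file *exe_file = get_task_exe_file(task);

        if (!exe_file)
                return;         /* kernel thread, or mm without an exe file */

        /* Safe to dereference: we hold our own reference now. */
        pr_info("pid %d exe: %pD\n", task->pid, exe_file);
        fput(exe_file);         /* drop the reference the helper took */
}
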
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -913,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        deactivate_mm(tsk, mm);
 
        /*
-        * If we're exiting normally, clear a user-space tid field if
-        * requested.  We leave this alone when dying by signal, to leave
-        * the value intact in a core dump, and to save the unnecessary
-        * trouble, say, a killed vfork parent shouldn't touch this mm.
-        * Userland only wants this done for a sys_exit.
+        * Signal userspace if we're not exiting with a core dump
+        * because we want to leave the value intact for debugging
+        * purposes.
         */
        if (tsk->clear_child_tid) {
-               if (!(tsk->flags & PF_SIGNALED) &&
+               if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
                    atomic_read(&mm->mm_users) > 1) {
                        /*
                         * We don't check the error code - if userspace has
@@ -1404,7 +1425,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->real_start_time = ktime_get_boot_ns();
        p->io_context = NULL;
        p->audit_context = NULL;
-       threadgroup_change_begin(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1576,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
+       threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted that the new process's css_set can be changed
@@ -1656,6 +1677,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
+       threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
@@ -1688,7 +1710,6 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-       threadgroup_change_end(current);
        delayacct_tsk_free(p);
 bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
index 33664f70e2d25e880efdbc8695fcc12989871150..46cb3a301bc1555a84607bab7b2aea8a2bcaf7e6 100644 (file)
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled;
  * Futex flags used to encode options to functions and preserve them across
  * restarts.
  */
-#define FLAGS_SHARED           0x01
+#ifdef CONFIG_MMU
+# define FLAGS_SHARED          0x01
+#else
+/*
+ * NOMMU does not have per process address space. Let the compiler optimize
+ * code away.
+ */
+# define FLAGS_SHARED          0x00
+#endif
 #define FLAGS_CLOCKRT          0x02
 #define FLAGS_HAS_TIMEOUT      0x04
 
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key)
        if (!key->both.ptr)
                return;
 
+       /*
+        * On MMU-less systems futexes are always "private", as there is no
+        * per-process address space. We need the smp_mb() nevertheless - yes,
+        * arch/blackfin has MMU-less SMP ...
+        */
+       if (!IS_ENABLED(CONFIG_MMU)) {
+               smp_mb(); /* explicit smp_mb(); (B) */
+               return;
+       }
+
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                ihold(key->shared.inode); /* implies smp_mb(); (B) */
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key)
                return;
        }
 
+       if (!IS_ENABLED(CONFIG_MMU))
+               return;
+
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
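
Because IS_ENABLED(CONFIG_MMU) is an integer constant expression, the new
branches cost nothing on NOMMU kernels: with FLAGS_SHARED pinned to 0x00 the
shared-futex paths become provably dead code and the compiler deletes them. A
freestanding sketch of the pattern, with a stand-in config constant:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(): a plain integer constant
 * expression, so the compiler can delete branches guarded by it. */
#define CONFIG_MMU_ENABLED 0

#define FLAGS_SHARED (CONFIG_MMU_ENABLED ? 0x01 : 0x00)

static int get_futex_key_sketch(int flags)
{
        if (!CONFIG_MMU_ENABLED) {
                /* NOMMU: everything is process-private; with FLAGS_SHARED
                 * fixed at 0x00 the branch below is provably dead and the
                 * compiler drops it at -O1 and up. */
                return 0;
        }
        if (flags & FLAGS_SHARED)
                return 1;       /* shared-mapping path (MMU kernels only) */
        return 0;
}

int main(void)
{
        printf("key class: %d\n", get_futex_key_sketch(FLAGS_SHARED));
        return 0;
}
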
index f68959341c0faa87809531af1a508dde6978feef..32f6cfcff21244a199bec1877b3a29ada5dcac5f 100644 (file)
@@ -39,6 +39,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
                return NULL;
        }
 
+       get_online_cpus();
        if (max_vecs >= num_online_cpus()) {
                cpumask_copy(affinity_mask, cpu_online_mask);
                *nr_vecs = num_online_cpus();
@@ -56,6 +57,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
                }
                *nr_vecs = vecs;
        }
+       put_online_cpus();
 
        return affinity_mask;
 }
index b4c1bc7c9ca204440c7c51e7df7a49b0c2ae511f..26ba5654d9d5bea8c741d3132f184fe9f0874f0f 100644 (file)
@@ -820,6 +820,21 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
        desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
+               unsigned int type = irqd_get_trigger_type(&desc->irq_data);
+
+               /*
+                * We're about to start this interrupt immediately,
+                * hence the need to set the trigger configuration.
+                * But the .set_type callback may have overridden the
+                * flow handler, ignoring that we're dealing with a
+                * chained interrupt. Reset it immediately because we
+                * do know better.
+                */
+               if (type != IRQ_TYPE_NONE) {
+                       __irq_set_trigger(desc, type);
+                       desc->handle_irq = handle;
+               }
+
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
index 73a2b786b5e99b528250ddbc4b4fb007ab1e67e5..9530fcd27704008a9531c49d623910c707d681ce 100644 (file)
@@ -1681,8 +1681,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        action->dev_id = dev_id;
 
        retval = irq_chip_pm_get(&desc->irq_data);
-       if (retval < 0)
+       if (retval < 0) {
+               kfree(action);
                return retval;
+       }
 
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
@@ -1985,8 +1987,10 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
        action->percpu_dev_id = dev_id;
 
        retval = irq_chip_pm_get(&desc->irq_data);
-       if (retval < 0)
+       if (retval < 0) {
+               kfree(action);
                return retval;
+       }
 
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
index 54999350162cbc89326a67a3728814e17871d784..19e9dfbe97fa53f732edd375cdd50341327750ac 100644 (file)
@@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                else
                        dev_dbg(dev, "irq [%d-%d] for MSI\n",
                                virq, virq + desc->nvec_used - 1);
+               /*
+                * This flag is set by the PCI layer as we need to activate
+                * the MSI entries before the PCI layer enables MSI in the
+                * card. Otherwise the card latches a random msi message.
+                */
+               if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+                       struct irq_data *irq_data;
+
+                       irq_data = irq_domain_get_irq_data(domain, desc->irq);
+                       irq_domain_activate_irq(irq_data);
+               }
        }
 
        return 0;
index 503bc2d348e59aa2f866a4ad5b79d913419e862b..037c321c56188f9fbb35193072f08052d6e4771f 100644 (file)
@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
        return 0;
 out:
        vfree(pi->sechdrs);
+       pi->sechdrs = NULL;
+
        vfree(pi->purgatory_buf);
+       pi->purgatory_buf = NULL;
        return ret;
 }
 
index 37649e69056cf974e27d0137260f8ff46ad688df..8a99abf58080be21fbb954777b48aca24d4342b5 100644 (file)
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                                goto gotlock;
                        }
                }
-               WRITE_ONCE(pn->state, vcpu_halted);
+               WRITE_ONCE(pn->state, vcpu_hashed);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
                pv_wait(&l->locked, _Q_SLOW_VAL);
index 22e02530984574a6fee718aad26720516c34d497..b9d0315162540d1236e5e1268f184531d8259114 100644 (file)
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
                 */
                if ((counter == qstat_pv_latency_kick) ||
                    (counter == qstat_pv_latency_wake)) {
-                       stat = 0;
                        if (kicks)
                                stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
                }
index 251d16b4cb41e67111ff2f1f783bf39cdea13822..b501e390bb34403c11d6ae09c31ea9fcb769ea8e 100644 (file)
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
        resource_size_t key, align_start, align_size, align_end;
+       pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (nid < 0)
                nid = numa_mem_id();
 
+       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
+                       align_size);
+       if (error)
+               goto err_pfn_remap;
+
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        return __va(res->start);
 
  err_add_memory:
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ err_pfn_remap:
  err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
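
The fix threads a new err_pfn_remap label into the existing unwind chain,
keeping the invariant that a failure at step N jumps to the label that undoes
steps N-1..1 in reverse order. A self-contained sketch of that goto-unwind
shape (step names invented):

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("do   %s\n", name);
        return fail ? -1 : 0;
}

static void undo(const char *name)
{
        printf("undo %s\n", name);
}

/* Mirror of the unwind chain in devm_memremap_pages(): a failure at
 * step N jumps to the label that unwinds steps N-1..1, in reverse. */
static int setup(int fail_at)
{
        int err;

        err = step("track_pfn", fail_at == 1);
        if (err)
                goto err_pfn_remap;
        err = step("add_memory", fail_at == 2);
        if (err)
                goto err_add_memory;
        return 0;

 err_add_memory:
        undo("track_pfn");      /* untrack_pfn() in the real code */
 err_pfn_remap:
        return err;
}

int main(void)
{
        setup(2);       /* fails at add_memory: track_pfn is undone */
        return 0;
}
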
index a881c6a7ba74020db9d4fee663445112af0edcc8..33c79b6105c55fdc4f228bb0348fbc2b92d68f0f 100644 (file)
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
        save_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
        error = swsusp_arch_suspend();
+       /* Restore control flow magically appears here */
+       restore_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
        if (error)
                printk(KERN_ERR "PM: Error %d creating hibernation image\n",
                        error);
-       /* Restore control flow magically appears here */
-       restore_processor_state();
        if (!in_suspend)
                events_check_enabled = false;
 
index 97b0df71303ef7bbecf28d1479c3e198885b6199..168ff442ebde71cefea74ce66a2bf8e86c6e3ddc 100644 (file)
@@ -482,7 +482,16 @@ void pm_qos_update_request(struct pm_qos_request *req,
                return;
        }
 
-       cancel_delayed_work_sync(&req->work);
+       /*
+        * This function may be called very early during boot, for example,
+        * from of_clk_init(), where irq needs to stay disabled.
+        * cancel_delayed_work_sync() assumes that irq is enabled on
+        * invocation and re-enables it on return.  Avoid calling it until
+        * workqueue is initialized.
+        */
+       if (keventd_up())
+               cancel_delayed_work_sync(&req->work);
+
        __pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
index 9a0178c2ac1df6b68f52a31e96beda95a1517781..b02228411d575b87ddc62db9f92b3fadf4765a5a 100644 (file)
@@ -835,9 +835,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  */
 static bool rtree_next_node(struct memory_bitmap *bm)
 {
-       bm->cur.node = list_entry(bm->cur.node->list.next,
-                                 struct rtree_node, list);
-       if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+       if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
+               bm->cur.node = list_entry(bm->cur.node->list.next,
+                                         struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit  = 0;
                touch_softlockup_watchdog();
@@ -845,9 +845,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
        }
 
        /* No more nodes, goto next zone */
-       bm->cur.zone = list_entry(bm->cur.zone->list.next,
+       if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
+               bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                  struct mem_zone_bm_rtree, list);
-       if (&bm->cur.zone->list != &bm->zones) {
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
index 276762f3a46078141117e5810c1afc964e869ec9..d5760c42f042f4accfb65de23784bcaa877d9b00 100644 (file)
@@ -9,10 +9,10 @@
 
 char *_braille_console_setup(char **str, char **brl_options)
 {
-       if (!memcmp(*str, "brl,", 4)) {
+       if (!strncmp(*str, "brl,", 4)) {
                *brl_options = "";
                *str += 4;
-       } else if (!memcmp(str, "brl=", 4)) {
+       } else if (!strncmp(*str, "brl=", 4)) {
                *brl_options = *str + 4;
                *str = strchr(*brl_options, ',');
                if (!*str)
index 5d4505f30083550e59337fe5d90cf14f783573c0..7fd2838fa41748006cebf14ed7432739f3138301 100644 (file)
  */
 #include <linux/percpu.h>
 
-typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt,
-                                           va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
 
-__printf(2, 0)
-int vprintk_default(int level, const char *fmt, va_list args);
+int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK_NMI
 
@@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock;
  * via per-CPU variable.
  */
 DECLARE_PER_CPU(printk_func_t, printk_func);
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return this_cpu_read(printk_func)(level, fmt, args);
+       return this_cpu_read(printk_func)(fmt, args);
 }
 
 extern atomic_t nmi_message_lost;
@@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void)
 
 #else /* CONFIG_PRINTK_NMI */
 
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return vprintk_default(level, fmt, args);
+       return vprintk_default(fmt, args);
 }
 
 static inline int get_nmi_message_lost(void)
index bc3eeb1ae6da6b58a1aed09372f70fc8c3fda088..16bab471c7e23d8099eebca1c897ccb6a09f6bdf 100644 (file)
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(int level, const char *fmt, va_list args)
+static int vprintk_nmi(const char *fmt, va_list args)
 {
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        int add = 0;
@@ -79,16 +79,7 @@ again:
        if (!len)
                smp_rmb();
 
-       if (level != LOGLEVEL_DEFAULT) {
-               add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               KERN_SOH "%c", '0' + level);
-               add += vsnprintf(s->buffer + len + add,
-                                sizeof(s->buffer) - len - add,
-                                fmt, args);
-       } else {
-               add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               fmt, args);
-       }
+       add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
 
        /*
         * Do it once again if the buffer has been flushed in the meantime.
@@ -108,24 +99,30 @@ again:
        return add;
 }
 
-/*
- * printk one line from the temporary buffer from @start index until
- * and including the @end index.
- */
-static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void printk_nmi_flush_line(const char *text, int len)
 {
-       const char *buf = s->buffer + start;
-
        /*
         * The buffers are flushed in NMI only on panic.  The messages must
         * go only into the ring buffer at this stage.  Consoles will get
         * explicitly called later when a crashdump is not generated.
         */
        if (in_nmi())
-               printk_deferred("%.*s", (end - start) + 1, buf);
+               printk_deferred("%.*s", len, text);
        else
-               printk("%.*s", (end - start) + 1, buf);
+               printk("%.*s", len, text);
+
+}
 
+/*
+ * printk one line from the temporary buffer from @start index until
+ * and including the @end index.
+ */
+static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+                                       int start, int end)
+{
+       const char *buf = s->buffer + start;
+
+       printk_nmi_flush_line(buf, (end - start) + 1);
 }
 
 /*
@@ -159,9 +156,11 @@ more:
         * the buffer an unexpected way. If we printed something then
         * @len must only increase.
         */
-       if (i && i >= len)
-               pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
-                      i, len);
+       if (i && i >= len) {
+               const char *msg = "printk_nmi_flush: internal error\n";
+
+               printk_nmi_flush_line(msg, strlen(msg));
+       }
 
        if (!len)
                goto out; /* Someone else has already flushed the buffer. */
@@ -175,14 +174,14 @@ more:
        /* Print line by line. */
        for (; i < size; i++) {
                if (s->buffer[i] == '\n') {
-                       print_nmi_seq_line(s, last_i, i);
+                       printk_nmi_flush_seq_line(s, last_i, i);
                        last_i = i + 1;
                }
        }
        /* Check if there was a partial line. */
        if (last_i < size) {
-               print_nmi_seq_line(s, last_i, size - 1);
-               pr_cont("\n");
+               printk_nmi_flush_seq_line(s, last_i, size - 1);
+               printk_nmi_flush_line("\n", strlen("\n"));
        }
 
        /*
index a5ef95ca18c9d945e7d8a826dacd27c7f7ebe008..eea6dbc2d8cf6ffc4d71729eb270a659b0a8c77d 100644 (file)
@@ -1930,28 +1930,7 @@ asmlinkage int printk_emit(int facility, int level,
 }
 EXPORT_SYMBOL(printk_emit);
 
-#ifdef CONFIG_PRINTK
-#define define_pr_level(func, loglevel)                                \
-asmlinkage __visible void func(const char *fmt, ...)           \
-{                                                              \
-       va_list args;                                           \
-                                                               \
-       va_start(args, fmt);                                    \
-       vprintk_default(loglevel, fmt, args);                   \
-       va_end(args);                                           \
-}                                                              \
-EXPORT_SYMBOL(func)
-
-define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
-define_pr_level(__pr_alert, LOGLEVEL_ALERT);
-define_pr_level(__pr_crit, LOGLEVEL_CRIT);
-define_pr_level(__pr_err, LOGLEVEL_ERR);
-define_pr_level(__pr_warn, LOGLEVEL_WARNING);
-define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
-define_pr_level(__pr_info, LOGLEVEL_INFO);
-#endif
-
-int vprintk_default(int level, const char *fmt, va_list args)
+int vprintk_default(const char *fmt, va_list args)
 {
        int r;
 
@@ -1961,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args)
                return r;
        }
 #endif
-       r = vprintk_emit(0, level, NULL, 0, fmt, args);
+       r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 
        return r;
 }
@@ -1994,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
        int r;
 
        va_start(args, fmt);
-       r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
+       r = vprintk_func(fmt, args);
        va_end(args);
 
        return r;
index 5c883fe8e44016df1109e8f66dd73377dfecb5e9..44817c640e99b13533221058066b03af010abb47 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/prefetch.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2015,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       /*
+        * Ensure we load p->on_rq _after_ p->state, otherwise it would
+        * be possible to, falsely, observe p->on_rq == 0 and get stuck
+        * in smp_cond_load_acquire() below.
+        *
+        * sched_ttwu_pending()                 try_to_wake_up()
+        *   [S] p->on_rq = 1;                  [L] p->state
+        *       UNLOCK rq->lock  -----.
+        *                              \
+        *                               +---   RMB
+        * schedule()                   /
+        *       LOCK rq->lock    -----'
+        *       UNLOCK rq->lock
+        *
+        * [task p]
+        *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
+        *
+        * Pairs with the UNLOCK+LOCK on rq->lock from the
+        * last wakeup of our task and the schedule that got our task
+        * current.
+        */
+       smp_rmb();
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
@@ -2971,6 +2994,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+       struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+       prefetch(curr);
+       prefetch(&curr->exec_start);
+}
+
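
prefetch_curr_exec_start() issues plain prefetches for the cache lines that
update_curr() is about to read, so the misses overlap with the work done before
the call. A userspace sketch of the same idea using the GCC/Clang builtin
(struct and field names are illustrative):

#include <stdio.h>

struct entity_sketch {
        char other[512];                /* push exec_start onto another cache line */
        unsigned long long exec_start;
};

/* Same shape as prefetch_curr_exec_start(): touch the struct and the
 * field the upcoming update will read, so the loads overlap with the
 * work done between here and the update. */
static void prefetch_entity(struct entity_sketch *curr)
{
        __builtin_prefetch(curr);               /* first line of the struct */
        __builtin_prefetch(&curr->exec_start);  /* line holding exec_start */
}

int main(void)
{
        static struct entity_sketch se = { .exec_start = 100 };

        prefetch_entity(&se);
        /* ... unrelated work would hide the prefetch latency here ... */
        printf("exec_start=%llu\n", se.exec_start);
        return 0;
}
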
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -3005,6 +3045,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * thread, breaking clock_gettime().
         */
        if (task_current(rq, p) && task_on_rq_queued(p)) {
+               prefetch_curr_exec_start(p);
                update_rq_clock(rq);
                p->sched_class->update_curr(rq);
        }
index 5be58820465cced6c0d1dc06c9de146bddcf664f..d4184498c9f5e3c8674015f97fe04da2417dafbd 100644 (file)
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 
        if (old_idx == IDX_INVALID) {
                cp->size++;
-               cp->elements[cp->size - 1].dl = 0;
+               cp->elements[cp->size - 1].dl = dl;
                cp->elements[cp->size - 1].cpu = cpu;
                cp->elements[cpu].idx = cp->size - 1;
                cpudl_change_key(cp, cp->size - 1, dl);
index 1934f658c03604272e5809f32fee1a6a3c928990..a846cf89eb96182950db834207322c92722d0d2e 100644 (file)
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think has elapsed.
+ */
 static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
         * idle, or potentially user or system time. Due to rounding,
         * other time can exceed ticks occasionally.
         */
-       other = account_other_time(cputime);
+       other = account_other_time(ULONG_MAX);
        if (other >= cputime)
                return;
        cputime -= other;
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
        }
 
        cputime = cputime_one_jiffy;
-       steal = steal_account_process_time(cputime);
+       steal = steal_account_process_time(ULONG_MAX);
 
        if (steal >= cputime)
                return;
@@ -508,13 +513,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+       cputime_t cputime, steal;
 
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }
 
-       account_idle_time(jiffies_to_cputime(ticks));
+       cputime = jiffies_to_cputime(ticks);
+       steal = steal_account_process_time(ULONG_MAX);
+
+       if (steal >= cputime)
+               return;
+
+       cputime -= steal;
+       account_idle_time(cputime);
 }
 
 /*
@@ -606,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
        stime = curr->stime;
        utime = curr->utime;
 
-       if (utime == 0) {
-               stime = rtime;
+       /*
+        * If either stime or both stime and utime are 0, assume all runtime is
+        * userspace. Once a task gets some ticks, the monotonicity code at
+        * 'update' will ensure things converge to the observed ratio.
+        */
+       if (stime == 0) {
+               utime = rtime;
                goto update;
        }
 
-       if (stime == 0) {
-               utime = rtime;
+       if (utime == 0) {
+               stime = rtime;
                goto update;
        }
 
        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));
 
+update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
@@ -641,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
                stime = rtime - utime;
        }
 
-update:
        prev->stime = stime;
        prev->utime = utime;
 out:
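
The reordered special cases feed one common step: the sampled stime is rescaled
so that stime + utime equals the measured rtime, and the relocated update label
then clamps both values so neither moves backwards. A simplified C sketch of
the scale-and-clamp, assuming a 128-bit intermediate is available on GCC/Clang
(the kernel's scale_stime() avoids that requirement):

#include <stdio.h>

typedef unsigned long long u64;

/* Naive stand-in for the kernel's scale_stime(): stime * rtime /
 * (stime + utime), assuming a 128-bit intermediate. */
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
        return (u64)(((unsigned __int128)stime * rtime) / total);
}

/* Core of cputime_adjust(): make stime + utime == rtime, then clamp
 * against the previous split so neither component goes backwards. */
static void adjust(u64 rtime, u64 stime, u64 utime,
                   u64 *prev_stime, u64 *prev_utime)
{
        if (stime == 0) {
                /* all runtime counted as user: stime stays 0 */
        } else if (utime == 0) {
                stime = rtime;          /* all runtime counted as system */
        } else {
                stime = scale_stime(stime, rtime, stime + utime);
        }

        if (stime < *prev_stime)        /* stime may not go backwards */
                stime = *prev_stime;
        if (rtime - stime < *prev_utime)/* nor may utime = rtime - stime */
                stime = rtime - *prev_utime;

        *prev_stime = stime;
        *prev_utime = rtime - stime;
}

int main(void)
{
        u64 ps = 0, pu = 0;

        adjust(100, 30, 60, &ps, &pu); /* ticks sampled a 1:2 split */
        printf("stime=%llu utime=%llu\n", ps, pu); /* 33 / 67 */
        return 0;
}
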
@@ -686,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
        unsigned long now = READ_ONCE(jiffies);
        cputime_t delta, other;
 
+       /*
+        * Unlike tick based timing, vtime based timing never has lost
+        * ticks, and no need for steal time accounting to make up for
+        * lost ticks. Vtime accounts a rounded version of actual
+        * elapsed time. Limit account_other_time to prevent rounding
+        * errors from causing elapsed vtime to go negative.
+        */
        delta = jiffies_to_cputime(now - tsk->vtime_snap);
        other = account_other_time(delta);
        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
index fcb7f0217ff48610cca9bd5bd078f2f05df79164..1ce8867283dcde6e35ef74a72a1bca968decb918 100644 (file)
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
         *
         * XXX figure out if select_task_rq_dl() deals with offline cpus.
         */
-       if (unlikely(!rq->online))
+       if (unlikely(!rq->online)) {
+               lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
+               rf.cookie = lockdep_pin_lock(&rq->lock);
+       }
 
        /*
         * Queueing this task back might have overloaded rq, check if we need
index 4088eedea7637859844c777dfa56dfb23136c142..039de34f15216d19f61386b6d6c66744660516c9 100644 (file)
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
        pcfs_rq = tg->parent->cfs_rq[cpu];
 
        cfs_rq->throttle_count = pcfs_rq->throttle_count;
-       pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+       cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
index ef6c6c3f9d8a862b0355443ce794ff0babf83489..0db7c8a2afe2fb531fe390d78ff9bb435c992077 100644 (file)
@@ -605,12 +605,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
-                * notification may silently skip tracer notification.
-                * Terminating the task now avoids executing a system
-                * call that may not be intended.
+                * notification may silently skip tracer notification,
+                * which could leave us with a potentially unmodified
+                * syscall that the tracer would have liked to have
+                * changed. Since the process is about to die, we just
+                * force the syscall to be skipped and let the signal
+                * kill the process and correctly handle any tracer exit
+                * notifications.
                 */
                if (fatal_signal_pending(current))
-                       do_exit(SIGSYS);
+                       goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
index b43d0b27c1feb5f6af7250ea2312bf6d5251aad0..a13bbdaab47dc66fec2c0dd3da0b2c9898725bdf 100644 (file)
@@ -2140,6 +2140,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
        return 0;
 }
 
+static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
+                                int *valp,
+                                int write, void *data)
+{
+       if (write) {
+               if (*negp)
+                       return -EINVAL;
+               *valp = *lvalp;
+       } else {
+               unsigned int val = *valp;
+               *lvalp = (unsigned long)val;
+       }
+       return 0;
+}
+
 static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
 
 static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
@@ -2259,8 +2274,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
 int proc_dointvec(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-    return do_proc_dointvec(table,write,buffer,lenp,ppos,
-                           NULL,NULL);
+       return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
+}
+
+/**
+ * proc_douintvec - read a vector of unsigned integers
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec(struct ctl_table *table, int write,
+                    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return do_proc_dointvec(table, write, buffer, lenp, ppos,
+                               do_proc_douintvec_conv, NULL);
 }
 
 /*
@@ -2858,6 +2892,12 @@ int proc_dointvec(struct ctl_table *table, int write,
        return -ENOSYS;
 }
 
+int proc_douintvec(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return -ENOSYS;
+}
+
 int proc_dointvec_minmax(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2903,6 +2943,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
  * exception granted :-)
  */
 EXPORT_SYMBOL(proc_dointvec);
+EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
 EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
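
A hypothetical user of proc_douintvec wires it into a ctl_table exactly like
proc_dointvec, with .data pointing at an unsigned int; a negative write now
fails with -EINVAL instead of wrapping. Sketch of such a module (the knob and
table are invented; the handler and register_sysctl() are real):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static unsigned int my_threshold = 10; /* invented knob */

static struct ctl_table my_table[] = {
        {
                .procname       = "my_threshold",
                .data           = &my_threshold,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec, /* rejects negatives */
        },
        { }
};

static int __init my_sysctl_init(void)
{
        if (!register_sysctl("kernel", my_table))
                return -ENOMEM;
        return 0;
}
late_initcall(my_sysctl_init);
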
index 204fdc86863d71368ef61faacdb0d5458e78039f..2ec7c00228f3c981568b1dc911b4a91a83148d91 100644 (file)
@@ -908,10 +908,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
+       now = tick_nohz_start_idle(ts);
+
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
 
-               now = tick_nohz_start_idle(ts);
                ts->idle_calls++;
 
                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
index 3b65746c7f156dbdea1e7b1f6fb4e43197820369..e07fb093f8195ace846881ffd81da46d5ce66e65 100644 (file)
@@ -401,7 +401,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
-               now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+               now = ktime_to_ns(tkr->base);
+
+               now += clocksource_delta(tkr->read(tkr->clock),
+                                        tkr->cycle_last, tkr->mask);
        } while (read_seqcount_retry(&tkf->seq, seq));
 
        return now;
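
__ktime_get_fast_ns() is a seqcount-latch reader: the writer keeps two copies
of the timekeeping data and bumps a sequence counter around each update, while
readers use the low bit of the sequence to pick the stable copy and retry if
the counter moved underneath them. A C11 userspace sketch of the latch protocol
(simplified; the kernel additionally orders the data stores with write
barriers):

#include <stdatomic.h>
#include <stdio.h>

struct tk_copy { unsigned long long base; };

static atomic_uint seq;                 /* low bit selects the stable copy */
static struct tk_copy copies[2];

/* Writer: after each seq increment, readers are steered to the copy we
 * are NOT about to modify. (Demo only: production code must also order
 * the data stores, as the kernel does with smp_wmb().) */
static void latch_write(unsigned long long base)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);
        copies[0].base = base;          /* seq now odd: readers use copies[1] */
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);
        copies[1].base = base;          /* seq now even: readers use copies[0] */
}

/* Reader: pick a copy by the low bit, retry if seq moved underneath us,
 * mirroring the do/while loop in __ktime_get_fast_ns(). */
static unsigned long long latch_read(void)
{
        unsigned int s;
        unsigned long long v;

        do {
                s = atomic_load_explicit(&seq, memory_order_acquire);
                v = copies[s & 1].base;
        } while (atomic_load_explicit(&seq, memory_order_acquire) != s);

        return v;
}

int main(void)
{
        latch_write(1000);
        printf("now=%llu\n", latch_read());
        return 0;
}
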
index f6bd65236712b218fe89c2bee83b8e56ca1bf89e..107310a6f36f43a47300c46a4bd3b51a6b5b70fa 100644 (file)
@@ -23,7 +23,9 @@
 
 #include "timekeeping_internal.h"
 
-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
 {
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
 
 void tk_debug_account_sleep_time(struct timespec64 *t)
 {
-       sleep_time_bin[fls(t->tv_sec)]++;
+       /* Cap bin index so we don't overflow the array */
+       int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+       sleep_time_bin[bin]++;
 }
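
fls() returns the 1-based index of the highest set bit, so the histogram bins
are logarithmic: bin n collects sleep times of roughly 2^(n-1)..2^n-1 seconds,
and the fix clamps the index so an absurd duration can no longer step past the
array. A userspace sketch with the GCC builtin as a stand-in for fls():

#include <stdio.h>

#define NUM_BINS 32

static unsigned int sleep_time_bin[NUM_BINS];

/* Userspace stand-in for the kernel's fls(): position of the highest
 * set bit, 1-based; 0 for an input of 0. */
static int fls_sketch(unsigned long v)
{
        return v ? 64 - __builtin_clzl(v) : 0;
}

static void account_sleep_time(unsigned long seconds)
{
        /* Cap the bin index exactly as the fix does, so a huge value
         * (e.g. a bogus RTC reading) cannot index past the array. */
        int bin = fls_sketch(seconds);

        if (bin > NUM_BINS - 1)
                bin = NUM_BINS - 1;
        sleep_time_bin[bin]++;
}

int main(void)
{
        account_sleep_time(5);                  /* bin 3: 4..7 seconds */
        account_sleep_time(0x123456789UL);      /* clamped into bin 31 */
        printf("bin3=%u bin31=%u\n", sleep_time_bin[3], sleep_time_bin[31]);
        return 0;
}
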
 
index 555670a5143c61bed5e7015f4f13849240be37b9..32bf6f75a8fec255c6d5fbf38e9fecd9e1e848fa 100644 (file)
@@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
+       bool is_max_delta;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
        spin_lock(&base->lock);
        nextevt = __next_timer_interrupt(base);
+       is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
        base->next_expiry = nextevt;
        /*
         * We have a fresh next event. Check whether we can forward the base:
@@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                expires = basem;
                base->is_idle = false;
        } else {
-               expires = basem + (nextevt - basej) * TICK_NSEC;
+               if (!is_max_delta)
+                       expires = basem + (nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle:
                 */
index 7598e6ca817a8b9ed5ea6ec85781499f967d26f7..dbafc5df03f3f04705ff5739c2c0f51aed8ca6ee 100644 (file)
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        what |= MASK_TC_BIT(op_flags, META);
        what |= MASK_TC_BIT(op_flags, PREFLUSH);
        what |= MASK_TC_BIT(op_flags, FUA);
-       if (op == REQ_OP_DISCARD)
+       if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
                what |= BLK_TC_ACT(BLK_TC_DISCARD);
        if (op == REQ_OP_FLUSH)
                what |= BLK_TC_ACT(BLK_TC_FLUSH);
index dade4c9559cc036c1b6aa8567abf0b0887847923..7bc56762ca352fd47897690ef875bb4c05b7eed2 100644 (file)
@@ -5124,19 +5124,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        struct trace_iterator *iter = filp->private_data;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /*
         * Avoid more than one consumer on a single file descriptor
         * This is just a matter of traces coherency, the ring buffer itself
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
@@ -6163,9 +6164,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                return -EBUSY;
 #endif
 
-       if (splice_grow_spd(pipe, &spd))
-               return -ENOMEM;
-
        if (*ppos & (PAGE_SIZE - 1))
                return -EINVAL;
 
@@ -6175,6 +6173,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                len &= PAGE_MASK;
        }
 
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
+
  again:
        trace_access_lock(iter->cpu_file);
        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -6232,19 +6233,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        /* did we read anything? */
        if (!spd.nr_pages) {
                if (ret)
-                       return ret;
+                       goto out;
 
+               ret = -EAGAIN;
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
-                       return -EAGAIN;
+                       goto out;
 
                ret = wait_on_pipe(iter, true);
                if (ret)
-                       return ret;
+                       goto out;
 
                goto again;
        }
 
        ret = splice_to_pipe(pipe, &spd);
+out:
        splice_shrink_spd(&spd);
 
        return ret;
index 2307d7c89dac972fae526627ce21ecdd087369de..cab7405f48d24f7048b1494f53caa157998b4cea 100644 (file)
@@ -821,7 +821,7 @@ config DETECT_HUNG_TASK
        help
          Say Y here to enable the kernel to detect "hung tasks",
          which are bugs that cause the task to be stuck in
-         uninterruptible "D" state indefinitiley.
+         uninterruptible "D" state indefinitely.
 
          When a hung task is detected, the kernel will print the
          current stack trace (which you should report), but the
@@ -1686,24 +1686,6 @@ config LATENCYTOP
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.
 
-config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       bool
-
-config DEBUG_STRICT_USER_COPY_CHECKS
-       bool "Strict user copy size checks"
-       depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
-       help
-         Enabling this option turns a certain set of sanity checks for user
-         copy operations into compile time failures.
-
-         The copy_from_user() etc checks are there to help test if there
-         are sufficient security checks on the length argument of
-         the copy operation, by having gcc prove that the argument is
-         within bounds.
-
-         If unsure, say N.
-
 source kernel/trace/Kconfig
 
 menu "Runtime Testing"
index cfa68eb269e4b6628221e436f88c3ff96e9c62d9..5dc77a8ec297ec478c003e894af206956a39fb4c 100644 (file)
@@ -24,7 +24,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
 
-obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_HAS_DMA) += dma-noop.o
index 9e8c7386b3a03e8d418dd09847c9c7f295e06e77..7e3138cfc8c9c72858534b10ed58e4ef4c5e72f7 100644 (file)
@@ -290,26 +290,6 @@ done:
        return wanted - bytes;
 }
 
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-               return fault_in_pages_readable(buf, bytes);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
 /*
  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
  * bytes.  For each iovec, fault in each page that constitutes the iovec.
@@ -317,7 +297,7 @@ EXPORT_SYMBOL(iov_iter_fault_in_readable);
  * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
  * because it is an invalid address).
  */
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
        size_t skip = i->iov_offset;
        const struct iovec *iov;
@@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
        }
        return 0;
 }
-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
 void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
index 1b7bf73141418f5f1427b14e806c9a9d4c6e7582..91f0727e3cada11bd721afdeeb112dd196546637 100644 (file)
@@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
 
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
        if (radix_tree_is_internal_node(entry)) {
-               unsigned long siboff = get_slot_offset(parent, entry);
-               if (siboff < RADIX_TREE_MAP_SIZE) {
-                       offset = siboff;
-                       entry = rcu_dereference_raw(parent->slots[offset]);
+               if (is_sibling_entry(parent, entry)) {
+                       void **sibentry = (void **) entry_to_node(entry);
+                       offset = get_slot_offset(parent, sibentry);
+                       entry = rcu_dereference_raw(*sibentry);
                }
        }
 #endif
index 5d845ffd7982770d39af65b6c8906ef7000a4b95..56054e541a0f92fb69d1de4502c4230d8b4f6c3d 100644 (file)
@@ -30,7 +30,7 @@
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
-#define BUCKET_LOCKS_PER_CPU   128UL
+#define BUCKET_LOCKS_PER_CPU   32UL
 
 static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
@@ -70,21 +70,25 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
        unsigned int nr_pcpus = num_possible_cpus();
 #endif
 
-       nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+       nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);
 
        if (sizeof(spinlock_t) != 0) {
+               tbl->locks = NULL;
 #ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
-               else
 #endif
-               tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-                                          gfp);
+               if (gfp != GFP_KERNEL)
+                       gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
+               if (!tbl->locks)
+                       tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+                                                  gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
@@ -321,12 +325,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-       unsigned int size;
+       unsigned int nelems = atomic_read(&ht->nelems);
+       unsigned int size = 0;
        int err;
 
        ASSERT_RHT_MUTEX(ht);
 
-       size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+       if (nelems)
+               size = roundup_pow_of_two(nelems * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;
 
index 33f655ef48cd1668604a1f60c4a97e4acbf52508..9c5fe81104135364bca9f2b0da47f4e2a1ed51fc 100644 (file)
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                unsigned long c, data;
 
                /* Fall back to byte-at-a-time if we get a page fault */
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       break;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
                *(unsigned long *)(dst+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
        while (max) {
                char c;
 
-               if (unlikely(unsafe_get_user(c,src+res)))
-                       return -EFAULT;
+               unsafe_get_user(c, src+res, efault);
                dst[res] = c;
                if (!c)
                        return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's an EFAULT.
         */
+efault:
        return -EFAULT;
 }
 
index 2625943625d7fb229e6e2cf104e5d84c95246ffa..8e105ed4df12bb6bb0a170afff54d979c15d73c0 100644 (file)
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        src -= align;
        max += align;
 
-       if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
-               return 0;
+       unsafe_get_user(c, (unsigned long __user *)src, efault);
        c |= aligned_byte_mask(align);
 
        for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       return 0;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
        }
        res -= align;
 
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's 0.
         */
+efault:
        return 0;
 }
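
Both conversions above (strncpy_from_user and strnlen_user) move from the old two-argument unsafe_get_user(), whose return value had to be tested on every access, to the three-argument form that jumps to a caller-supplied label on fault, keeping the hot loop branch-free. The calling convention in miniature (read_user_byte() is a hypothetical helper; assumes the no-argument user_access_begin() of this era):

/* Sketch of label-based fault handling with unsafe_get_user(). */
static long read_user_byte(const char __user *src, char *out)
{
	char c;

	user_access_begin();
	unsafe_get_user(c, src, efault);	/* jumps to efault on fault */
	user_access_end();
	*out = c;
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}
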
 
index 66c5fc8351e84d703373a3572d6afaea84c410da..cac20c5fb304fb09a5803de381c4fd6b0c1b367d 100644 (file)
@@ -143,7 +143,7 @@ static int __init
 test_hash_init(void)
 {
        char buf[SIZE+1];
-       u32 string_or = 0, hash_or[2][33] = { 0 };
+       u32 string_or = 0, hash_or[2][33] = { { 0, } };
        unsigned tests = 0;
        unsigned long long h64 = 0;
        int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
        }
 
        /* Issue notices about skipped tests. */
-#ifndef HAVE_ARCH__HASH_32
-       pr_info("__hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH__HASH_32 != 1
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1
        pr_info("__hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_32
-       pr_info("hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_32 != 1
+#else
+       pr_info("__hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_32
+#if HAVE_ARCH_HASH_32 != 1
        pr_info("hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_64
-       pr_info("hash_64() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_64 != 1
+#else
+       pr_info("hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_64
+#if HAVE_ARCH_HASH_64 != 1
        pr_info("hash_64() is arch-specific; not compared to generic.");
 #endif
+#else
+       pr_info("hash_64() has no arch implementation to test.");
+#endif
 
        pr_notice("%u tests passed.", tests);
 
index 297fdb5e74bd05bc4a3258c90465510463cbf8fb..64e899b633371d252deaf057156408a8ea8f4625 100644 (file)
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
 
 static int max_size = 0;
 module_param(max_size, int, 0);
-MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
+MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
 
 static bool shrinking = false;
 module_param(shrinking, bool, 0);
diff --git a/lib/usercopy.c b/lib/usercopy.c
deleted file mode 100644 (file)
index 4f5b1dd..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <linux/export.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-void copy_from_user_overflow(void)
-{
-       WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);
index 78a23c5c302d96ad6ef1198a02deb39a9a6bc228..be0ee11fa0d9ee8ff068244a559a2c52ad96c84c 100644 (file)
@@ -262,7 +262,14 @@ config COMPACTION
        select MIGRATION
        depends on MMU
        help
-         Allows the compaction of memory for the allocation of huge pages.
+          Compaction is the only memory management component that can
+          reliably form high-order (larger physically contiguous) memory
+          blocks. The page allocator relies heavily on compaction, and
+          lacking it can lead to unexpected OOM-killer invocations for
+          high-order memory requests. You shouldn't disable this option
+          unless there really is a strong reason to, and if you do we
+          would be interested to hear about it at linux-mm@kvack.org.
 
 #
 # support for page migration
index fc059666c760e179db0a070926a1e8204f5e9c92..2ca1faf3fa09038feaeea4fb4adbe5ea6717df30 100644 (file)
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+# __builtin_frame_address() works as it is used here, so disable the
+# frame-address warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := gup.o highmem.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
index 8865bfb41b0b6e514a8f297c335a8682763bad6a..74c7cae4f6837dcb5a599df800e7300ff851519c 100644 (file)
@@ -42,9 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
        pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
-                 page, page_ref_count(page), page_mapcount(page),
-                 page->mapping, page->index);
+                 page, page_ref_count(page), mapcount,
+                 page->mapping, page_to_pgoff(page));
        if (PageCompound(page))
                pr_cont(" compound_mapcount: %d", compound_mapcount(page));
        pr_cont("\n");
index 2373f0a7d3405bb869ac4a3a4be175289af38114..53ae6d00656aced3b21ebe1af8215fc1112af5eb 100644 (file)
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        }
 skip_mlock:
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_GET)
                get_page(page);
 
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
        bool was_writable;
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
        if (unlikely(!pmd_same(pmd, *fe->pmd)))
                goto out_unlock;
@@ -1512,7 +1509,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        struct page *page;
        pgtable_t pgtable;
        pmd_t _pmd;
-       bool young, write, dirty;
+       bool young, write, dirty, soft_dirty;
        unsigned long addr;
        int i;
 
@@ -1546,6 +1543,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        write = pmd_write(*pmd);
        young = pmd_young(*pmd);
        dirty = pmd_dirty(*pmd);
+       soft_dirty = pmd_soft_dirty(*pmd);
 
        pmdp_huge_split_prepare(vma, haddr, pmd);
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1562,6 +1560,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t swp_entry;
                        swp_entry = make_migration_entry(page + i, write);
                        entry = swp_entry_to_pte(swp_entry);
+                       if (soft_dirty)
+                               entry = pte_swp_mksoft_dirty(entry);
                } else {
                        entry = mk_pte(page + i, vma->vm_page_prot);
                        entry = maybe_mkwrite(entry, vma);
@@ -1569,6 +1569,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pte_wrprotect(entry);
                        if (!young)
                                entry = pte_mkold(entry);
+                       if (soft_dirty)
+                               entry = pte_mksoft_dirty(entry);
                }
                if (dirty)
                        SetPageDirty(page + i);
index b9aa1b0b38b0ecdb769cd33a8d77c0ffda2621bf..87e11d8ad536b8c360740ca9ce96461b0daeaaee 100644 (file)
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
                list_del(&page->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
+               h->max_huge_pages--;
                update_and_free_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
index b6728a33a4aca104fde8022b90fdf2df5630af31..baabaad4a4aaa89bb13fc691cf5df58af46c8b3b 100644 (file)
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
        new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
                QUARANTINE_FRACTION;
        percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
-       if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
-               "Too little memory, disabling global KASAN quarantine.\n"))
-               new_quarantine_size = 0;
-       else
-               new_quarantine_size -= percpu_quarantines;
+       new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+               0 : new_quarantine_size - percpu_quarantines;
        WRITE_ONCE(quarantine_size, new_quarantine_size);
 
        last = global_quarantine.head;
index 79c52d0061af591b0417e5d67462f49fda8ac632..728d7790dc2da27175d403f314a359cec70da5fc 100644 (file)
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
  * value (scan code).
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+               struct vm_area_struct **vmap)
 {
        struct vm_area_struct *vma;
        unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
        if (unlikely(khugepaged_test_exit(mm)))
                return SCAN_ANY_PROCESS;
 
-       vma = find_vma(mm, address);
+       *vmap = vma = find_vma(mm, address);
        if (!vma)
                return SCAN_VMA_NULL;
 
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                .pmd = pmd,
        };
 
+       /* we only decide to swap in if there are enough young ptes */
+       if (referenced < HPAGE_PMD_NR/2) {
+               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+               return false;
+       }
        fe.pte = pte_offset_map(pmd, address);
        for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
                        fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                if (!is_swap_pte(pteval))
                        continue;
                swapped_in++;
-               /* we only decide to swapin, if there is enough young ptes */
-               if (referenced < HPAGE_PMD_NR/2) {
-                       trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-                       return false;
-               }
                ret = do_swap_page(&fe, pteval);
 
                /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
                if (ret & VM_FAULT_RETRY) {
                        down_read(&mm->mmap_sem);
-                       if (hugepage_vma_revalidate(mm, address)) {
+                       if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
                                   unsigned long address,
                                   struct page **hpage,
-                                  struct vm_area_struct *vma,
                                   int node, int referenced)
 {
        pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        spinlock_t *pmd_ptl, *pte_ptl;
        int isolated = 0, result = 0;
        struct mem_cgroup *memcg;
+       struct vm_area_struct *vma;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
        gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        }
 
        down_read(&mm->mmap_sem);
-       result = hugepage_vma_revalidate(mm, address);
+       result = hugepage_vma_revalidate(mm, address, &vma);
        if (result) {
                mem_cgroup_cancel_charge(new_page, memcg, true);
                up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
-       result = hugepage_vma_revalidate(mm, address);
+       result = hugepage_vma_revalidate(mm, address, &vma);
        if (result)
                goto out;
        /* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
        if (ret) {
                node = khugepaged_find_target_node();
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, vma, node, referenced);
+               collapse_huge_page(mm, address, hpage, node, referenced);
        }
 out:
        trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
index 66beca1ad92ffcc16c3636c4e7e54c46bc89cc3f..4be518d4e68ab437549693161c189c7caefe87ae 100644 (file)
@@ -1740,17 +1740,22 @@ static DEFINE_MUTEX(percpu_charge_mutex);
 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        struct memcg_stock_pcp *stock;
+       unsigned long flags;
        bool ret = false;
 
        if (nr_pages > CHARGE_BATCH)
                return ret;
 
-       stock = &get_cpu_var(memcg_stock);
+       local_irq_save(flags);
+
+       stock = this_cpu_ptr(&memcg_stock);
        if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;
                ret = true;
        }
-       put_cpu_var(memcg_stock);
+
+       local_irq_restore(flags);
+
        return ret;
 }
 
@@ -1771,15 +1776,18 @@ static void drain_stock(struct memcg_stock_pcp *stock)
        stock->cached = NULL;
 }
 
-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
 static void drain_local_stock(struct work_struct *dummy)
 {
-       struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+       struct memcg_stock_pcp *stock;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       stock = this_cpu_ptr(&memcg_stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+       local_irq_restore(flags);
 }
 
 /*
@@ -1788,14 +1796,19 @@ static void drain_local_stock(struct work_struct *dummy)
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-       struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+       struct memcg_stock_pcp *stock;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
+       stock = this_cpu_ptr(&memcg_stock);
        if (stock->cached != memcg) { /* reset if necessary */
                drain_stock(stock);
                stock->cached = memcg;
        }
        stock->nr_pages += nr_pages;
-       put_cpu_var(memcg_stock);
+
+       local_irq_restore(flags);
 }
 
 /*
@@ -2337,8 +2350,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
                return 0;
 
        memcg = get_mem_cgroup_from_mm(current->mm);
-       if (!mem_cgroup_is_root(memcg))
+       if (!mem_cgroup_is_root(memcg)) {
                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+               if (!ret)
+                       __SetPageKmemcg(page);
+       }
        css_put(&memcg->css);
        return ret;
 }
@@ -2365,6 +2381,11 @@ void memcg_kmem_uncharge(struct page *page, int order)
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
        page->mem_cgroup = NULL;
+
+       /* slab pages do not have PageKmemcg flag set */
+       if (PageKmemcg(page))
+               __ClearPageKmemcg(page);
+
        css_put_many(&memcg->css, nr_pages);
 }
 #endif /* !CONFIG_SLOB */
@@ -4069,14 +4090,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-       atomic_inc(&memcg->id.ref);
+       atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
-       if (atomic_dec_and_test(&memcg->id.ref)) {
+       if (atomic_sub_and_test(n, &memcg->id.ref)) {
                idr_remove(&mem_cgroup_idr, memcg->id.id);
                memcg->id.id = 0;
 
@@ -4085,6 +4106,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
        }
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+       mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+       mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
@@ -4719,6 +4750,8 @@ static void __mem_cgroup_clear_mc(void)
                if (!mem_cgroup_is_root(mc.from))
                        page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+               mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
                /*
                 * we charged both to->memory and to->memsw, so we
                 * should uncharge to->memory.
@@ -4726,9 +4759,9 @@ static void __mem_cgroup_clear_mc(void)
                if (!mem_cgroup_is_root(mc.to))
                        page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-               css_put_many(&mc.from->css, mc.moved_swap);
+               mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+               css_put_many(&mc.to->css, mc.moved_swap);
 
-               /* we've already done css_get(mc.to) */
                mc.moved_swap = 0;
        }
        memcg_oom_recover(from);
@@ -5537,8 +5570,10 @@ static void uncharge_list(struct list_head *page_list)
                        else
                                nr_file += nr_pages;
                        pgpgout++;
-               } else
+               } else {
                        nr_kmem += 1 << compound_order(page);
+                       __ClearPageKmemcg(page);
+               }
 
                page->mem_cgroup = NULL;
        } while (next != page_list);
@@ -5781,6 +5816,24 @@ static int __init mem_cgroup_init(void)
 subsys_initcall(mem_cgroup_init);
 
 #ifdef CONFIG_MEMCG_SWAP
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+       while (!atomic_inc_not_zero(&memcg->id.ref)) {
+               /*
+                * The root cgroup cannot be destroyed, so its refcount must
+                * always be >= 1.
+                */
+               if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+                       VM_BUG_ON(1);
+                       break;
+               }
+               memcg = parent_mem_cgroup(memcg);
+               if (!memcg)
+                       memcg = root_mem_cgroup;
+       }
+       return memcg;
+}
+
 /**
  * mem_cgroup_swapout - transfer a memsw charge to swap
  * @page: page whose memsw charge to transfer
@@ -5790,7 +5843,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-       struct mem_cgroup *memcg;
+       struct mem_cgroup *memcg, *swap_memcg;
        unsigned short oldid;
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5805,16 +5858,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!memcg)
                return;
 
-       mem_cgroup_id_get(memcg);
-       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       /*
+        * In case the memcg owning these pages has been offlined and doesn't
+        * have an ID allocated to it anymore, charge the closest online
+        * ancestor for the swap instead and transfer the memory+swap charge.
+        */
+       swap_memcg = mem_cgroup_id_get_online(memcg);
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
        VM_BUG_ON_PAGE(oldid, page);
-       mem_cgroup_swap_statistics(memcg, true);
+       mem_cgroup_swap_statistics(swap_memcg, true);
 
        page->mem_cgroup = NULL;
 
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
 
+       if (memcg != swap_memcg) {
+               if (!mem_cgroup_is_root(swap_memcg))
+                       page_counter_charge(&swap_memcg->memsw, 1);
+               page_counter_uncharge(&memcg->memsw, 1);
+       }
+
        /*
         * Interrupts should be disabled here because the caller holds the
         * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5853,11 +5917,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
        if (!memcg)
                return 0;
 
+       memcg = mem_cgroup_id_get_online(memcg);
+
        if (!mem_cgroup_is_root(memcg) &&
-           !page_counter_try_charge(&memcg->swap, 1, &counter))
+           !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+               mem_cgroup_id_put(memcg);
                return -ENOMEM;
+       }
 
-       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
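
The consume_stock()/drain_local_stock()/refill_stock() hunks above replace get_cpu_var()/put_cpu_var(), which only disable preemption, with local_irq_save() around this_cpu_ptr(), making the per-cpu stock safe against charging from interrupt context as well. The locking shape in isolation (the counter and helper below are hypothetical):

/* Sketch: protect a per-cpu structure against both preemption and
 * interrupts by disabling IRQs around the access.
 */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_add(unsigned long n)
{
	unsigned long flags;

	local_irq_save(flags);		/* also implies non-preemptible */
	__this_cpu_add(demo_counter, n);
	local_irq_restore(flags);
}
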
index 83be99d9d8a15006e2cb267a3dd46e10e470f66e..793fe0f9841c09ce87b1d0e0a43f1b416ead6390 100644 (file)
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
        bool was_writable = pte_write(pte);
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        /*
        * The "pte" at this point cannot be used safely without
        * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
        return VM_FAULT_FALLBACK;
 }
 
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+       return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
        if (!pte_present(entry))
                return do_swap_page(fe, entry);
 
-       if (pte_protnone(entry))
+       if (pte_protnone(entry) && vma_is_accessible(fe->vma))
                return do_numa_page(fe, entry);
 
        fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
                barrier();
                if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-                       if (pmd_protnone(orig_pmd))
+                       if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&fe, orig_pmd);
 
                        if ((fe.flags & FAULT_FLAG_WRITE) &&
index 3894b65b155555f11076f0cae90f71e2475b6929..b58906b6215cbb3d2b43abbcdb758e0809c42c7f 100644 (file)
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+       pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
        /*
         * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 {
        arch_refresh_nodedata(nid, NULL);
+       free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
        return;
 }
@@ -1565,7 +1567,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        next_node_in(nid, nmask));
 
-       node_clear(nid, nmask);
+       if (nid != next_node_in(nid, nmask))
+               node_clear(nid, nmask);
+
        if (PageHighMem(page)
            || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;
index d8c4e38fb5f4be1d9748dc77f214c8a285374f8d..2da72a5b6ecc04f87bd9168fc31d613360e40f2e 100644 (file)
@@ -2336,6 +2336,23 @@ out:
        return ret;
 }
 
+/*
+ * Drop the (possibly final) reference to task->mempolicy.  It needs to be
+ * dropped after task->mempolicy is set to NULL so that any allocation done as
+ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
+ * policy.
+ */
+void mpol_put_task_policy(struct task_struct *task)
+{
+       struct mempolicy *pol;
+
+       task_lock(task);
+       pol = task->mempolicy;
+       task->mempolicy = NULL;
+       task_unlock(task);
+       mpol_put(pol);
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
index 7d0a275df822e9e14c55e5d472cfc473ac3ae173..d53a9aa00977cbd0f81970e9e8a30b011cc73f31 100644 (file)
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
 {
        struct mm_struct *mm = task->mm;
        struct task_struct *p;
-       bool ret;
+       bool ret = true;
 
        /*
         * Skip tasks without mm because it might have passed its exit_mm and
index fb975cec351821151a422fb34171121f67459228..a2214c64ed3cd04dceaed7a579f593852e458df1 100644 (file)
@@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
        }
        if (PageMappingFlags(page))
                page->mapping = NULL;
-       if (memcg_kmem_enabled() && PageKmemcg(page)) {
+       if (memcg_kmem_enabled() && PageKmemcg(page))
                memcg_kmem_uncharge(page, order);
-               __ClearPageKmemcg(page);
-       }
        if (check_free)
                bad += free_pages_check(page);
        if (bad)
@@ -3139,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-                    enum compact_result compact_result,
-                    enum compact_priority *compact_priority,
-                    int compaction_retries)
-{
-       int max_retries = MAX_COMPACT_RETRIES;
-
-       if (!order)
-               return false;
-
-       /*
-        * compaction considers all the zone as desperately out of memory
-        * so it doesn't really make much sense to retry except when the
-        * failure could be caused by insufficient priority
-        */
-       if (compaction_failed(compact_result)) {
-               if (*compact_priority > MIN_COMPACT_PRIORITY) {
-                       (*compact_priority)--;
-                       return true;
-               }
-               return false;
-       }
-
-       /*
-        * make sure the compaction wasn't deferred or didn't bail out early
-        * due to locks contention before we declare that we should give up.
-        * But do not retry if the given zonelist is not suitable for
-        * compaction.
-        */
-       if (compaction_withdrawn(compact_result))
-               return compaction_zonelist_suitable(ac, order, alloc_flags);
-
-       /*
-        * !costly requests are much more important than __GFP_REPEAT
-        * costly ones because they are de facto nofail and invoke OOM
-        * killer to move on while costly can fail and users are ready
-        * to cope with that. 1/4 retries is rather arbitrary but we
-        * would need much more detailed feedback from compaction to
-        * make a better decision.
-        */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
-               max_retries /= 4;
-       if (compaction_retries <= max_retries)
-               return true;
-
-       return false;
-}
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3197,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
+#endif /* CONFIG_COMPACTION */
+
 static inline bool
 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
                     enum compact_result compact_result,
@@ -3223,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
        }
        return false;
 }
-#endif /* CONFIG_COMPACTION */
 
 /* Perform direct synchronous page reclaim */
 static int
@@ -3756,12 +3707,10 @@ no_zone:
        }
 
 out:
-       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
-               if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
-                       __free_pages(page, order);
-                       page = NULL;
-               } else
-                       __SetPageKmemcg(page);
+       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
+           unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+               __free_pages(page, order);
+               page = NULL;
        }
 
        if (kmemcheck_enabled && page)
@@ -4064,7 +4013,7 @@ long si_mem_available(void)
        int lru;
 
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-               pages[lru] = global_page_state(NR_LRU_BASE + lru);
+               pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
        for_each_zone(zone)
                wmark_low += zone->watermark[WMARK_LOW];
@@ -4411,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (populated_zone(zone)) {
+               if (managed_zone(zone)) {
                        zoneref_set_zone(zone,
                                &zonelist->_zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
@@ -4649,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
                for (j = 0; j < nr_nodes; j++) {
                        node = node_order[j];
                        z = &NODE_DATA(node)->node_zones[zone_type];
-                       if (populated_zone(z)) {
+                       if (managed_zone(z)) {
                                zoneref_set_zone(z,
                                        &zonelist->_zonerefs[pos++]);
                                check_highest_zone(zone_type);
@@ -4761,6 +4710,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else  /* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5882,9 +5833,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
                zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
                zone->node = nid;
-               pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-                                               / 100;
-               pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                zone->zone_pgdat = pgdat;
@@ -6805,6 +6753,12 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_wmarks();
        refresh_zone_stat_thresholds();
        setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+       setup_min_unmapped_ratio();
+       setup_min_slab_ratio();
+#endif
+
        return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6846,43 +6800,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
+       for_each_online_pgdat(pgdat)
+               pgdat->min_unmapped_pages = 0;
+
+       for_each_zone(zone)
+               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+                               sysctl_min_unmapped_ratio) / 100;
+}
+
+
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
+       setup_min_unmapped_ratio();
+
+       return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
        for_each_online_pgdat(pgdat)
                pgdat->min_slab_pages = 0;
 
        for_each_zone(zone)
-               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-                               sysctl_min_unmapped_ratio) / 100;
-       return 0;
+               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+                               sysctl_min_slab_ratio) / 100;
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
-       for_each_online_pgdat(pgdat)
-               pgdat->min_slab_pages = 0;
+       setup_min_slab_ratio();
 
-       for_each_zone(zone)
-               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-                               sysctl_min_slab_ratio) / 100;
        return 0;
 }
 #endif
index 16bd82fad38c18f985ccf7092537c2cacee31427..eafe5ddc2b54ce969eebcff3c06729b4525ac339 100644 (file)
@@ -264,6 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        int ret;
        struct swap_info_struct *sis = page_swap_info(page);
 
+       BUG_ON(!PageSwapCache(page));
        if (sis->flags & SWP_FILE) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
@@ -337,6 +338,7 @@ int swap_readpage(struct page *page)
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
 
+       BUG_ON(!PageSwapCache(page));
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);
        if (frontswap_load(page) == 0) {
@@ -386,6 +388,7 @@ int swap_set_page_dirty(struct page *page)
 
        if (sis->flags & SWP_FILE) {
                struct address_space *mapping = sis->swap_file->f_mapping;
+               BUG_ON(!PageSwapCache(page));
                return mapping->a_ops->set_page_dirty(page);
        } else {
                return __set_page_dirty_no_writeback(page);
index 65ec288dc057e874a661fd372501454c32cc2bd4..c8a955b1297e0b60fb2efffdf3fc2bb71cfc4630 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dax.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/blkdev.h>
@@ -544,6 +545,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops)
                return -EINVAL;
 
+       /*
+        * Readahead doesn't make sense for DAX inodes, but we don't want it
+        * to report a failure either.  Instead, we just return success and
+        * don't do any work.
+        */
+       if (dax_mapping(mapping))
+               return 0;
+
        return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
index 709bc83703b1bfef419fa674ef5b5e28f5d70f05..1ef36404e7b2d7daeef2061ff8f79524d7750bb9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
                __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
        } else {
-               if (PageTransCompound(page)) {
-                       VM_BUG_ON_PAGE(!PageLocked(page), page);
+               if (PageTransCompound(page) && page_mapping(page)) {
+                       VM_WARN_ON_ONCE(!PageLocked(page));
+
                        SetPageDoubleMap(compound_head(page));
                        if (PageMlocked(page))
                                clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
        int i, nr = 1;
 
-       VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+       VM_BUG_ON_PAGE(compound && !PageHead(page), page);
        lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
index 7f7748a0f9e1f738fd1ffcaceccdee2ae54d8d35..971fc83e6402858dd558d0f04609ddad38373eb4 100644 (file)
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
                info->alloced -= pages;
                shmem_recalc_inode(inode);
                spin_unlock_irqrestore(&info->lock, flags);
-
+               shmem_unacct_blocks(info->flags, pages);
                return false;
        }
        percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 
        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
+       shmem_unacct_blocks(info->flags, pages);
 }
 
 /*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                                return addr;
                        sb = shm_mnt->mnt_sb;
                }
-               if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
+               if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
                        return addr;
        }
 
@@ -3975,7 +3976,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
        struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4009,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
                        return false;
        }
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
 
index 261147ba156fb855c525975a0478d98096c7d0a2..b67271024135aff957f1361c3e327da41915b4c2 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4441,6 +4441,36 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns a const char *
+ * naming the cache, to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *cachep;
+       unsigned int objnr;
+       unsigned long offset;
+
+       /* Find and validate object. */
+       cachep = page->slab_cache;
+       objnr = obj_to_index(cachep, page, (void *)ptr);
+       BUG_ON(objnr >= cachep->num);
+
+       /* Find offset within object. */
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+               return NULL;
+
+       return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
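
The acceptance test above, offset <= object_size && n <= object_size - offset, is deliberately written with a subtraction rather than the naive offset + n <= object_size: the addition could wrap around for a huge n and wrongly pass. A standalone worked example (plain C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Overflow-safe form of the bounds check used in __check_heap_object(). */
static bool range_ok(unsigned long offset, unsigned long n,
		     unsigned long object_size)
{
	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	/* offset + n wraps to 7 here, so the naive check would pass. */
	printf("%d\n", range_ok(8, ~0UL, 64));	/* 0: rejected */
	printf("%d\n", range_ok(8, 56, 64));	/* 1: accepted */
	return 0;
}
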
index 850737bdfbd82410dcd9e0e87d64ea808b0e39c7..9adae58462f8191b22659b1aa438ec637f6fc765 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+       LIST_HEAD(discard);
        struct page *page, *h;
 
        BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        remove_partial(n, page);
-                       discard_slab(s, page);
+                       list_add(&page->lru, &discard);
                } else {
                        list_slab_objects(s, page,
                        "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
+
+       list_for_each_entry_safe(page, h, &discard, lru)
+               discard_slab(s, page);
 }
 
 /*
@@ -3764,6 +3768,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns a const char *
+ * naming the cache, to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *s;
+       unsigned long offset;
+       size_t object_size;
+
+       /* Find object and usable object size. */
+       s = page->slab_cache;
+       object_size = slab_ksize(s);
+
+       /* Reject impossible pointers. */
+       if (ptr < page_address(page))
+               return s->name;
+
+       /* Find offset within object. */
+       offset = (ptr - page_address(page)) % s->size;
+
+       /* Adjust for redzone and reject if within the redzone. */
+       if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+               if (offset < s->red_left_pad)
+                       return s->name;
+               offset -= s->red_left_pad;
+       }
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= object_size && n <= object_size - offset)
+               return NULL;
+
+       return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
        struct page *page;
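
Earlier in this file, free_partial() now unlinks empty slabs onto a local discard list while holding n->list_lock and calls discard_slab() only after the lock is dropped; freeing pages with a spinlock held and IRQs disabled is slow and invites lock-ordering trouble. The collect-then-free shape in isolation (struct item and destroy_item() are hypothetical):

struct item {
	struct list_head lru;
	bool unused;
};

static void destroy_item(struct item *it)
{
	kfree(it);			/* stands in for discard_slab() */
}

/* Sketch: unlink under the lock, do the heavy freeing after unlock. */
static void reap_unused(spinlock_t *lock, struct list_head *pool)
{
	struct item *it, *tmp;
	LIST_HEAD(discard);

	spin_lock_irq(lock);
	list_for_each_entry_safe(it, tmp, pool, lru)
		if (it->unused)
			list_move(&it->lru, &discard);
	spin_unlock_irq(lock);

	list_for_each_entry_safe(it, tmp, &discard, lru)
		destroy_item(it);
}
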
index 78cfa292a29aa0780e5e83efd7b7733ce958e605..2657accc6e2b9c6a6a5c242cf8cd1629441b1f36 100644 (file)
@@ -2724,7 +2724,6 @@ int swapcache_prepare(swp_entry_t entry)
 struct swap_info_struct *page_swap_info(struct page *page)
 {
        swp_entry_t swap = { .val = page_private(page) };
-       BUG_ON(!PageSwapCache(page));
        return swap_info[swp_type(swap)];
 }
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644 (file)
index 0000000..3c8da0a
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+       BAD_STACK = -1,
+       NOT_STACK = 0,
+       GOOD_FRAME,
+       GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ *     NOT_STACK: not at all on the stack
+ *     GOOD_FRAME: fully within a valid stack frame
+ *     GOOD_STACK: fully on the stack (when can't do frame-checking)
+ *     BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+       const void * const stack = task_stack_page(current);
+       const void * const stackend = stack + THREAD_SIZE;
+       int ret;
+
+       /* Object is not on the stack at all. */
+       if (obj + len <= stack || stackend <= obj)
+               return NOT_STACK;
+
+       /*
+        * Reject: object partially overlaps the stack (passing the
+        * check above means at least one end is within the stack,
+        * so if this check fails, the other end is outside the stack).
+        */
+       if (obj < stack || stackend < obj + len)
+               return BAD_STACK;
+
+       /* Check if object is safely within a valid frame. */
+       ret = arch_within_stack_frames(stack, stackend, obj, len);
+       if (ret)
+               return ret;
+
+       return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+                           bool to_user, const char *type)
+{
+       pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+               to_user ? "exposure" : "overwrite",
+               to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       /*
+        * For greater effect, it would be nice to do do_group_exit(),
+        * but BUG() actually hooks all the lock-breaking and per-arch
+        * Oops code, so that is used here instead.
+        */
+       BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+                    unsigned long high)
+{
+       unsigned long check_low = (uintptr_t)ptr;
+       unsigned long check_high = check_low + n;
+
+       /* Does not overlap if entirely above or entirely below. */
+       if (check_low >= high || check_high <= low)
+               return false;
+
+       return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+                                                  unsigned long n)
+{
+       unsigned long textlow = (unsigned long)_stext;
+       unsigned long texthigh = (unsigned long)_etext;
+       unsigned long textlow_linear, texthigh_linear;
+
+       if (overlaps(ptr, n, textlow, texthigh))
+               return "<kernel text>";
+
+       /*
+        * Some architectures have virtual memory mappings with a secondary
+        * mapping of the kernel text, i.e. there is more than one virtual
+        * kernel address that points to the kernel image. This usually
+        * happens when there is a separate linear physical memory mapping,
+        * in which case __pa() is not simply the reverse of __va(). This
+        * can be detected and checked:
+        */
+       textlow_linear = (unsigned long)__va(__pa(textlow));
+       /* No different mapping: we're done. */
+       if (textlow_linear == textlow)
+               return NULL;
+
+       /* Check the secondary mapping... */
+       texthigh_linear = (unsigned long)__va(__pa(texthigh));
+       if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+               return "<linear kernel text>";
+
+       return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+       /* Reject if object wraps past end of memory. */
+       if ((unsigned long)ptr + n < (unsigned long)ptr)
+               return "<wrapped address>";
+
+       /* Reject if NULL or ZERO-allocation. */
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       return NULL;
+}
+
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+                                         struct page *page, bool to_user)
+{
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
+       const void *end = ptr + n - 1;
+       struct page *endpage;
+       bool is_reserved, is_cma;
+
+       /*
+        * Sometimes the kernel data regions are not marked Reserved (see
+        * check below). And sometimes [_sdata,_edata) does not cover
+        * rodata and/or bss, so check each range explicitly.
+        */
+
+       /* Allow reads of kernel rodata region (if not marked as Reserved). */
+       if (ptr >= (const void *)__start_rodata &&
+           end <= (const void *)__end_rodata) {
+               if (!to_user)
+                       return "<rodata>";
+               return NULL;
+       }
+
+       /* Allow kernel data region (if not marked as Reserved). */
+       if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+               return NULL;
+
+       /* Allow kernel bss region (if not marked as Reserved). */
+       if (ptr >= (const void *)__bss_start &&
+           end <= (const void *)__bss_stop)
+               return NULL;
+
+       /* Is the object wholly within one base page? */
+       if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+                  ((unsigned long)end & (unsigned long)PAGE_MASK)))
+               return NULL;
+
+       /* Allow if fully inside the same compound (__GFP_COMP) page. */
+       endpage = virt_to_head_page(end);
+       if (likely(endpage == page))
+               return NULL;
+
+       /*
+        * Reject if range is entirely either Reserved (i.e. special or
+        * device memory), or CMA. Otherwise, reject since the object spans
+        * several independently allocated pages.
+        */
+       is_reserved = PageReserved(page);
+       is_cma = is_migrate_cma_page(page);
+       if (!is_reserved && !is_cma)
+               return "<spans multiple pages>";
+
+       for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+               page = virt_to_head_page(ptr);
+               if (is_reserved && !PageReserved(page))
+                       return "<spans Reserved and non-Reserved pages>";
+               if (is_cma && !is_migrate_cma_page(page))
+                       return "<spans CMA and non-CMA pages>";
+       }
+#endif
+
+       return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        *
+        * We also need to check for module addresses explicitly since we
+        * may copy static data from modules to userspace.
+        */
+       if (is_vmalloc_or_module_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
+
+       /* Verify object does not incorrectly span multiple pages. */
+       return check_page_span(ptr, n, page, to_user);
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+       const char *err;
+
+       /* Skip all tests if size is zero. */
+       if (!n)
+               return;
+
+       /* Check for invalid addresses. */
+       err = check_bogus_address(ptr, n);
+       if (err)
+               goto report;
+
+       /* Check for bad heap object. */
+       err = check_heap_object(ptr, n, to_user);
+       if (err)
+               goto report;
+
+       /* Check for bad stack object. */
+       switch (check_stack_object(ptr, n)) {
+       case NOT_STACK:
+               /* Object is not touching the current process stack. */
+               break;
+       case GOOD_FRAME:
+       case GOOD_STACK:
+               /*
+                * Object is either in the correct frame (when it
+                * is possible to check) or just generally on the
+                * process stack (when frame checking not available).
+                */
+               return;
+       default:
+               err = "<process stack>";
+               goto report;
+       }
+
+       /* Check for object in kernel to avoid text exposure. */
+       err = check_kernel_text_object(ptr, n);
+       if (!err)
+               return;
+
+report:
+       report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
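
__check_object_size() is exported so the arch uaccess paths can reach it; the wiring itself is not in this section, but via the check_object_size() inline wrapper added by this series it presumably slots into the copy routines along these lines (both function names below are hypothetical stand-ins):

/* Sketch of how a copy-to-user path would invoke the hardening check. */
extern unsigned long arch_do_copy_to_user(void __user *to, const void *from,
					  unsigned long n);	/* hypothetical */

static inline unsigned long
hardened_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* to_user == true */
	return arch_do_copy_to_user(to, from, n);
}
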
index 374d95d0417856b096d902d40ff7cc29d4021b2e..0fe8b7113868fa01f308331c48ebf36a4c1c87ac 100644 (file)
@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
 
        for (zid = sc->reclaim_idx; zid >= 0; zid--) {
                zone = &pgdat->node_zones[zid];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                struct zone *zone = &pgdat->node_zones[zid];
                unsigned long inactive_zone, active_zone;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 
                for (z = 0; z < MAX_NR_ZONES; z++) {
                        struct zone *zone = &pgdat->node_zones[z];
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        total_high_wmark += high_wmark_pages(zone);
@@ -2303,23 +2303,6 @@ out:
        }
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-       /*
-        * This deliberately does not clear the cpumask as it's expensive
-        * and unnecessary. If there happens to be data in there then the
-        * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-        * then will be cleared.
-        */
-       current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
-       init_tlb_ubc();
-
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
@@ -2510,7 +2491,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        /* If compaction would go ahead or the allocation would succeed, stop */
        for (z = 0; z <= sc->reclaim_idx; z++) {
                struct zone *zone = &pgdat->node_zones[z];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2821,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 
        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
-               if (!populated_zone(zone) ||
+               if (!managed_zone(zone) ||
                    pgdat_reclaimable_pages(pgdat) == 0)
                        continue;
 
@@ -3141,7 +3122,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
        for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3150,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
        sc->nr_to_reclaim = 0;
        for (z = 0; z <= sc->reclaim_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3223,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                if (buffer_heads_over_limit) {
                        for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
                                zone = pgdat->node_zones + i;
-                               if (!populated_zone(zone))
+                               if (!managed_zone(zone))
                                        continue;
 
                                sc.reclaim_idx = i;
@@ -3262,7 +3243,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                 */
                for (i = classzone_idx; i >= 0; i--) {
                        zone = pgdat->node_zones + i;
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3489,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        pg_data_t *pgdat;
        int z;
 
-       if (!populated_zone(zone))
+       if (!managed_zone(zone))
                return;
 
        if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3503,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        /* Only wake kswapd if all zones are unbalanced */
        for (z = 0; z <= classzone_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_balanced(zone, order, classzone_idx))
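The populated_zone() -> managed_zone() conversion throughout this file matters on configurations where a zone spans present pages that the buddy allocator never manages (for example, pages consumed by the bootmem allocator). A sketch of the two helpers as defined in the mm headers (from memory, hedged accordingly):

        /* sketch: only managed pages are visible to reclaim */
        static inline bool populated_zone(struct zone *zone)
        {
                return zone->present_pages;  /* pages physically present */
        }

        static inline bool managed_zone(struct zone *zone)
        {
                return zone->managed_pages;  /* pages given to the buddy allocator */
        }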
index 82a116ba590eb5d289a76f73ca09f1cab9b4fdf5..8de138d3306bdbe6f3164db323637ded5bf5b5b3 100644 (file)
@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
+       vlan->nest_level = dev_get_nest_level(real_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 4acb1d5417aaf980bc7797c817eb9a9a350ecbf5..f24b25c25106fb55fb713b7308a8d43413a2143b 100644 (file)
@@ -507,8 +507,8 @@ err_out:
                /* wakeup anybody waiting for slots to pin pages */
                wake_up(&vp_wq);
        }
-       kfree(in_pages);
-       kfree(out_pages);
+       kvfree(in_pages);
+       kvfree(out_pages);
        return err;
 }
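The kfree() -> kvfree() switch above is needed whenever the matching allocation may have come from either the slab allocator or vmalloc(); kfree() is only valid for the former. A hedged sketch of the usual fallback pattern such buffers are allocated with:

        /* sketch: large, transient buffer with a vmalloc fallback;
         * kvfree() releases it correctly in either case */
        static void *alloc_page_array(size_t nr)
        {
                void *p = kmalloc_array(nr, sizeof(struct page *),
                                        GFP_KERNEL | __GFP_NOWARN);

                return p ? p : vmalloc(nr * sizeof(struct page *));
        }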
 
index 7d170010beb9050ffbb90aff463c16cde60e144d..ee08540ce503a1cc3fc350a693d87510cf51ab32 100644 (file)
@@ -335,7 +335,7 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
                goto out;
 
        skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
-       elp_buff = skb_push(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+       elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
        elp_packet = (struct batadv_elp_packet *)elp_buff;
        memset(elp_packet, 0, BATADV_ELP_HLEN);
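The skb_push() -> skb_put() fix above corrects the buffer geometry: after skb_reserve(), skb_put() claims payload space at the tail and leaves the reserved headroom intact for lower-layer headers, whereas skb_push() would have moved the data pointer back into the headroom that was just set aside. A sketch of the intended layout, with the allocation size assumed:

        /* sketch: reserve headroom first, then grow the payload at the tail */
        skb = dev_alloc_skb(ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN);
        skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); /* headroom for eth header */
        elp_buff = skb_put(skb, BATADV_ELP_HLEN);  /* payload space at the tail */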
 
index 7602c001e92b0a382577cd45a8c05da7b3889456..3d199478c4054300ea0b83085116d839a64047f4 100644 (file)
@@ -469,6 +469,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
+/**
+ * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * @orig_node: originator node whose last bonding candidate should be retrieved
+ *
+ * Return: last bonding candidate of the originator or NULL if not found
+ *
+ * The object is returned with its refcount increased by 1.
+ */
+static struct batadv_orig_ifinfo *
+batadv_last_bonding_get(struct batadv_orig_node *orig_node)
+{
+       struct batadv_orig_ifinfo *last_bonding_candidate;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       last_bonding_candidate = orig_node->last_bonding_candidate;
+
+       if (last_bonding_candidate)
+               kref_get(&last_bonding_candidate->refcount);
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
+       return last_bonding_candidate;
+}
+
 /**
  * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
@@ -539,7 +562,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
         * router - obviously there are no other candidates.
         */
        rcu_read_lock();
-       last_candidate = orig_node->last_bonding_candidate;
+       last_candidate = batadv_last_bonding_get(orig_node);
        if (last_candidate)
                last_cand_router = rcu_dereference(last_candidate->router);
 
@@ -631,6 +654,9 @@ next:
                batadv_orig_ifinfo_put(next_candidate);
        }
 
+       if (last_candidate)
+               batadv_orig_ifinfo_put(last_candidate);
+
        return router;
 }
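A hypothetical caller sketch for the batadv_last_bonding_get() helper added above, showing the get/put pairing its kernel-doc requires (the reference taken under neigh_list_lock must be dropped when the caller is done):

        /* sketch: every successful _get is balanced by a _put */
        struct batadv_orig_ifinfo *cand;

        cand = batadv_last_bonding_get(orig_node);
        if (cand) {
                /* ... inspect cand, e.g. its router, under RCU ... */
                batadv_orig_ifinfo_put(cand);
        }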
 
index ece45e0683fd85af7d5b9dcffdc8b92d52a20460..0b5f729d08d2b41abcfcdb1d7322d3c1ce01fa89 100644 (file)
@@ -250,7 +250,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
        skb_free_datagram(sk, skb);
 
-       if (msg->msg_flags & MSG_TRUNC)
+       if (flags & MSG_TRUNC)
                copied = skblen;
 
        return err ? : copied;
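The fix above tests the recvmsg() input flags rather than msg->msg_flags, which is an output field the kernel fills in; with the old test, a userspace MSG_TRUNC request was ignored. Hypothetical usage, assuming an already-connected Bluetooth socket fd:

        /* sketch: with MSG_TRUNC, recv() returns the full datagram length
         * even when buf was too small and the excess was discarded */
        ssize_t real_len = recv(fd, buf, sizeof(buf), MSG_TRUNC);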
index c045b3c54768e478f49f600fa8ffdae4a59456fc..b0e23dfc5c3402654f2586b4e29dac9966c1c841 100644 (file)
@@ -262,6 +262,8 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                break;
        }
 
+       kfree_skb(hdev->req_skb);
+       hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;
 
        BT_DBG("%s end: err %d", hdev->name, err);
index 6ef8a01a9ad44e90f37267cd2313a75de003e601..96f04b7b9556d020a64402179eecdf79f547e162 100644 (file)
@@ -1091,7 +1091,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 
        skb_free_datagram(sk, skb);
 
-       if (msg->msg_flags & MSG_TRUNC)
+       if (flags & MSG_TRUNC)
                copied = skblen;
 
        return err ? : copied;
index 54ceb1f2cc9a2a6026dec5bda6d9006191dcf053..d4cad29b033fc6d8601913013f291ab287648928 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/crc16.h>
+#include <linux/filter.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -5835,6 +5836,9 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
                if (chan->sdu)
                        break;
 
+               if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
+                       break;
+
                chan->sdu_len = get_unaligned_le16(skb->data);
                skb_pull(skb, L2CAP_SDULEN_SIZE);
 
@@ -6610,6 +6614,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
                goto drop;
        }
 
+       if ((chan->mode == L2CAP_MODE_ERTM ||
+            chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
+               goto drop;
+
        if (!control->sframe) {
                int err;
 
index 1842141baedb691c6bfb35b79090ed42223f1db4..a8ba752732c9859b060a3a91887fe1c9d5da0a54 100644 (file)
@@ -1019,7 +1019,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                goto done;
 
        if (pi->rx_busy_skb) {
-               if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+               if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
                        pi->rx_busy_skb = NULL;
                else
                        goto done;
@@ -1270,7 +1270,17 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
                goto done;
        }
 
-       err = sock_queue_rcv_skb(sk, skb);
+       if (chan->mode != L2CAP_MODE_ERTM &&
+           chan->mode != L2CAP_MODE_STREAMING) {
+               /* Even if no filter is attached, we could potentially
+                * get errors from security modules, etc.
+                */
+               err = sk_filter(sk, skb);
+               if (err)
+                       goto done;
+       }
+
+       err = __sock_queue_rcv_skb(sk, skb);
 
        /* For ERTM, handle one skb that doesn't fit into the recv
         * buffer.  This is important to do because the data frames
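The sk_filter() hooks added above make L2CAP sockets honour attached socket filters. A hypothetical userspace sketch of attaching a trivial classic-BPF program (this one accepts every packet, up to 64 KiB):

        #include <linux/filter.h>
        #include <sys/socket.h>

        struct sock_filter code[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffff), /* accept up to 65535 bytes */
        };
        struct sock_fprog prog = {
                .len    = sizeof(code) / sizeof(code[0]),
                .filter = code,
        };

        /* fd is an assumed, already-created L2CAP socket */
        setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));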
index c18080ad408572f53df75e18e2b56f714f784edb..cd620fab41b07827b922ac2c8fd1cbc2d50b143a 100644 (file)
@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 
        /* If old entry was unassociated with any port, then delete it. */
        f = __br_fdb_get(br, br->dev->dev_addr, 0);
-       if (f && f->is_local && !f->dst)
+       if (f && f->is_local && !f->dst && !f->added_by_user)
                fdb_delete_local(br, NULL, f);
 
        fdb_insert(br, NULL, newaddr, 0);
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
                if (!br_vlan_should_use(v))
                        continue;
                f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
-               if (f && f->is_local && !f->dst)
+               if (f && f->is_local && !f->dst && !f->added_by_user)
                        fdb_delete_local(br, NULL, f);
                fdb_insert(br, NULL, newaddr, v->vid);
        }
@@ -764,20 +764,25 @@ out:
 }
 
 /* Update (create or replace) forwarding database entry */
-static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
-                        __u16 state, __u16 flags, __u16 vid)
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+                        const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
 {
-       struct net_bridge *br = source->br;
        struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
        bool modified = false;
 
        /* If the port cannot learn allow only local and static entries */
-       if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+       if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
            !(source->state == BR_STATE_LEARNING ||
              source->state == BR_STATE_FORWARDING))
                return -EPERM;
 
+       if (!source && !(state & NUD_PERMANENT)) {
+               pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+                       br->dev->name);
+               return -EINVAL;
+       }
+
        fdb = fdb_find(head, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
        return 0;
 }
 
-static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
-              const unsigned char *addr, u16 nlh_flags, u16 vid)
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+                       struct net_bridge_port *p, const unsigned char *addr,
+                       u16 nlh_flags, u16 vid)
 {
        int err = 0;
 
        if (ndm->ndm_flags & NTF_USE) {
+               if (!p) {
+                       pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+                               br->dev->name);
+                       return -EINVAL;
+               }
                local_bh_disable();
                rcu_read_lock();
-               br_fdb_update(p->br, p, addr, vid, true);
+               br_fdb_update(br, p, addr, vid, true);
                rcu_read_unlock();
                local_bh_enable();
        } else {
-               spin_lock_bh(&p->br->hash_lock);
-               err = fdb_add_entry(p, addr, ndm->ndm_state,
+               spin_lock_bh(&br->hash_lock);
+               err = fdb_add_entry(br, p, addr, ndm->ndm_state,
                                    nlh_flags, vid);
-               spin_unlock_bh(&p->br->hash_lock);
+               spin_unlock_bh(&br->hash_lock);
        }
 
        return err;
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                                dev->name);
                        return -EINVAL;
                }
+               br = p->br;
                vg = nbp_vlan_group(p);
        }
 
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                }
 
                /* VID was specified, so use it. */
-               if (dev->priv_flags & IFF_EBRIDGE)
-                       err = br_fdb_insert(br, NULL, addr, vid);
-               else
-                       err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
        } else {
-               if (dev->priv_flags & IFF_EBRIDGE)
-                       err = br_fdb_insert(br, NULL, addr, 0);
-               else
-                       err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
                if (err || !vg || !vg->num_vlans)
                        goto out;
 
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        if (!br_vlan_should_use(v))
                                continue;
-                       if (dev->priv_flags & IFF_EBRIDGE)
-                               err = br_fdb_insert(br, NULL, addr, v->vid);
-                       else
-                               err = __br_fdb_add(ndm, p, addr, nlh_flags,
-                                                  v->vid);
+                       err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
                        if (err)
                                goto out;
                }
index 8e486203d133a7f9158f94fda6dbd03ace32c52b..abe11f085479c62d5ac969d8d2d13aa5a6f762db 100644 (file)
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 
        BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
 
-       if (dev->flags & IFF_NOARP)
+       if ((dev->flags & IFF_NOARP) ||
+           !pskb_may_pull(skb, arp_hdr_len(dev)))
                return;
 
-       if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
-               dev->stats.tx_dropped++;
-               return;
-       }
        parp = arp_hdr(skb);
 
        if (parp->ar_pro != htons(ETH_P_IP) ||
index a5423a1eec057a3b777e43a449dfff46c1f496fa..c5fea9393946f64af336873645db82d09599d442 100644 (file)
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                } else {
                        err = br_ip6_multicast_add_group(br, port,
                                                         &grec->grec_mca, vid);
-                       if (!err)
+                       if (err)
                                break;
                }
        }
index cceac5bb658f2245bad77981afb7b8109d2ad0f2..0833c251aef7963dc471e4385282c491b2fb2340 100644 (file)
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 
        match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               if (!IS_ERR(match))
+                       module_put(match->me);
                request_module("ebt_%s", m->u.name);
                match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        }
index 4b901d9f2e7cffe217881d4c5dc40d805ba87b9e..ad47a921b70152f7a802284a03ce690b03390d1c 100644 (file)
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
index c83326c5ba580480b877079d2a465430ff408cf5..ef34a02719d73147f4a4af29f3d861b4dc34391e 100644 (file)
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req)
        put_generic_request(req);
 }
 
-void cancel_generic_request(struct ceph_mon_generic_request *req)
+static void cancel_generic_request(struct ceph_mon_generic_request *req)
 {
        struct ceph_mon_client *monc = req->monc;
        struct ceph_mon_generic_request *lookup_req;
index b5ec09612ff71daeb1b95ed4b6f939a172cc7545..a97e7b506612b4255f4b99de76d74c46a1b3896d 100644 (file)
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
-               if (!pages) {
+               if (IS_ERR(pages)) {
                        ceph_msg_put(m);
                        return NULL;
                }
index ca53c8319209469a25011b15d26af951a09d392d..22fb96efcf3467713a9e5430057ead684326e3db 100644 (file)
@@ -84,12 +84,6 @@ retry:
 }
 EXPORT_SYMBOL(ceph_find_or_create_string);
 
-static void ceph_free_string(struct rcu_head *head)
-{
-       struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
-       kfree(cs);
-}
-
 void ceph_release_string(struct kref *ref)
 {
        struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref)
        }
        spin_unlock(&string_tree_lock);
 
-       call_rcu(&cs->rcu, ceph_free_string);
+       kfree_rcu(cs, rcu);
 }
 EXPORT_SYMBOL(ceph_release_string);
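kfree_rcu() above replaces an open-coded call_rcu() callback whose only job was kfree(): given a struct with an embedded rcu_head, the macro frees the object after a grace period without a dedicated callback. Sketch, with the struct layout assumed:

        /* sketch: kfree_rcu(ptr, field) names the embedded rcu_head member */
        struct ceph_string {
                struct kref kref;
                struct rcu_head rcu;
                /* ... */
        };

        kfree_rcu(cs, rcu); /* schedules kfree(cs) after the RCU grace period */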
 
index 4ce07dc25573ed3d20f181f5b36327cb0f407fe3..ea6312057a7136806fc0c17d018c8909d4d130ba 100644 (file)
@@ -3974,6 +3974,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        return skb;
 }
 
+/**
+ *     netdev_is_rx_handler_busy - check if receive handler is registered
+ *     @dev: device to check
+ *
+ *     Check if a receive handler is already registered for a given device.
+ *     Return true if there is one.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+bool netdev_is_rx_handler_busy(struct net_device *dev)
+{
+       ASSERT_RTNL();
+       return dev && rtnl_dereference(dev->rx_handler);
+}
+EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
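A hypothetical sketch of how a master-device driver might pair the new helper with the existing registration API before claiming a port (my_rx_handler and priv are assumed names):

        /* sketch: refuse to enslave a device whose rx path is already taken */
        ASSERT_RTNL();
        if (netdev_is_rx_handler_busy(port_dev))
                return -EBUSY;

        err = netdev_rx_handler_register(port_dev, my_rx_handler, priv);
        if (err)
                return err;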
+
 /**
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
@@ -6045,8 +6061,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev,
-                      bool (*type_check)(const struct net_device *dev))
+int dev_get_nest_level(struct net_device *dev)
 {
        struct net_device *lower = NULL;
        struct list_head *iter;
@@ -6056,15 +6071,12 @@ int dev_get_nest_level(struct net_device *dev,
        ASSERT_RTNL();
 
        netdev_for_each_lower_dev(dev, lower, iter) {
-               nest = dev_get_nest_level(lower, type_check);
+               nest = dev_get_nest_level(lower);
                if (max_nest < nest)
                        max_nest = nest;
        }
 
-       if (type_check(dev))
-               max_nest++;
-
-       return max_nest;
+       return max_nest + 1;
 }
 EXPORT_SYMBOL(dev_get_nest_level);
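With the type_check callback removed, dev_get_nest_level() simply returns one more than the deepest nesting level found among a device's lower devices, and callers that used to filter by type (see the 8021q hunk earlier) add their own offset instead. A sketch of the recursion, assuming a hypothetical eth0 <- vlan0 <- macvlan0 stack:

        /* sketch: each stacking layer adds one to the deepest lower level
         *   dev_get_nest_level(macvlan0) == dev_get_nest_level(vlan0) + 1
         *                                == dev_get_nest_level(eth0) + 2
         */
        vlan->nest_level = dev_get_nest_level(real_dev) + 1;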
 
index 5708999f8a7945ec738e2043057817403e1b64ef..cb06aceb512acd225eb909fd36055fbf2e9dbf51 100644 (file)
@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
 {
        int err;
 
-       if (!skb_cloned(skb))
-               return 0;
-       if (skb_clone_writable(skb, write_len))
-               return 0;
-       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-       if (!err)
-               bpf_compute_data_end(skb);
+       err = skb_ensure_writable(skb, write_len);
+       bpf_compute_data_end(skb);
+
        return err;
 }
 
+static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
+{
+       if (skb_at_tc_ingress(skb))
+               skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
+static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
+{
+       if (skb_at_tc_ingress(skb))
+               skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
-       struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
-       int offset = (int) r2;
+       unsigned int offset = (unsigned int) r2;
        void *from = (void *) (long) r3;
        unsigned int len = (unsigned int) r4;
        void *ptr;
 
        if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
                return -EINVAL;
-
-       /* bpf verifier guarantees that:
-        * 'from' pointer points to bpf program stack
-        * 'len' bytes of it were initialized
-        * 'len' > 0
-        * 'skb' is a valid pointer to 'struct sk_buff'
-        *
-        * so check for invalid 'offset' and too large 'len'
-        */
-       if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
+       if (unlikely(offset > 0xffff))
                return -EFAULT;
        if (unlikely(bpf_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
-       ptr = skb_header_pointer(skb, offset, len, sp->buff);
-       if (unlikely(!ptr))
-               return -EFAULT;
-
+       ptr = skb->data + offset;
        if (flags & BPF_F_RECOMPUTE_CSUM)
-               skb_postpull_rcsum(skb, ptr, len);
+               __skb_postpull_rcsum(skb, ptr, len, offset);
 
        memcpy(ptr, from, len);
 
-       if (ptr == sp->buff)
-               /* skb_store_bits cannot return -EFAULT here */
-               skb_store_bits(skb, offset, ptr, len);
-
        if (flags & BPF_F_RECOMPUTE_CSUM)
-               skb_postpush_rcsum(skb, ptr, len);
+               __skb_postpush_rcsum(skb, ptr, len, offset);
        if (flags & BPF_F_INVALIDATE_HASH)
                skb_clear_hash(skb);
 
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
-       int offset = (int) r2;
+       unsigned int offset = (unsigned int) r2;
        void *to = (void *)(unsigned long) r3;
        unsigned int len = (unsigned int) r4;
        void *ptr;
 
-       if (unlikely((u32) offset > 0xffff))
+       if (unlikely(offset > 0xffff))
                goto err_clear;
 
        ptr = skb_header_pointer(skb, offset, len, to);
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
-       int offset = (int) r2;
-       __sum16 sum, *ptr;
+       unsigned int offset = (unsigned int) r2;
+       __sum16 *ptr;
 
        if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
-       if (unlikely((u32) offset > 0xffff))
+       if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
-       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
-               return -EFAULT;
-
-       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-       if (unlikely(!ptr))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;
 
+       ptr = (__sum16 *)(skb->data + offset);
        switch (flags & BPF_F_HDR_FIELD_MASK) {
        case 0:
                if (unlikely(from != 0))
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        }
 
-       if (ptr == &sum)
-               /* skb_store_bits guaranteed to not return -EFAULT here */
-               skb_store_bits(skb, offset, ptr, sizeof(sum));
-
        return 0;
 }
 
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
        bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
-       int offset = (int) r2;
-       __sum16 sum, *ptr;
+       unsigned int offset = (unsigned int) r2;
+       __sum16 *ptr;
 
        if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
                               BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
-       if (unlikely((u32) offset > 0xffff))
+       if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
-       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;
 
-       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-       if (unlikely(!ptr))
-               return -EFAULT;
+       ptr = (__sum16 *)(skb->data + offset);
        if (is_mmzero && !*ptr)
                return 0;
 
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 
        if (is_mmzero && !*ptr)
                *ptr = CSUM_MANGLED_0;
-       if (ptr == &sum)
-               /* skb_store_bits guaranteed to not return -EFAULT here */
-               skb_store_bits(skb, offset, ptr, sizeof(sum));
-
        return 0;
 }
 
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 
 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 {
-       if (skb_at_tc_ingress(skb))
-               skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
-
        return dev_forward_skb(dev, skb);
 }
 
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
        if (unlikely(!skb))
                return -ENOMEM;
 
+       bpf_push_mac_rcsum(skb);
+
        return flags & BPF_F_INGRESS ?
               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb)
                return -EINVAL;
        }
 
+       bpf_push_mac_rcsum(skb);
+
        return ri->flags & BPF_F_INGRESS ?
               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);
 
+       bpf_push_mac_rcsum(skb);
        ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+       bpf_pull_mac_rcsum(skb);
+
        bpf_compute_data_end(skb);
        return ret;
 }
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        int ret;
 
+       bpf_push_mac_rcsum(skb);
        ret = skb_vlan_pop(skb);
+       bpf_pull_mac_rcsum(skb);
+
        bpf_compute_data_end(skb);
        return ret;
 }
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 }
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
-static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *)(long)r1;
        struct bpf_map *map = (struct bpf_map *)(long)r2;
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
 }
 
-static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
-       .func           = bpf_skb_in_cgroup,
+static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
+       .func           = bpf_skb_under_cgroup,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
 #ifdef CONFIG_SOCK_CGROUP_DATA
-       case BPF_FUNC_skb_in_cgroup:
-               return &bpf_skb_in_cgroup_proto;
+       case BPF_FUNC_skb_under_cgroup:
+               return &bpf_skb_under_cgroup_proto;
 #endif
        default:
                return sk_filter_func_proto(func_id);
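The bpf_push/pull_mac_rcsum() helpers above keep skb->csum coherent on tc ingress, where the MAC header has been pulled out of the checksummed area but must be counted again around operations such as VLAN push/pop. For reference, a simplified sketch of what the underlying skbuff primitive does for CHECKSUM_COMPLETE packets (hedged, from memory; the matching postpush variant adds the bytes back):

        /* sketch: subtract the bytes leaving the checksummed area */
        static inline void sketch_postpull_rcsum(struct sk_buff *skb,
                                                 const void *start,
                                                 unsigned int len)
        {
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_sub(skb->csum,
                                             csum_partial(start, len, 0));
        }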
index 61ad43f61c5edbffa48f4983d8bf1a7472a66cdc..52742a02814fddc6da9dd23c90af50c363b93b11 100644 (file)
@@ -680,11 +680,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
+       u32 hash;
 
        __flow_hash_secret_init();
 
-       __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
-                         flow_keys_have_l4(&keys));
+       hash = ___skb_get_hash(skb, &keys, hashrnd);
+
+       __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
index 25dab8b60223e25ee92dae45a226fce2a6bb5a03..fd7b41edf1cec86e0ae464ec93bee25a43721afe 100644 (file)
@@ -1362,7 +1362,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                if (!try_module_get(prot->owner))
                        goto out_free_sec;
                sk_tx_queue_clear(sk);
-               cgroup_sk_alloc(&sk->sk_cgrp_data);
        }
 
        return sk;
@@ -1422,6 +1421,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                sock_net_set(sk, net);
                atomic_set(&sk->sk_wmem_alloc, 1);
 
+               cgroup_sk_alloc(&sk->sk_cgrp_data);
                sock_update_classid(&sk->sk_cgrp_data);
                sock_update_netprioidx(&sk->sk_cgrp_data);
        }
@@ -1566,6 +1566,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
+
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
index 415e117967c775f805b43cd634bdf9a55f064e2f..062a67ca9a212f6e8a966e5419c4a11867e601d9 100644 (file)
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
 };
 
 static int __devinet_sysctl_register(struct net *net, char *dev_name,
-                                       struct ipv4_devconf *p)
+                                    int ifindex, struct ipv4_devconf *p)
 {
        int i;
        struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
                goto free;
 
        p->sysctl = t;
+
+       inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
        if (err)
                return err;
        err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
-                                       &idev->cnf);
+                                       idev->dev->ifindex, &idev->cnf);
        if (err)
                neigh_sysctl_unregister(idev->arp_parms);
        return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
        }
 
 #ifdef CONFIG_SYSCTL
-       err = __devinet_sysctl_register(net, "all", all);
+       err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
        if (err < 0)
                goto err_reg_all;
 
-       err = __devinet_sysctl_register(net, "default", dflt);
+       err = __devinet_sysctl_register(net, "default",
+                                       NETCONFA_IFINDEX_DEFAULT, dflt);
        if (err < 0)
                goto err_reg_dflt;
 
index ef2ebeb89d0fb0cb9cb5da4aea13183097baec8c..1b25daf8c7f12ada5a6ea7178d95c6c086f9098d 100644 (file)
@@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                if (!dev)
                        return -ENODEV;
                cfg->fc_oif = dev->ifindex;
+               cfg->fc_table = l3mdev_fib_table(dev);
                if (colon) {
                        struct in_ifaddr *ifa;
                        struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1027,7 +1028,7 @@ no_promotions:
                         * First of all, we scan fib_info list searching
                         * for stray nexthop entries, then ignite fib_flush.
                         */
-                       if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
+                       if (fib_sync_down_addr(dev, ifa->ifa_local))
                                fib_flush(dev_net(dev));
                }
        }
index 539fa264e67d71148364c9fc0e694c78fd35e69b..e9f56225e53fde93a5470ee32c26b21b99ca4655 100644 (file)
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi->fib_priority = cfg->fc_priority;
        fi->fib_prefsrc = cfg->fc_prefsrc;
        fi->fib_type = cfg->fc_type;
+       fi->fib_tb_id = cfg->fc_table;
 
        fi->fib_nhs = nhs;
        change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
  *   referring to it.
  * - device went down -> we must shutdown all nexthops going via it.
  */
-int fib_sync_down_addr(struct net *net, __be32 local)
+int fib_sync_down_addr(struct net_device *dev, __be32 local)
 {
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
+       struct net *net = dev_net(dev);
+       int tb_id = l3mdev_fib_table(dev);
        struct fib_info *fi;
 
        if (!fib_info_laddrhash || local == 0)
                return 0;
 
        hlist_for_each_entry(fi, head, fib_lhash) {
-               if (!net_eq(fi->fib_net, net))
+               if (!net_eq(fi->fib_net, net) ||
+                   fi->fib_tb_id != tb_id)
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
index d07fc076bea0a4bc96f68075fb3bb79b95007e63..e2ffc2a5c7db6ff3eea20d2083c2013fdf0dcc23 100644 (file)
@@ -249,7 +249,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
  * index into the parent's child array. That is, they will be used to find
  * 'n' among tp's children.
  *
- * The bits from (n->pos + n->bits) to (tn->pos - 1) - "S" - are skipped bits
+ * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
  * for the node n.
  *
  * All the bits we have seen so far are significant to the node n. The rest
@@ -258,7 +258,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
  * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  * n's child array, and will of course be different for each child.
  *
- * The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown
+ * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
  * at this point.
  */
 
@@ -2452,9 +2452,7 @@ struct fib_route_iter {
 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                                            loff_t pos)
 {
-       struct fib_table *tb = iter->main_tb;
        struct key_vector *l, **tp = &iter->tnode;
-       struct trie *t;
        t_key key;
 
        /* use cache location of next-to-find key */
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                pos -= iter->pos;
                key = iter->key;
        } else {
-               t = (struct trie *)tb->tb_data;
-               iter->tnode = t->kv;
                iter->pos = 0;
                key = 0;
        }
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
                return NULL;
 
        iter->main_tb = tb;
+       t = (struct trie *)tb->tb_data;
+       iter->tnode = t->kv;
 
        if (*pos != 0)
                return fib_route_get_idx(iter, *pos);
 
-       t = (struct trie *)tb->tb_data;
-       iter->tnode = t->kv;
        iter->pos = 0;
        iter->key = 0;
 
index 5b1481be028212cfb61735a2d8ff8fbccbee203f..113cc43df789a34b80fcf897621c936ff0cd6ca8 100644 (file)
@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                         tunnel->parms.o_flags, proto, tunnel->parms.o_key,
                         htonl(tunnel->o_seqno));
 
-       skb_set_inner_protocol(skb, proto);
        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
index 4b351af3e67b60ba89ab38fb28dac42b6648274d..d6feabb0351607f282e1f78f159c0ccb88bcec96 100644 (file)
@@ -312,6 +312,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
+       struct net_device *dev = skb->dev;
 
        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -341,7 +342,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
         */
        if (!skb_valid_dst(skb)) {
                int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                              iph->tos, skb->dev);
+                                              iph->tos, dev);
                if (unlikely(err)) {
                        if (err == -EXDEV)
                                __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
@@ -370,7 +371,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
        } else if (skb->pkt_type == PACKET_BROADCAST ||
                   skb->pkt_type == PACKET_MULTICAST) {
-               struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+               struct in_device *in_dev = __in_dev_get_rcu(dev);
 
                /* RFC 1122 3.3.6:
                 *
index 9d847c3025516f3225c60be7c492adff4d254e5b..0f227db0e9ac6637c444fc8060b2e8f067653c3d 100644 (file)
@@ -73,9 +73,11 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
        skb_dst_set(skb, &rt->dst);
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
-       if (skb_iif && proto == IPPROTO_UDP) {
-               /* Arrived from an ingress interface and got udp encapuslated.
-                * The encapsulated network segment length may exceed dst mtu.
+       if (skb_iif && !(df & htons(IP_DF))) {
+               /* Arrived from an ingress interface, got encapsulated, with
+        * fragmentation of encapsulating frames allowed.
+                * If skb is gso, the resulting encapsulated network segments
+                * may exceed dst mtu.
                 * Allow IP Fragmentation of segments.
                 */
                IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
index a917903d5e9742fb07bac1b2a7fa94ee069c0d54..5d7944f394d9af83d6209c6ab2778be9e24d3553 100644 (file)
@@ -88,6 +88,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
+       struct xfrm_mode *inner_mode;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
        u32 orig_mark = skb->mark;
        int ret;
@@ -105,7 +106,19 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        }
 
        x = xfrm_input_state(skb);
-       family = x->inner_mode->afinfo->family;
+
+       inner_mode = x->inner_mode;
+
+       if (x->sel.family == AF_UNSPEC) {
+               inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+               if (inner_mode == NULL) {
+                       XFRM_INC_STATS(dev_net(skb->dev),
+                                      LINUX_MIB_XFRMINSTATEMODEERROR);
+                       return -EINVAL;
+               }
+       }
+
+       family = inner_mode->afinfo->family;
 
        skb->mark = be32_to_cpu(tunnel->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
@@ -557,6 +570,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .get_link_net   = ip_tunnel_get_link_net,
 };
 
+static bool is_vti_tunnel(const struct net_device *dev)
+{
+       return dev->netdev_ops == &vti_netdev_ops;
+}
+
+static int vti_device_event(struct notifier_block *unused,
+                           unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       if (!is_vti_tunnel(dev))
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_DOWN:
+               if (!net_eq(tunnel->net, dev_net(dev)))
+                       xfrm_garbage_collect(tunnel->net);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block vti_notifier_block __read_mostly = {
+       .notifier_call = vti_device_event,
+};
+
 static int __init vti_init(void)
 {
        const char *msg;
@@ -564,6 +604,8 @@ static int __init vti_init(void)
 
        pr_info("IPv4 over IPsec tunneling driver\n");
 
+       register_netdevice_notifier(&vti_notifier_block);
+
        msg = "tunnel device";
        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
@@ -596,6 +638,7 @@ xfrm_proto_ah_failed:
 xfrm_proto_esp_failed:
        unregister_pernet_device(&vti_net_ops);
 pernet_dev_failed:
+       unregister_netdevice_notifier(&vti_notifier_block);
        pr_err("vti init: failed to register %s\n", msg);
        return err;
 }
@@ -607,6 +650,7 @@ static void __exit vti_fini(void)
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti_net_ops);
+       unregister_netdevice_notifier(&vti_notifier_block);
 }
 
 module_init(vti_init);
index 26253328d227bda8e3a6b23be3130a4ce4231b60..a87bcd2d4a94ed1712dd0379d3f81283a41965d4 100644 (file)
@@ -2076,6 +2076,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        struct rta_mfc_stats mfcs;
        struct nlattr *mp_attr;
        struct rtnexthop *nhp;
+       unsigned long lastuse;
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2105,12 +2106,14 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
        nla_nest_end(skb, mp_attr);
 
+       lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+       lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
        if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-           nla_put_u64_64bit(skb, RTA_EXPIRES,
-                             jiffies_to_clock_t(c->mfc_un.res.lastuse),
+           nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                              RTA_PAD))
                return -EMSGSIZE;
 
index 2375b0a8be4618941e1e4d10fb54089d4f82ea79..30493beb611a1e43f3d7cb26afa14e88a935b894 100644 (file)
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
        __be32 saddr, daddr;
        u_int8_t tos;
        const struct iphdr *iph;
+       int err;
 
        /* root is playing with raw sockets. */
        if (skb->len < sizeof(struct iphdr) ||
@@ -46,15 +47,17 @@ static unsigned int nf_route_table_hook(void *priv,
        tos = iph->tos;
 
        ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_QUEUE) {
+       if (ret != NF_DROP && ret != NF_STOLEN) {
                iph = ip_hdr(skb);
 
                if (iph->saddr != saddr ||
                    iph->daddr != daddr ||
                    skb->mark != mark ||
-                   iph->tos != tos)
-                       if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
-                               ret = NF_DROP;
+                   iph->tos != tos) {
+                       err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
        }
        return ret;
 }
index c24f41c816b33f22b7e31ffb0b53e963b296f8c1..2c2553b9026cc652b6917529f71eba487c1ce711 100644 (file)
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
        .eval           = nft_reject_ipv4_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
index a1f2830d811025e561c9739c79a5c218a3801977..b5b47a26d4ec4b729a60502b3f9852a92fd4bb52 100644 (file)
@@ -476,12 +476,18 @@ u32 ip_idents_reserve(u32 hash, int segs)
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
        u32 old = ACCESS_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
-       u32 delta = 0;
+       u32 new, delta = 0;
 
        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);
 
-       return atomic_add_return(segs + delta, p_id) - segs;
+       /* Do not use atomic_add_return() as it makes UBSAN unhappy */
+       do {
+               old = (u32)atomic_read(p_id);
+               new = old + delta + segs;
+       } while (atomic_cmpxchg(p_id, old, new) != old);
+
+       return new - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
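The loop above is the standard lock-free read-modify-write pattern: snapshot the value, compute the update, and retry if another CPU raced in between. Because the arithmetic is done on plain u32 values, wrap-around is well-defined unsigned behaviour, which is what keeps UBSAN quiet here. A generic sketch of the pattern, with transform() an assumed pure update:

        /* sketch: retry until no concurrent writer interfered */
        do {
                old = (u32)atomic_read(p);
                new = transform(old);
        } while (atomic_cmpxchg(p, old, new) != old);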
 
index 032a96d78c99deda3b3298a305298f92776e2500..ffbb218de52057afbc55f95386200226a1c60517 100644 (file)
@@ -3193,7 +3193,6 @@ int tcp_abort(struct sock *sk, int err)
                        local_bh_enable();
                        return 0;
                }
-               sock_gen_put(sk);
                return -EOPNOTSUPP;
        }
 
@@ -3222,7 +3221,6 @@ int tcp_abort(struct sock *sk, int err)
        bh_unlock_sock(sk);
        local_bh_enable();
        release_sock(sk);
-       sock_put(sk);
        return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_abort);
index 4d610934fb391c111d822a4d0544334a7b4b858f..a748c74aa8b781626d7a7805eef0f50da8e11328 100644 (file)
@@ -54,11 +54,16 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
 {
        struct net *net = sock_net(in_skb->sk);
        struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
+       int err;
 
        if (IS_ERR(sk))
                return PTR_ERR(sk);
 
-       return sock_diag_destroy(sk, ECONNABORTED);
+       err = sock_diag_destroy(sk, ECONNABORTED);
+
+       sock_gen_put(sk);
+
+       return err;
 }
 #endif
 
index 54d9f9b0120fe26828f6cc522711f8df8b2bcfb3..4e777a3243f94457d9928e3967bb83947da563f6 100644 (file)
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
+       sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);
 
        TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_fastopen_add_skb(child, skb);
 
        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
+       tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
index 3ebf45b38bc309f448dbc4f27fe8722cefabaf19..08323bd95f2aaf57ce86d7adde063d0ff000180f 100644 (file)
@@ -5885,7 +5885,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                 * so release it.
                 */
                if (req) {
-                       tp->total_retrans = req->num_retrans;
+                       inet_csk(sk)->icsk_retransmits = 0;
                        reqsk_fastopen_remove(sk, req, false);
                } else {
                        /* Make sure socket is routed, for correct metrics. */
index 32b048e524d6773538918eca175b3f422f9c2aa7..7158d4f8dae4fe2482691e071a1f7751468f6b9a 100644 (file)
@@ -814,8 +814,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
                                             tcp_sk(sk)->snd_nxt;
 
+       /* RFC 7323 2.3
+        * The window field (SEG.WND) of every outgoing segment, with the
+        * exception of <SYN> segments, MUST be right-shifted by
+        * Rcv.Wind.Shift bits:
+        */
        tcp_v4_send_ack(sock_net(sk), skb, seq,
-                       tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+                       tcp_rsk(req)->rcv_nxt,
+                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
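A worked example of the shift above: assuming a negotiated rcv_wscale of 7 and rsk_rcv_wnd = 65535, the ACK now advertises 65535 >> 7 = 511 in the 16-bit window field, which the peer scales back up to 511 << 7 = 65408 bytes; without the shift, the peer would have scaled the raw value up to roughly 65535 << 7 ~ 8 MiB and badly over-estimated the receive window.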
index bdaef7fd6e47701b2d2280e1f40f816e1920c432..5288cec4a2b27efc785c78ed19397f30fb1035ae 100644 (file)
@@ -2605,7 +2605,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
-           min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+           min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+                 sk->sk_sndbuf))
                return -EAGAIN;
 
        if (skb_still_in_host_queue(sk, skb))
@@ -2830,7 +2831,7 @@ begin_fwd:
                if (tcp_retransmit_skb(sk, skb, segs))
                        return;
 
-               NET_INC_STATS(sock_net(sk), mib_idx);
+               NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
@@ -3567,6 +3568,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        if (!res) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               if (unlikely(tcp_passive_fastopen(sk)))
+                       tcp_sk(sk)->total_retrans++;
        }
        return res;
 }
index d84930b2dd95759284aedbaee7f0a7ee078b6347..f712b411f6ed6c117aee9c0fb54872db0eae17aa 100644 (file)
@@ -384,6 +384,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
+       icsk->icsk_retransmits++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 }
index 028eb046ea40d510908040023b46b46469477505..9c5fc973267fe542a41f42c249e564dc8f5a0624 100644 (file)
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else if (!yeah->doing_reno_now) {
index e61f7cd65d08a478f4b4f4d60841688b89bb1f2d..5fdcb8d108d468ab33e8f29a21efdd643d27a49f 100644 (file)
@@ -1182,13 +1182,13 @@ out:
  *     @sk: socket
  *
  *     Drops all bad checksum frames, until a valid one is found.
- *     Returns the length of found skb, or 0 if none is found.
+ *     Returns the length of found skb, or -1 if none is found.
  */
-static unsigned int first_packet_length(struct sock *sk)
+static int first_packet_length(struct sock *sk)
 {
        struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
        struct sk_buff *skb;
-       unsigned int res;
+       int res;
 
        __skb_queue_head_init(&list_kill);
 
@@ -1203,7 +1203,7 @@ static unsigned int first_packet_length(struct sock *sk)
                __skb_unlink(skb, rcvq);
                __skb_queue_tail(&list_kill, skb);
        }
-       res = skb ? skb->len : 0;
+       res = skb ? skb->len : -1;
        spin_unlock_bh(&rcvq->lock);
 
        if (!skb_queue_empty(&list_kill)) {
@@ -1232,7 +1232,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
        case SIOCINQ:
        {
-               unsigned int amount = first_packet_length(sk);
+               int amount = max_t(int, 0, first_packet_length(sk));
 
                return put_user(amount, (int __user *)arg);
        }
@@ -2184,7 +2184,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
-           !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
+           !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(POLLIN | POLLRDNORM);
 
        return mask;
@@ -2216,7 +2216,6 @@ struct proto udp_prot = {
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udp_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
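With first_packet_length() returning -1 for an empty queue, udp_ioctl() clamps the result via max_t() so SIOCINQ still reports 0 to userspace, while udp_poll() can now distinguish an empty queue from a queued zero-length datagram. Hypothetical userspace usage, assuming a bound UDP socket fd:

        /* sketch: bytes in the next pending datagram; 0 when nothing queued */
        int avail = 0;

        if (ioctl(fd, SIOCINQ, &avail) == 0)
                printf("next datagram: %d bytes\n", avail);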
index 3b3efbda48e13941b35388238508706844c043aa..2eea073e27efa13a86d6c6107426435ae7ddb18d 100644 (file)
@@ -55,7 +55,6 @@ struct proto  udplite_prot = {
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v4_get_port,
        .obj_size          = sizeof(struct udp_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udplite_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
index b644a23c3db06e851b239e694f22ed7a869653a3..41f5b504a782c392a20594325706c199136ecaa5 100644 (file)
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
-       fl4->flowi4_oif = oif;
+       fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
        if (saddr)
                fl4->saddr = saddr->a4;
 
index ab3e796596b1dc065ba46390e82306fe138c8509..2f1f5d439788820aa2b75be8a26addb757ccf50c 100644 (file)
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
        }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
+               int old_dflt = net->ipv6.devconf_dflt->forwarding;
+
                net->ipv6.devconf_dflt->forwarding = newf;
+               if ((!newf) ^ (!old_dflt))
+                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                                                    NETCONFA_IFINDEX_DEFAULT,
+                                                    net->ipv6.devconf_dflt);
+
                addrconf_forward_change(net, newf);
                if ((!newf) ^ (!old))
                        inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
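
Note: (!newf) ^ (!old_dflt) is the usual change-detection idiom for integer sysctls: both values are normalized to booleans before the XOR, so the netconf notification fires only when the setting actually toggled, not on every write of the same value:

    bool toggled = (!newf) ^ (!old_dflt);
    /* (old 0, new 5) -> true:  forwarding turned on, notify
     * (old 5, new 7) -> false: still on, nothing to report */
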
@@ -1872,7 +1879,6 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
 
 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
-       struct in6_addr addr;
        struct inet6_dev *idev = ifp->idev;
        struct net *net = dev_net(ifp->idev->dev);
 
@@ -1934,18 +1940,6 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
                in6_ifa_put(ifp2);
 lock_errdad:
                spin_lock_bh(&ifp->lock);
-       } else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
-               addr.s6_addr32[0] = htonl(0xfe800000);
-               addr.s6_addr32[1] = 0;
-
-               if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
-                   ipv6_addr_equal(&ifp->addr, &addr)) {
-                       /* DAD failed for link-local based on MAC address */
-                       idev->cnf.disable_ipv6 = 1;
-
-                       pr_info("%s: IPv6 being disabled!\n",
-                               ifp->idev->dev->name);
-               }
        }
 
 errdad:
@@ -1954,6 +1948,7 @@ errdad:
        spin_unlock_bh(&ifp->lock);
 
        addrconf_mod_dad_work(ifp, 0);
+       in6_ifa_put(ifp);
 }
 
 /* Join to solicited addr multicast group.
@@ -3543,7 +3538,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        /* combine the user config with event to determine if permanent
         * addresses are to be removed from address hash table
         */
-       keep_addr = !(how || _keep_addr <= 0);
+       keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
 
        /* Step 2: clear hash table */
        for (i = 0; i < IN6_ADDR_HSIZE; i++) {
@@ -3599,7 +3594,7 @@ restart:
        /* re-combine the user config with event to determine if permanent
         * addresses are to be removed from the interface list
         */
-       keep_addr = (!how && _keep_addr > 0);
+       keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
 
        INIT_LIST_HEAD(&del_list);
        list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
@@ -3821,6 +3816,7 @@ static void addrconf_dad_work(struct work_struct *w)
                                                dad_work);
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr mcaddr;
+       bool disable_ipv6 = false;
 
        enum {
                DAD_PROCESS,
@@ -3837,6 +3833,24 @@ static void addrconf_dad_work(struct work_struct *w)
        } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
                action = DAD_ABORT;
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
+
+               if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
+                   !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
+                       struct in6_addr addr;
+
+                       addr.s6_addr32[0] = htonl(0xfe800000);
+                       addr.s6_addr32[1] = 0;
+
+                       if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
+                           ipv6_addr_equal(&ifp->addr, &addr)) {
+                               /* DAD failed for link-local based on MAC */
+                               idev->cnf.disable_ipv6 = 1;
+
+                               pr_info("%s: IPv6 being disabled!\n",
+                                       ifp->idev->dev->name);
+                               disable_ipv6 = true;
+                       }
+               }
        }
        spin_unlock_bh(&ifp->lock);
 
@@ -3844,7 +3858,10 @@ static void addrconf_dad_work(struct work_struct *w)
                addrconf_dad_begin(ifp);
                goto out;
        } else if (action == DAD_ABORT) {
+               in6_ifa_hold(ifp);
                addrconf_dad_stop(ifp, 1);
+               if (disable_ipv6)
+                       addrconf_ifdown(idev->dev, 0);
                goto out;
        }
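
A hedged reading of the addrconf hunks above: addrconf_ifdown() must run in process context, not under the bh-disabled ifp->lock held when a DAD failure is detected, so the MAC-derived "disable IPv6" decision is now only recorded under the lock and acted on afterwards from the DAD work item:

    in6_ifa_hold(ifp);                      /* keep ifp alive across dad_stop */
    addrconf_dad_stop(ifp, 1);              /* may drop references to ifp     */
    if (disable_ipv6)
            addrconf_ifdown(idev->dev, 0);  /* now safe: workqueue context    */

The in6_ifa_put() added at the end of addrconf_dad_failure() balances a hold taken on this path, plugging a reference leak.
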
 
@@ -6017,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                struct inet6_dev *idev, struct ipv6_devconf *p)
 {
-       int i;
+       int i, ifindex;
        struct ctl_table *table;
        char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
@@ -6037,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
        if (!p->sysctl_header)
                goto free;
 
+       if (!strcmp(dev_name, "all"))
+               ifindex = NETCONFA_IFINDEX_ALL;
+       else if (!strcmp(dev_name, "default"))
+               ifindex = NETCONFA_IFINDEX_DEFAULT;
+       else
+               ifindex = idev->dev->ifindex;
+       inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
index c53b92c617c545c041d50a4e458e0533d93a59df..37ac9de713c69af30ae50d03e53ee472a7520b98 100644 (file)
@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop,
                memcpy(new, hop, start);
        ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
                                 secattr);
-       if (ret_val < 0)
+       if (ret_val < 0) {
+               kfree(new);
                return ERR_PTR(ret_val);
+       }
 
        buf_len = start + ret_val;
        /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
index 776d145113e138872f45d97e7f66ff0416762d85..704274cbd495848848f0daf7dab53c57018b3e2d 100644 (file)
@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
        gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
                         protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
-       skb_set_inner_protocol(skb, protocol);
-
        return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
                            NEXTHDR_GRE);
 }
index 7b0481e3738f20b672cac77a88617adbe8ecfbbf..888543debe4eb5e91502bdd6b0e7315dd0340ac2 100644 (file)
@@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
 
        dsfield = ipv4_get_dsfield(iph);
 
@@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
index d90a11f14040cf8097d3270710ab7db02be60d84..5bd3afdcc7715e3abd83ee5f31a8d9eb9311d7fd 100644 (file)
@@ -321,11 +321,9 @@ static int vti6_rcv(struct sk_buff *skb)
                        goto discard;
                }
 
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-
                rcu_read_unlock();
 
-               return xfrm6_rcv(skb);
+               return xfrm6_rcv_tnl(skb, t);
        }
        rcu_read_unlock();
        return -EINVAL;
@@ -340,6 +338,7 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
+       struct xfrm_mode *inner_mode;
        struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
        u32 orig_mark = skb->mark;
        int ret;
@@ -357,7 +356,19 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        }
 
        x = xfrm_input_state(skb);
-       family = x->inner_mode->afinfo->family;
+
+       inner_mode = x->inner_mode;
+
+       if (x->sel.family == AF_UNSPEC) {
+               inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+               if (inner_mode == NULL) {
+                       XFRM_INC_STATS(dev_net(skb->dev),
+                                      LINUX_MIB_XFRMINSTATEMODEERROR);
+                       return -EINVAL;
+               }
+       }
+
+       family = inner_mode->afinfo->family;
 
        skb->mark = be32_to_cpu(t->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
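
Context for this hunk: on inter-family tunnels (IPv4-in-IPv6 and vice versa) the state's selector family is AF_UNSPEC, so x->inner_mode alone does not identify the decapsulated payload's family. xfrm_ip2inner_mode() picks the mode from the inner IP protocol instead; its definition in include/net/xfrm.h is roughly (hedged sketch from memory):

    static inline struct xfrm_mode *
    xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
    {
            /* matching family: the normal inner mode applies */
            if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
                (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
                    return x->inner_mode;
            /* otherwise fall back to the inter-address-family mode */
            return x->inner_mode_iaf;
    }
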
index 6122f9c5cc496d2ad329cd4d286c37ddd6239012..fccb5dd91902891a6ef7bb6154b2a5cf6b1c3792 100644 (file)
@@ -2239,6 +2239,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        struct rta_mfc_stats mfcs;
        struct nlattr *mp_attr;
        struct rtnexthop *nhp;
+       unsigned long lastuse;
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2269,12 +2270,14 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 
        nla_nest_end(skb, mp_attr);
 
+       lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+       lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
        if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-           nla_put_u64_64bit(skb, RTA_EXPIRES,
-                             jiffies_to_clock_t(c->mfc_un.res.lastuse),
+           nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                              RTA_PAD))
                return -EMSGSIZE;
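
Two details worth spelling out here: READ_ONCE() snapshots lastuse exactly once even though the forwarding path may update it concurrently, and time_after_eq() is the wrap-safe way to compare jiffies values. Simplified from include/linux/jiffies.h (the real macro also type-checks its arguments):

    #define time_after_eq(a, b)     ((long)((a) - (b)) >= 0)

so jiffies - lastuse is only reported as an age once jiffies has actually reached lastuse, even across counter wraparound.
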
 
index 71d995ff3108fe001fc2318aa0e7c76c3cd9b1f8..2535223ba9569112d84dae7d3d22b4816fd53497 100644 (file)
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
        struct in6_addr saddr, daddr;
        u_int8_t hop_limit;
        u32 mark, flowlabel;
+       int err;
 
        /* malformed packet, drop it */
        if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
@@ -46,13 +47,16 @@ static unsigned int nf_route_table_hook(void *priv,
        flowlabel = *((u32 *)ipv6_hdr(skb));
 
        ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_QUEUE &&
+       if (ret != NF_DROP && ret != NF_STOLEN &&
            (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
             memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
             skb->mark != mark ||
             ipv6_hdr(skb)->hop_limit != hop_limit ||
-            flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
-               return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP;
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+               err = ip6_route_me_harder(state->net, skb);
+               if (err < 0)
+                       ret = NF_DROP_ERR(err);
+       }
 
        return ret;
 }
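
Previously a failed ip6_route_me_harder() collapsed into a bare NF_DROP; NF_DROP_ERR() preserves the errno for the caller. As an assumption about this era's netfilter headers, the macro packs the negated error into the upper bits of the verdict, roughly:

    #define NF_DROP_ERR(x)  (((-x) << 16) | NF_DROP)

letting the hook infrastructure propagate the real error to the sender instead of dropping silently.
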
index 533cd5719c594e7664c162b59037bb43a10c717c..92bda9908bb9a354e701fab450549b43e2f22bfa 100644 (file)
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
        .eval           = nft_reject_ipv6_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
index fed40d1ec29b1fc331ad04b5a7173b183a116279..0e983b694ee805dc662a49ae5f6c9438b5ed931d 100644 (file)
@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct icmp6hdr user_icmph;
        int addr_type;
        struct in6_addr *daddr;
-       int iif = 0;
+       int oif = 0;
        struct flowi6 fl6;
        int err;
        struct dst_entry *dst;
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                if (u->sin6_family != AF_INET6) {
                        return -EAFNOSUPPORT;
                }
-               if (sk->sk_bound_dev_if &&
-                   sk->sk_bound_dev_if != u->sin6_scope_id) {
-                       return -EINVAL;
-               }
                daddr = &(u->sin6_addr);
-               iif = u->sin6_scope_id;
+               if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+                       oif = u->sin6_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = &sk->sk_v6_daddr;
        }
 
-       if (!iif)
-               iif = sk->sk_bound_dev_if;
+       if (!oif)
+               oif = sk->sk_bound_dev_if;
+
+       if (!oif)
+               oif = np->sticky_pktinfo.ipi6_ifindex;
+
+       if (!oif && ipv6_addr_is_multicast(daddr))
+               oif = np->mcast_oif;
+       else if (!oif)
+               oif = np->ucast_oif;
 
        addr_type = ipv6_addr_type(daddr);
-       if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
-               return -EINVAL;
-       if (addr_type & IPV6_ADDR_MAPPED)
+       if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+           (addr_type & IPV6_ADDR_MAPPED) ||
+           (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
                return -EINVAL;
 
        /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        fl6.flowi6_proto = IPPROTO_ICMPV6;
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
+       fl6.flowi6_oif = oif;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-               fl6.flowi6_oif = np->mcast_oif;
-       else if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->ucast_oif;
-
        ipc6.tclass = np->tclass;
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
@@ -125,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        rt = (struct rt6_info *) dst;
 
        np = inet6_sk(sk);
-       if (!np)
-               return -EBADF;
+       if (!np) {
+               err = -EBADF;
+               goto dst_err_out;
+       }
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
@@ -162,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
        release_sock(sk);
 
+dst_err_out:
+       dst_release(dst);
+
        if (err)
                return err;
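
The rewrite above gives ping_v6_sendmsg() a well-defined precedence for the outgoing interface, plus stricter validation; the final hunk also plugs a dst leak on the !np error path by routing it through dst_release(). Condensed sketch of the resulting oif selection (scoped() and is_mcast() are shorthand here, not kernel helpers):

    int oif = 0;

    if (scoped(daddr))  oif = u->sin6_scope_id;                  /* 1. explicit scope id   */
    if (!oif)           oif = sk->sk_bound_dev_if;               /* 2. SO_BINDTODEVICE     */
    if (!oif)           oif = np->sticky_pktinfo.ipi6_ifindex;   /* 3. sticky IPV6_PKTINFO */
    if (!oif)           oif = is_mcast(daddr) ? np->mcast_oif
                                              : np->ucast_oif;   /* 4. per-socket defaults */

    /* then -EINVAL for: a scoped address with no oif, a v4-mapped
     * destination, or an oif contradicting the bound device */
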
 
index 49817555449e2956fcfe81555e646b5f9b53261a..e3a224b97905585287571ec2ce2c238580392793 100644 (file)
@@ -1986,9 +1986,18 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
                        if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
-                       if (cfg->fc_table)
+                       if (cfg->fc_table) {
                                grt = ip6_nh_lookup_table(net, cfg, gw_addr);
 
+                               if (grt) {
+                                       if (grt->rt6i_flags & RTF_GATEWAY ||
+                                           (dev && dev != grt->dst.dev)) {
+                                               ip6_rt_put(grt);
+                                               grt = NULL;
+                                       }
+                               }
+                       }
+
                        if (!grt)
                                grt = rt6_lookup(net, gw_addr, NULL,
                                                 cfg->fc_ifindex, 1);
index 33df8b8575cceb6f4bf50d9a50ab5a6afa79cd86..94f4f89d73e791ba2ae7bdc7e7ac5bd7bc66a8d4 100644 (file)
@@ -944,9 +944,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
+       /* RFC 7323 2.3
+        * The window field (SEG.WND) of every outgoing segment, with the
+        * exception of <SYN> segments, MUST be right-shifted by
+        * Rcv.Wind.Shift bits:
+        */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-                       tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+                       tcp_rsk(req)->rcv_nxt,
+                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
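
A worked example of why the shift matters: the TCP header's window field is 16 bits, so large windows are expressed in units of 2^Rcv.Wind.Shift:

    u32 rcv_wnd = 1048576;               /* 1 MiB receive window          */
    u8  rcv_wscale = 7;                  /* negotiated window scale       */
    u16 seg_wnd = rcv_wnd >> rcv_wscale; /* 8192 is what goes on the wire */

Sending the unshifted value would be truncated to 0 in the 16-bit field, collapsing the window advertised by these ACKs.
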
index 81e2f98b958d4d42d137a4ae2d71c4c7217c71d6..19ac3a1c308dbfacf136e262dbab27ad280cfdcd 100644 (file)
@@ -1460,7 +1460,6 @@ struct proto udpv6_prot = {
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp6_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udp_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udpv6_setsockopt,
index 9cf097e206e931c6e3c184f22b8adcabedc4c03a..fd6ef414899bea824ce26c2e738f4e96aa8af464 100644 (file)
@@ -50,7 +50,6 @@ struct proto udplitev6_prot = {
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v6_get_port,
        .obj_size          = sizeof(struct udp6_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udplite_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udpv6_setsockopt,
index 0eaab1fa6be5751734d98d900ae4d296ca12651e..b5789562aded9274f706225df3f69e45efac5008 100644 (file)
@@ -21,8 +21,10 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm6_extract_header(skb);
 }
 
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+                 struct ip6_tnl *t)
 {
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        return xfrm_input(skb, nexthdr, spi, 0);
@@ -48,13 +50,18 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
        return -1;
 }
 
-int xfrm6_rcv(struct sk_buff *skb)
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
 {
        return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
-                            0);
+                            0, t);
 }
-EXPORT_SYMBOL(xfrm6_rcv);
+EXPORT_SYMBOL(xfrm6_rcv_tnl);
 
+int xfrm6_rcv(struct sk_buff *skb)
+{
+       return xfrm6_rcv_tnl(skb, NULL);
+}
+EXPORT_SYMBOL(xfrm6_rcv);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto)
 {
index 6cc97003e4a9547462840c113fcf11be4bb92ed3..70a86adad875b66d3643ecfb073ea7681a53908d 100644 (file)
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_oif = oif;
+       fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
        fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
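
This mirrors the xfrm4_policy.c hunk earlier in this section. The assumption behind both: l3mdev_master_ifindex_by_index() returns the VRF master's ifindex for an enslaved (or master) device and 0 for ordinary devices, so a caller-supplied oif constrains the policy lookup only when VRF is actually involved:

    oif = l3mdev_master_ifindex_by_index(net, oif);
    /* eth0, no VRF      -> 0             : lookup unconstrained, as before
     * eth0 in vrf-blue  -> blue's ifindex: lookup stays inside the VRF   */
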
index 5743044cd660b8a8557d1a87c4ee35508ebff7e0..e1c0bbe7996cf8ca00374db26488bd85e02a36ad 100644 (file)
@@ -236,7 +236,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
        __be32 spi;
 
        spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
-       return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
+       return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index 8d2f7c9b491da5b7e7b13f90f2aa9c5d88f84faf..ccc244406fb959b11587a7ba3c2eb6e305d6a889 100644 (file)
@@ -832,7 +832,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        struct sock *sk = sock->sk;
        struct irda_sock *new, *self = irda_sk(sk);
        struct sock *newsk;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        int err;
 
        err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
@@ -900,7 +900,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        err = -EPERM; /* value does not seem to make sense. -arnd */
        if (!new->tsap) {
                pr_debug("%s(), dup failed!\n", __func__);
-               kfree_skb(skb);
                goto out;
        }
 
@@ -919,7 +918,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        /* Clean up the original one to keep it in listen state */
        irttp_listen(self->tsap);
 
-       kfree_skb(skb);
        sk->sk_ack_backlog--;
 
        newsock->state = SS_CONNECTED;
@@ -927,6 +925,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        irda_connect_response(new);
        err = 0;
 out:
+       kfree_skb(skb);
        release_sock(sk);
        return err;
 }
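
The fix relies on kfree_skb() being a no-op for NULL: with skb initialized to NULL, every exit path can funnel through the single free at the out label, so early error paths that never dequeued an skb are safe and later ones no longer leak. The general pattern:

    struct sk_buff *skb = NULL;

    /* ... any error path simply does 'goto out', whether or not
     * skb was ever assigned ... */
    out:
            kfree_skb(skb);     /* safe: no-op when skb == NULL */
            release_sock(sk);
            return err;
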
index 4a7ae32afa09b90fab1d57bcb807ca2b8a1200c6..1138eaf5c6829ac08431a91c31fb3339dfe6213d 100644 (file)
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
 
        self->magic = IAS_MAGIC;
        self->mode = mode;
-       if (mode == IAS_CLIENT)
-               iriap_register_lsap(self, slsap_sel, mode);
+       if (mode == IAS_CLIENT) {
+               if (iriap_register_lsap(self, slsap_sel, mode)) {
+                       kfree(self);
+                       return NULL;
+               }
+       }
 
        self->confirm = callback;
        self->priv = priv;
index cb39e05b166cf5eaa0729b775f3ee0b8a140398a..411693288648dc3eb27b0b2a0c542be92762ad8a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/socket.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
+#include <linux/syscalls.h>
 #include <net/kcm.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -2029,7 +2030,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if (copy_to_user((void __user *)arg, &info,
                                         sizeof(info))) {
                                err = -EFAULT;
-                               sock_release(newsock);
+                               sys_close(info.fd);
                        }
                }
 
index 1e40dacaa137f00df3df79c0faf4eb4de65e86c2..a2ed3bda4ddcd70071bc8ac87120e5591d9dd681 100644 (file)
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
                (void)l2tp_tunnel_delete(tunnel);
        }
        rcu_read_unlock_bh();
+
+       flush_workqueue(l2tp_wq);
+       rcu_barrier();
 }
 
 static struct pernet_operations l2tp_net_ops = {
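
The ordering matters for namespace teardown: l2tp_tunnel_delete() defers destruction to a workqueue, and parts of the cleanup are RCU-deferred, so both must drain before the per-netns data is freed:

    flush_workqueue(l2tp_wq);   /* all queued tunnel teardown has run    */
    rcu_barrier();              /* all pending call_rcu() callbacks done */
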
index d9560aa2dba38d87561548b5ff8fae897e034810..232cb92033e8225f76d72deb5af5ef93c4d6187f 100644 (file)
@@ -856,7 +856,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
        error = -ENOTCONN;
        if (sk == NULL)
                goto end;
-       if (sk->sk_state != PPPOX_CONNECTED)
+       if (!(sk->sk_state & PPPOX_CONNECTED))
                goto end;
 
        error = -EBADF;
index a9aff6079c42a39cab954d49a499d8c4caa5fa2f..afa94687d5e15b67c4c57f5991897f3bc5cb995d 100644 (file)
@@ -261,10 +261,16 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                .timeout = timeout,
                .ssn = start_seq_num,
        };
-
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
+       if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
+               ht_dbg(sta->sdata,
+                      "STA %pM requests BA session on unsupported tid %d\n",
+                      sta->sta.addr, tid);
+               goto end_no_lock;
+       }
+
        if (!sta->sta.ht_cap.ht_supported) {
                ht_dbg(sta->sdata,
                       "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
index 5650c46bf91a6f51bacc6d6516beeb3823528837..45319cc01121a9eb17d49185a07a9675e7b2995d 100644 (file)
@@ -584,6 +584,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
            ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
                return -EINVAL;
 
+       if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
+               return -EINVAL;
+
        ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
               pubsta->addr, tid);
 
index 47e99ab8d97a93697790a5de0c373af16869bad7..543b1d4fc33d563f7a4234c92ff7d22d92e01265 100644 (file)
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
        /* free all potentially still buffered bcast frames */
        local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
-       skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+       ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
 
        mutex_lock(&local->mtx);
        ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
index 184473c257eb9aeca282134e4fd7ba715b074a19..ba5fc1f01e5353da7c0d8bf3a28156abb83a92ca 100644 (file)
@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
 
        trace_drv_get_expected_throughput(sta);
        if (local->ops->get_expected_throughput)
-               ret = local->ops->get_expected_throughput(sta);
+               ret = local->ops->get_expected_throughput(&local->hw, sta);
        trace_drv_return_u32(local, ret);
 
        return ret;
index c66411df986311c12e78b0243e3a18b7c3070366..42120d965263d2ec1719211da37b7900814e4122 100644 (file)
@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
        netif_carrier_off(sdata->dev);
 
+       /* flush STAs and mpaths on this iface */
+       sta_info_flush(sdata);
+       mesh_path_flush_by_iface(sdata);
+
        /* stop the beacon */
        ifmsh->mesh_id_len = 0;
        sdata->vif.bss_conf.enable_beacon = false;
        clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+
+       /* remove beacon */
        bcn = rcu_dereference_protected(ifmsh->beacon,
                                        lockdep_is_held(&sdata->wdev.mtx));
        RCU_INIT_POINTER(ifmsh->beacon, NULL);
        kfree_rcu(bcn, rcu_head);
 
-       /* flush STAs and mpaths on this iface */
-       sta_info_flush(sdata);
-       mesh_path_flush_by_iface(sdata);
-
        /* free all potentially still buffered group-addressed frames */
        local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
        skb_queue_purge(&ifmsh->ps.bc_buf);
index 8f9c3bde835f4a3a8f1c777c0b2af0fea81acf2d..faccef9776707b63b09bec88fbb5098b3d0ec32c 100644 (file)
@@ -746,6 +746,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
                sta = next_hop_deref_protected(mpath);
                if (mpath->flags & MESH_PATH_ACTIVE &&
                    ether_addr_equal(ta, sta->sta.addr) &&
+                   !(mpath->flags & MESH_PATH_FIXED) &&
                    (!(mpath->flags & MESH_PATH_SN_VALID) ||
                    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -1012,7 +1013,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                goto enddiscovery;
 
        spin_lock_bh(&mpath->state_lock);
-       if (mpath->flags & MESH_PATH_DELETED) {
+       if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
                spin_unlock_bh(&mpath->state_lock);
                goto enddiscovery;
        }
index 6db2ddfa0695fe7c5e2f6d4071de3a5a97b3df55..f0e6175a9821f01d7aac2dfbda02c1ee5eeb31ec 100644 (file)
@@ -826,7 +826,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
-       mpath->flags |= MESH_PATH_FIXED;
+       mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
index 2e8a9024625a2ae49ac046bccac671c77911120f..9dce3b157908b3d229fb293a8f5acbce9057ea63 100644 (file)
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
        for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
                struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
 
-               if (!txqi->tin.backlog_packets)
+               if (txqi->tin.backlog_packets)
                        set_bit(tid, &sta->txq_buffered_tids);
                else
                        clear_bit(tid, &sta->txq_buffered_tids);
index 76b737dcc36f87fc73b014be8a1d07ce9bbd8714..aa58df80ede0210643714bc504ed3ee9f9791b02 100644 (file)
@@ -1616,7 +1616,6 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
 
                sta_info_recalc_tim(sta);
        } else {
-               unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
                int tid;
 
                /*
@@ -1648,7 +1647,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
                for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
                        struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
 
-                       if (!(tids & BIT(tid)) || txqi->tin.backlog_packets)
+                       if (!(driver_release_tids & BIT(tid)) ||
+                           txqi->tin.backlog_packets)
                                continue;
 
                        sta_info_recalc_tim(sta);
index c6d5c724e0326e04921969c9f00d11fac777749d..a2a68269675de8d06236e4d97ab46a617715445b 100644 (file)
@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        clear_sta_flag(sta, WLAN_STA_SP);
 
                acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+
+               /* mesh Peer Service Period support */
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+                   ieee80211_is_data_qos(fc))
+                       ieee80211_mpsp_trigger_process(
+                               ieee80211_get_qos_ctl(hdr), sta, true, acked);
+
                if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
                        /*
                         * The STA is in power save mode, so assume
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        return;
                }
 
-               /* mesh Peer Service Period support */
-               if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
-                   ieee80211_is_data_qos(fc))
-                       ieee80211_mpsp_trigger_process(
-                                       ieee80211_get_qos_ctl(hdr),
-                                       sta, true, acked);
-
                if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
                    (ieee80211_is_data(hdr->frame_control)) &&
                    (rates_idx != -1))
index b5d28f14b9cff950e6896b7fb6df710d5a3b7c20..afca7d103684bbc8c953ae13bb0345934d8f5f35 100644 (file)
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        if (!uc.center_freq1)
                return;
 
-       /* proceed to downgrade the chandef until usable or the same */
+       /* proceed to downgrade the chandef until usable or the same as AP BW */
        while (uc.width > max_width ||
-              !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
-                                             sdata->wdev.iftype))
+              (uc.width > sta->tdls_chandef.width &&
+               !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+                                              sdata->wdev.iftype)))
                ieee80211_chandef_downgrade(&uc);
 
        if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
index 91461c4155255eab59f570d85ee4104d23ef57a4..18b285e06bc89af3393fd35d30e4ed02e4e433cb 100644 (file)
@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
                skb = skb_dequeue(&ps->bc_buf);
                if (skb) {
                        purged++;
-                       dev_kfree_skb(skb);
+                       ieee80211_free_txskb(&local->hw, skb);
                }
                total += skb_queue_len(&ps->bc_buf);
        }
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
                ps_dbg(tx->sdata,
                       "BC TX buffer full - dropping the oldest frame\n");
-               dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+               ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
        } else
                tx->local->total_ps_buffered++;
 
@@ -796,6 +796,36 @@ static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
        return ret;
 }
 
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_sta *pubsta,
+                                         struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_txq *txq = NULL;
+
+       if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+           (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+               return NULL;
+
+       if (!ieee80211_is_data(hdr->frame_control))
+               return NULL;
+
+       if (pubsta) {
+               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+               txq = pubsta->txq[tid];
+       } else if (vif) {
+               txq = vif->txq;
+       }
+
+       if (!txq)
+               return NULL;
+
+       return to_txq_info(txq);
+}
+
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 {
@@ -853,7 +883,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        tx->sta->tx_stats.msdu[tid]++;
 
-       if (!tx->sta->sta.txq[0])
+       if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
+                              tx->skb))
                hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
 
        return TX_CONTINUE;
@@ -1243,36 +1274,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        return TX_CONTINUE;
 }
 
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
-                                         struct ieee80211_vif *vif,
-                                         struct ieee80211_sta *pubsta,
-                                         struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_txq *txq = NULL;
-
-       if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
-           (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
-               return NULL;
-
-       if (!ieee80211_is_data(hdr->frame_control))
-               return NULL;
-
-       if (pubsta) {
-               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
-               txq = pubsta->txq[tid];
-       } else if (vif) {
-               txq = vif->txq;
-       }
-
-       if (!txq)
-               return NULL;
-
-       return to_txq_info(txq);
-}
-
 static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
 {
        IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@ -1514,8 +1515,12 @@ out:
        spin_unlock_bh(&fq->lock);
 
        if (skb && skb_has_frag_list(skb) &&
-           !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
-               skb_linearize(skb);
+           !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+               if (skb_linearize(skb)) {
+                       ieee80211_free_txskb(&local->hw, skb);
+                       return NULL;
+               }
+       }
 
        return skb;
 }
@@ -3264,7 +3269,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 
        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
                *ieee80211_get_qos_ctl(hdr) = tid;
-               if (!sta->sta.txq[0])
+               if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
                        hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
        } else {
                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -4275,7 +4280,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                        sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
                if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
                        break;
-               dev_kfree_skb_any(skb);
+               ieee80211_free_txskb(hw, skb);
        }
 
        info = IEEE80211_SKB_CB(skb);
index dd2c43abf9e28a4a25f35440bed5475b681ca4e5..9934b0c93c1e4513dc58cb2bf0154bfe89642a56 100644 (file)
@@ -1035,9 +1035,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;
 
-       if (tmpl && nfct_synproxy(tmpl)) {
-               nfct_seqadj_ext_add(ct);
-               nfct_synproxy_ext_add(ct);
+       if (!nf_ct_add_synproxy(ct, tmpl)) {
+               nf_conntrack_free(ct);
+               return ERR_PTR(-ENOMEM);
        }
 
        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
index 9e3693128313ffd57845fcabc28916f4b86515c2..f8dbacf66795d929a220d1acf35ea40298ec3be4 100644 (file)
@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
-               if (helper->expect_policy[expect->class].name)
+               if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }
index bb77a97961bfde7b64a74aabdeafe405f73090ec..5c0db5c64734a98d909e7e329ac7aecc54cfb431 100644 (file)
@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
                                 "timeout to %u seconds for",
                                 info->timeout);
                        nf_ct_dump_tuple(&exp->tuple);
-                       mod_timer(&exp->timeout, jiffies + info->timeout * HZ);
+                       mod_timer_pending(&exp->timeout,
+                                         jiffies + info->timeout * HZ);
                }
                spin_unlock_bh(&nf_conntrack_expect_lock);
        }
index 050bb3420a6baf2e75f1b6eac080fe0b92b7e6f6..fdfc71f416b7a2d084c3eb38d85affaf15776fd2 100644 (file)
@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
                        if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
                                return -EINVAL;
+                       if (otuple.dst.protonum != rtuple.dst.protonum)
+                               return -EINVAL;
 
                        ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
                                                        &rtuple, u3);
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
                return PTR_ERR(exp);
 
        err = nf_ct_expect_related_report(exp, portid, report);
-       if (err < 0) {
-               nf_ct_expect_put(exp);
-               return err;
-       }
-
-       return 0;
+       nf_ct_expect_put(exp);
+       return err;
 }
 
 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
index 8d9db9d4702b06965a896b3e85a3254aa2e3191b..7d77217de6a3b4e7821452f721c0902ca6fecd81 100644 (file)
@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
                return NF_DROP;
        }
        cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-       if (!cseq) {
+       if (!cseq && *(*dptr + matchoff) != '0') {
                nf_ct_helper_log(skb, ct, "cannot get cseq");
                return NF_DROP;
        }
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
                        return NF_DROP;
                }
                cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-               if (!cseq) {
+               if (!cseq && *(*dptr + matchoff) != '0') {
                        nf_ct_helper_log(skb, ct, "cannot get cseq");
                        return NF_DROP;
                }
index 958a1455ca7f2c1718e38730479876ffa6477779..9f267c3ffb39856891d72f42da31cbfa44f5048d 100644 (file)
@@ -205,6 +205,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
+       struct net *net = seq_file_net(s);
        int ret = 0;
 
        NF_CT_ASSERT(ct);
@@ -215,6 +216,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
        if (NF_CT_DIRECTION(hash))
                goto release;
 
+       if (!net_eq(nf_ct_net(ct), net))
+               goto release;
+
        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
index de31818417b88a6ebf04ee623d8f34c50bdc0f50..ecee105bbada7dca884d59bc4b1647f0f34f212d 100644 (file)
@@ -441,7 +441,8 @@ nf_nat_setup_info(struct nf_conn *ct,
                        ct->status |= IPS_DST_NAT;
 
                if (nfct_help(ct))
-                       nfct_seqadj_ext_add(ct);
+                       if (!nfct_seqadj_ext_add(ct))
+                               return NF_DROP;
        }
 
        if (maniptype == NF_NAT_MANIP_SRC) {
@@ -807,7 +808,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
        if (err < 0)
                return err;
 
-       return nf_nat_setup_info(ct, &range, manip);
+       return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
 }
 #else
 static int
index 5eefe4a355c640cf85c8328740a877c6909880ba..75d696f11045779bd08503bb49ec954abc63eeb4 100644 (file)
@@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
        if (!iph)
                return;
 
-       iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                return;
 
index 39eb1cc62e91fdda9608f96eb678f061f7b91a0c..fa24a5b398b1f889817b8873be1aa197bbf179d3 100644 (file)
@@ -237,7 +237,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
                break;
        case NFT_TRACETYPE_POLICY:
                if (nla_put_be32(skb, NFTA_TRACE_POLICY,
-                                info->basechain->policy))
+                                htonl(info->basechain->policy)))
                        goto nla_put_failure;
                break;
        }
index 1b4de4bd695865c9bb9b2ca70b8b309d902ccc10..d44d89b561275e25bb31fe2b0ed198d13995533c 100644 (file)
@@ -326,14 +326,14 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
 {
        int ret = 0;
 
-       /* we want to avoid races with nfnl_acct_find_get. */
-       if (atomic_dec_and_test(&cur->refcnt)) {
+       /* We want to avoid races with nfnl_acct_put. So we only decrease the
+        * refcnt to 0 when it is currently 1.
+        */
+       if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&cur->head);
                kfree_rcu(cur, rcu_head);
        } else {
-               /* still in use, restore reference counter. */
-               atomic_inc(&cur->refcnt);
                ret = -EBUSY;
        }
        return ret;
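
The race being closed: the old sequence decremented first and re-incremented on failure, so the refcount transiently read one lower than reality, and a concurrent nfnl_acct_find_get() could observe an object headed for deletion:

    /* old pattern -- transient dip even when deletion fails */
    if (atomic_dec_and_test(&cur->refcnt)) {
            /* delete */
    } else {
            atomic_inc(&cur->refcnt);   /* window: count was n-1 here */
    }

atomic_cmpxchg(&refcnt, 1, 0) only succeeds on the 1 -> 0 transition, i.e. when the deleter verifiably holds the last reference, and never dips otherwise. The same conversion is applied to cttimeout below.
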
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
 {
-       char *acct_name;
-       struct nf_acct *cur;
+       struct nf_acct *cur, *tmp;
        int ret = -ENOENT;
+       char *acct_name;
 
        if (!tb[NFACCT_NAME]) {
-               list_for_each_entry(cur, &net->nfnl_acct_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
                        nfnl_acct_try_del(cur);
 
                return 0;
@@ -443,7 +443,7 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_update);
 
-static void nfnl_overquota_report(struct nf_acct *nfacct)
+static void nfnl_overquota_report(struct net *net, struct nf_acct *nfacct)
 {
        int ret;
        struct sk_buff *skb;
@@ -458,11 +458,12 @@ static void nfnl_overquota_report(struct nf_acct *nfacct)
                kfree_skb(skb);
                return;
        }
-       netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
+       netlink_broadcast(net->nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
                          GFP_ATOMIC);
 }
 
-int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+                       struct nf_acct *nfacct)
 {
        u64 now;
        u64 *quota;
@@ -480,7 +481,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
 
        if (now >= *quota &&
            !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
-               nfnl_overquota_report(nfacct);
+               nfnl_overquota_report(net, nfacct);
        }
 
        return ret;
index 4cdcd969b64c8039ff07ba6d2d3e87a0e2c23564..139e0867e56e9e606942c98e75148eb17b2ec7eb 100644 (file)
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
                break;
        }
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
-
-       /* This protocol is not supportted, skip. */
-       if (l4proto->l4proto != l4num) {
-               ret = -EOPNOTSUPP;
-               goto err_proto_put;
-       }
-
        if (matching) {
                if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                        /* You cannot replace one timeout policy by another of
                         * different kind, sorry.
                         */
                        if (matching->l3num != l3num ||
-                           matching->l4proto->l4proto != l4num) {
-                               ret = -EINVAL;
-                               goto err_proto_put;
-                       }
-
-                       ret = ctnl_timeout_parse_policy(&matching->data,
-                                                       l4proto, net,
-                                                       cda[CTA_TIMEOUT_DATA]);
-                       return ret;
+                           matching->l4proto->l4proto != l4num)
+                               return -EINVAL;
+
+                       return ctnl_timeout_parse_policy(&matching->data,
+                                                        matching->l4proto, net,
+                                                        cda[CTA_TIMEOUT_DATA]);
                }
-               ret = -EBUSY;
+
+               return -EBUSY;
+       }
+
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               ret = -EOPNOTSUPP;
                goto err_proto_put;
        }
 
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
        const struct hlist_nulls_node *nn;
        unsigned int last_hsize;
        spinlock_t *lock;
-       int i;
+       int i, cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
+                       untimeout(h, timeout);
+               spin_unlock_bh(&pcpu->lock);
+       }
 
        local_bh_disable();
 restart:
@@ -330,16 +336,16 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
 {
        int ret = 0;
 
-       /* we want to avoid races with nf_ct_timeout_find_get. */
-       if (atomic_dec_and_test(&timeout->refcnt)) {
+       /* We want to avoid races with ctnl_timeout_put. So we only decrease
+        * the refcnt to 0 when it is currently 1.
+        */
+       if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&timeout->head);
                nf_ct_l4proto_put(timeout->l4proto);
                ctnl_untimeout(net, timeout);
                kfree_rcu(timeout, rcu_head);
        } else {
-               /* still in use, restore reference counter. */
-               atomic_inc(&timeout->refcnt);
                ret = -EBUSY;
        }
        return ret;
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
 {
-       struct ctnl_timeout *cur;
+       struct ctnl_timeout *cur, *tmp;
        int ret = -ENOENT;
        char *name;
 
        if (!cda[CTA_TIMEOUT_NAME]) {
-               list_for_each_entry(cur, &net->nfct_timeout_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
+                                        head)
                        ctnl_timeout_try_del(net, cur);
 
                return 0;
@@ -543,7 +550,9 @@ err:
 
 static void ctnl_timeout_put(struct ctnl_timeout *timeout)
 {
-       atomic_dec(&timeout->refcnt);
+       if (atomic_dec_and_test(&timeout->refcnt))
+               kfree_rcu(timeout, rcu_head);
+
        module_put(THIS_MODULE);
 }
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
@@ -591,7 +600,9 @@ static void __net_exit cttimeout_net_exit(struct net *net)
        list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
                list_del_rcu(&cur->head);
                nf_ct_l4proto_put(cur->l4proto);
-               kfree_rcu(cur, rcu_head);
+
+               if (atomic_dec_and_test(&cur->refcnt))
+                       kfree_rcu(cur, rcu_head);
        }
 }
 
index cbcfdfb586a6150d7b5e186356c1451b769a8ab2..6577db524ef672d7ba6aed4a39a0d7dc41baa226 100644 (file)
@@ -1147,6 +1147,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
 MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
 MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
 MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
+MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
 
 module_init(nfnetlink_log_init);
 module_exit(nfnetlink_log_fini);
index 5d36a0926b4a4859304fdd1808b428c87a2ee8c4..f49f45081acb2200cc8acaed205f82a3298888ba 100644 (file)
@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        int err;
 
-       queue = instance_lookup(q, queue_num);
-       if (!queue)
-               queue = verdict_instance_lookup(q, queue_num,
-                                               NETLINK_CB(skb).portid);
+       queue = verdict_instance_lookup(q, queue_num,
+                                       NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);
 
index ba7aed13e1749442d3add6fde2e6b377fd1ad39d..82c264e402781d8b8c52d04332c1b993e5c83fed 100644 (file)
@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
                           const struct nlattr * const tb[])
 {
        struct nft_exthdr *priv = nft_expr_priv(expr);
+       u32 offset, len;
 
        if (tb[NFTA_EXTHDR_DREG] == NULL ||
            tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
            tb[NFTA_EXTHDR_LEN] == NULL)
                return -EINVAL;
 
+       offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+       len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+
+       if (offset > U8_MAX || len > U8_MAX)
+               return -ERANGE;
+
        priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
-       priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
-       priv->len    = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+       priv->offset = offset;
+       priv->len    = len;
        priv->dreg   = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
 
        return nft_validate_register_store(ctx, priv->dreg, NULL,
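
priv->offset and priv->len are u8 fields, so without the explicit check a user-supplied 32-bit attribute would be silently truncated on assignment instead of rejected:

    u32 offset = 256;           /* from NFTA_EXTHDR_OFFSET   */
    u8  stored = offset;        /* silently becomes 0        */

    if (offset > U8_MAX)        /* the new path: fail loudly */
            return -ERANGE;
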
index 2863f3493038d8b1fd92eebb9c6d846227e99bf6..8a6bc7630912dea058df1b3ed896daa535ad95f1 100644 (file)
@@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data)
 {
+       struct nft_meta *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (priv->key != NFT_META_PKTTYPE)
+               return 0;
+
        switch (ctx->afi->family) {
        case NFPROTO_BRIDGE:
                hooks = 1 << NF_BR_PRE_ROUTING;
@@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
 
        return nft_chain_validate_hooks(ctx->chain, hooks);
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_validate);
 
 int nft_meta_set_init(const struct nft_ctx *ctx,
                      const struct nft_expr *expr,
@@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
                len = sizeof(u8);
                break;
        case NFT_META_PKTTYPE:
-               err = nft_meta_set_init_pkttype(ctx);
-               if (err)
-                       return err;
                len = sizeof(u8);
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       err = nft_meta_set_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
        err = nft_validate_register_load(priv->sreg, len);
        if (err < 0)
@@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
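
Several hunks in this series (nft_meta here, nft_reject, nft_reject_ipv6 and nft_reject_inet below) follow one pattern: the hook restriction is checked at init time and also exported as a .validate callback, so it is re-evaluated when rules are later bound into other chains, e.g. via jump or goto. Schematically, with hypothetical names:

    static const struct nft_expr_ops example_ops = {
            .init     = example_init,      /* calls example_validate() up front     */
            .validate = example_validate,  /* re-run whenever chain bindings change */
    };
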
index 6473936d05c67aa08b7c507bb2bfd8c836bdfcce..ffe9ae062d23e48fe39f9136e8e01d3737a852af 100644 (file)
@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                } else if (d > 0)
                        parent = parent->rb_right;
                else {
-found:
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
@@ -84,9 +83,12 @@ found:
                }
        }
 
-       if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
-               rbe = interval;
-               goto found;
+       if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+           nft_set_elem_active(&interval->ext, genmask) &&
+           !nft_rbtree_interval_end(interval)) {
+               spin_unlock_bh(&nft_rbtree_lock);
+               *ext = &interval->ext;
+               return true;
        }
 out:
        spin_unlock_bh(&nft_rbtree_lock);
index 0522fc9bfb0a88db513c480f45dff35a6f233000..c64de3f7379df551fa413a4af186f3c16886f112 100644 (file)
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_reject_policy);
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_LOCAL_IN) |
+                                       (1 << NF_INET_FORWARD) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_reject_validate);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
index 759ca5248a3d22c20ce19dad1b7a7649a4ec0d6f..e79d9ca2ffee0002e734ee27880c512da3066900 100644 (file)
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
                                const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code;
+       int icmp_code, err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
        .eval           = nft_reject_inet_eval,
        .init           = nft_reject_inet_init,
        .dump           = nft_reject_inet_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
index 7f4414d26a6622a46a6479049aadc557d128dbf3..663c4c3c907284254a09e692b84f2c60567545de 100644 (file)
@@ -127,6 +127,8 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                                                    daddr, dport,
                                                    in->ifindex);
 
+                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                               sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
                         * xt_socket, since xt_TPROXY needs 0 bound
@@ -195,6 +197,8 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                                                   daddr, ntohs(dport),
                                                   in->ifindex);
 
+                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                               sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
                         * xt_socket, since xt_TPROXY needs 0 bound
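
Both the v4 and v6 TPROXY lookups now take a socket reference with atomic_inc_not_zero() before the socket escapes the RCU-protected section, refusing any socket whose refcount has already dropped to zero (i.e. one that is concurrently being freed). The same acquire-only-if-live loop in portable C11 atomics (a sketch, not the kernel's implementation):

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0). */
static int get_ref_not_zero(atomic_int *refcnt)
{
        int old = atomic_load(refcnt);

        do {
                if (old == 0)
                        return 0;       /* already dying; caller must not use it */
        } while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
        return 1;
}

int main(void)
{
        atomic_int live = 2, dying = 0;

        printf("live:  %d (refcnt now %d)\n",
               get_ref_not_zero(&live), atomic_load(&live));
        printf("dying: %d (refcnt now %d)\n",
               get_ref_not_zero(&dying), atomic_load(&dying));
        return 0;
}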
index 3048a7e3a90a5a27887b7e4ff731d00098f2c928..cf327593852a2b75cf3d5eb9e9e8d07af27b17a8 100644 (file)
@@ -26,7 +26,7 @@ static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
        nfnl_acct_update(skb, info->nfacct);
 
-       overquota = nfnl_acct_overquota(skb, info->nfacct);
+       overquota = nfnl_acct_overquota(par->net, skb, info->nfacct);
 
        return overquota == NFACCT_UNDERQUOTA ? false : true;
 }
index c644c78ed485d7c690ef8284df4276c6d18703c7..e054a748ff2502f2d1a63b61056c64a6aa24059e 100644 (file)
@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
        struct nf_conntrack_l4proto *l4proto;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
-       enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        unsigned int dataoff;
        u8 protonum;
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 
        ct = nf_ct_tuplehash_to_ctrack(h);
 
-       ctinfo = ovs_ct_get_info(h);
-       if (ctinfo == IP_CT_NEW) {
-               /* This should not happen. */
-               WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
-       }
        skb->nfct = &ct->ct_general;
-       skb->nfctinfo = ctinfo;
+       skb->nfctinfo = ovs_ct_get_info(h);
        return ct;
 }
 
index 1a1fcec8869593a8c99710e2021baa45ebfdedc8..5aaf3babfc3fa0bf70b8a72ebd95c40962dd6ea2 100644 (file)
@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
                return ERR_CAST(dev);
        }
 
-       dev_change_flags(dev, dev->flags | IFF_UP);
+       err = dev_change_flags(dev, dev->flags | IFF_UP);
+       if (err < 0) {
+               rtnl_delete_link(dev);
+               rtnl_unlock();
+               ovs_vport_free(vport);
+               goto error;
+       }
+
        rtnl_unlock();
        return vport;
 error:
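
dev_change_flags() can fail, and the old code ignored that, leaking both the vport and the freshly created link when bringing the device up failed; the fix unwinds in reverse order of construction, and the following hunks apply the same treatment to the gre and vxlan vports. A compact sketch of the unwind discipline (stub helpers, illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct vport { int dummy; };

static struct vport *vport_alloc(void)  { return malloc(sizeof(struct vport)); }
static void vport_free(struct vport *v) { free(v); }
static int  create_link(void)           { return 0; }
static void delete_link(void)           { puts("link deleted"); }
static int  bring_up(void)              { return -ENODEV; }  /* force the failure path */

static struct vport *tnl_create(void)
{
        struct vport *vport = vport_alloc();
        int err;

        if (!vport)
                return NULL;
        if (create_link() < 0)
                goto free_vport;

        err = bring_up();
        if (err < 0) {
                delete_link();          /* undo in reverse order of construction */
                goto free_vport;
        }
        return vport;

free_vport:
        vport_free(vport);
        return NULL;
}

int main(void)
{
        printf("tnl_create -> %p\n", (void *)tnl_create());
        return 0;
}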
index 7f8897f33a67fe6512436aff86c43098c15e3445..0e72d95b0e8f1fcd2e6d1446fc0944b519c0500e 100644 (file)
@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
        struct net *net = ovs_dp_get_net(parms->dp);
        struct net_device *dev;
        struct vport *vport;
+       int err;
 
        vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
        if (IS_ERR(vport))
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
                return ERR_CAST(dev);
        }
 
-       dev_change_flags(dev, dev->flags | IFF_UP);
-       rtnl_unlock();
+       err = dev_change_flags(dev, dev->flags | IFF_UP);
+       if (err < 0) {
+               rtnl_delete_link(dev);
+               rtnl_unlock();
+               ovs_vport_free(vport);
+               return ERR_PTR(err);
+       }
 
+       rtnl_unlock();
        return vport;
 }
 
index 434e04c3a189b91512b30ab70e9a238b7d5d9df2..95c36147a6e136b1e11db6bbbb673c4b7f248e1a 100644 (file)
@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
 static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
 {
-       dev->needed_headroom = new_hr;
+       dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
 }
 
 static const struct net_device_ops internal_dev_netdev_ops = {
index 5eb7694348b5b82a3e80dc6262912eef441ec88e..7eb955e453e6d657d13d0aa7b35ce7c8b7de2f15 100644 (file)
@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
                return ERR_CAST(dev);
        }
 
-       dev_change_flags(dev, dev->flags | IFF_UP);
+       err = dev_change_flags(dev, dev->flags | IFF_UP);
+       if (err < 0) {
+               rtnl_delete_link(dev);
+               rtnl_unlock();
+               ovs_vport_free(vport);
+               goto error;
+       }
+
        rtnl_unlock();
        return vport;
 error:
index 1bb9e7ac9e14280a8a83dbf002f0bded91a3c5d6..ff83fb1ddd47fead1fc0f7141242991c3a1a1d1c 100644 (file)
@@ -425,6 +425,7 @@ struct rxrpc_call {
        spinlock_t              lock;
        rwlock_t                state_lock;     /* lock for state transition */
        atomic_t                usage;
+       atomic_t                skb_count;      /* Outstanding packets on this call */
        atomic_t                sequence;       /* Tx data packet sequence counter */
        u32                     local_abort;    /* local abort code */
        u32                     remote_abort;   /* remote abort code */
index 0b2832141bd079797deb27580dc3cbf59611b3fd..9bae21e66d6547580a9d575a3569a9303d3cc7ea 100644 (file)
@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
                        call->state = RXRPC_CALL_SERVER_ACCEPTING;
                        list_add_tail(&call->accept_link, &rx->acceptq);
                        rxrpc_get_call(call);
+                       atomic_inc(&call->skb_count);
                        nsp = rxrpc_skb(notification);
                        nsp->call = call;
 
index fc32aa5764a24268ecf09526f34969f5dc1e0845..e60cf65c223237fd1a9bbf8bd6e9d72a16742b32 100644 (file)
@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
        ASSERTCMP(sp->call, ==, NULL);
        sp->call = call;
        rxrpc_get_call(call);
+       atomic_inc(&call->skb_count);
 
        /* insert into the buffer in sequence order */
        spin_lock_bh(&call->lock);
@@ -734,6 +735,7 @@ all_acked:
                skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
                sp->call = call;
                rxrpc_get_call(call);
+               atomic_inc(&call->skb_count);
                spin_lock_bh(&call->lock);
                if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
                        BUG();
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
                sp->error = error;
                sp->call = call;
                rxrpc_get_call(call);
+               atomic_inc(&call->skb_count);
 
                spin_lock_bh(&call->lock);
                ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work)
                return;
        }
 
+       if (!call->conn)
+               goto skip_msg_init;
+
        /* there's a good chance we're going to have to send a message, so set
         * one up in advance */
        msg.msg_name    = &call->conn->params.peer->srx.transport;
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work)
        memset(iov, 0, sizeof(iov));
        iov[0].iov_base = &whdr;
        iov[0].iov_len  = sizeof(whdr);
+skip_msg_init:
 
        /* deal with events of a final nature */
        if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
index 91287c9d01bb460c56eda06d3d4e7ceea074f14e..ae057e0740f3de43260d6f482818925ae7992cc8 100644 (file)
@@ -275,6 +275,7 @@ error:
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);
 
+       set_bit(RXRPC_CALL_RELEASED, &call->flags);
        call->state = RXRPC_CALL_DEAD;
        rxrpc_put_call(call);
        _leave(" = %d", ret);
@@ -287,6 +288,7 @@ error:
         */
 found_user_ID_now_present:
        write_unlock(&rx->call_lock);
+       set_bit(RXRPC_CALL_RELEASED, &call->flags);
        call->state = RXRPC_CALL_DEAD;
        rxrpc_put_call(call);
        _leave(" = -EEXIST [%p]", call);
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call)
                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
-                       sp = rxrpc_skb(skb);
-                       if (sp->call) {
-                               ASSERTCMP(sp->call, ==, call);
-                               rxrpc_put_call(call);
-                               sp->call = NULL;
-                       }
-                       skb->destructor = NULL;
                        spin_unlock_bh(&call->lock);
 
+                       sp = rxrpc_skb(skb);
                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
 
        if (atomic_dec_and_test(&call->usage)) {
                _debug("call %d dead", call->debug_id);
+               WARN_ON(atomic_read(&call->skb_count) != 0);
                ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                rxrpc_queue_work(&call->destroyer);
        }
index 991a20d250930307cd0fb94ef4f474e8457264bd..70bb77818deab9393b79a6499dcf0f42ecbd29e6 100644 (file)
@@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
        if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
                _debug("already terminated");
                ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
-               skb->destructor = NULL;
-               sp->call = NULL;
-               rxrpc_put_call(call);
                rxrpc_free_skb(skb);
                return 0;
        }
@@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
        ret = 0;
 
 out:
-       /* release the socket buffer */
-       if (skb) {
-               skb->destructor = NULL;
-               sp->call = NULL;
-               rxrpc_put_call(call);
-               rxrpc_free_skb(skb);
-       }
+       rxrpc_free_skb(skb);
 
        _leave(" = %d", ret);
        return ret;
@@ -133,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
        struct rxrpc_skb_priv *sp;
        bool terminal;
        int ret, ackbit, ack;
+       u32 serial;
+       u8 flags;
 
        _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
 
        sp = rxrpc_skb(skb);
        ASSERTCMP(sp->call, ==, NULL);
+       flags = sp->hdr.flags;
+       serial = sp->hdr.serial;
 
        spin_lock(&call->lock);
 
@@ -200,8 +195,9 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 
        sp->call = call;
        rxrpc_get_call(call);
-       terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
-                   !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+       atomic_inc(&call->skb_count);
+       terminal = ((flags & RXRPC_LAST_PACKET) &&
+                   !(flags & RXRPC_CLIENT_INITIATED));
        ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOBUFS) {
@@ -213,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
        }
 
        skb = NULL;
+       sp = NULL;
 
        _debug("post #%u", seq);
        ASSERTCMP(call->rx_data_post, ==, seq);
        call->rx_data_post++;
 
-       if (sp->hdr.flags & RXRPC_LAST_PACKET)
+       if (flags & RXRPC_LAST_PACKET)
                set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
 
        /* if we've reached an out of sequence packet then we need to drain
@@ -234,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 
        spin_unlock(&call->lock);
        atomic_inc(&call->ackr_not_idle);
-       rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
+       rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
        _leave(" = 0 [posted]");
        return 0;
 
@@ -247,7 +244,7 @@ out:
 
 discard_and_ack:
        _debug("discard and ACK packet %p", skb);
-       __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+       __rxrpc_propose_ACK(call, ack, serial, true);
 discard:
        spin_unlock(&call->lock);
        rxrpc_free_skb(skb);
@@ -255,7 +252,7 @@ discard:
        return 0;
 
 enqueue_and_ack:
-       __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+       __rxrpc_propose_ACK(call, ack, serial, true);
 enqueue_packet:
        _net("defer skb %p", skb);
        spin_unlock(&call->lock);
@@ -575,13 +572,13 @@ done:
  * post connection-level events to the connection
  * - this includes challenges, responses and some aborts
  */
-static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
+static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
                                      struct sk_buff *skb)
 {
        _enter("%p,%p", conn, skb);
 
        skb_queue_tail(&conn->rx_queue, skb);
-       return rxrpc_queue_conn(conn);
+       rxrpc_queue_conn(conn);
 }
 
 /*
@@ -702,7 +699,6 @@ void rxrpc_data_ready(struct sock *sk)
 
        rcu_read_lock();
 
-retry_find_conn:
        conn = rxrpc_find_connection_rcu(local, skb);
        if (!conn)
                goto cant_route_call;
@@ -710,8 +706,7 @@ retry_find_conn:
        if (sp->hdr.callNumber == 0) {
                /* Connection-level packet */
                _debug("CONN %p {%d}", conn, conn->debug_id);
-               if (!rxrpc_post_packet_to_conn(conn, skb))
-                       goto retry_find_conn;
+               rxrpc_post_packet_to_conn(conn, skb);
        } else {
                /* Call-bound packets are routed by connection channel. */
                unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -749,6 +744,8 @@ cant_route_call:
        if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
                _debug("reject type %d",sp->hdr.type);
                rxrpc_reject_packet(local, skb);
+       } else {
+               rxrpc_free_skb(skb);
        }
        _leave(" [no call]");
        return;
index a3fa2ed85d6306af3e126d25ed6597f15e716e87..9ed66d533002df33c8f0167a6eb43d94d8d4132c 100644 (file)
@@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                }
 
                /* we transferred the whole data packet */
+               if (!(flags & MSG_PEEK))
+                       rxrpc_kernel_data_consumed(call, skb);
+
                if (sp->hdr.flags & RXRPC_LAST_PACKET) {
                        _debug("last");
                        if (rxrpc_conn_is_client(call->conn)) {
@@ -359,28 +362,6 @@ wait_error:
 
 }
 
-/**
- * rxrpc_kernel_data_delivered - Record delivery of data message
- * @skb: Message holding data
- *
- * Record the delivery of a data message.  This permits RxRPC to keep its
- * tracking correct.  The socket buffer will be deleted.
- */
-void rxrpc_kernel_data_delivered(struct sk_buff *skb)
-{
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct rxrpc_call *call = sp->call;
-
-       ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
-       ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
-       call->rx_data_recv = sp->hdr.seq;
-
-       ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
-       rxrpc_free_skb(skb);
-}
-
-EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
-
 /**
  * rxrpc_kernel_is_data_last - Determine if data message is last one
  * @skb: Message holding data
index eee0cfd9ac8c0d8c2a7cb4e52a213df0df2f6082..06c51d4b622d67ae77d54813a668e4f722a4c8a9 100644 (file)
@@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
        spin_unlock_bh(&call->lock);
 }
 
+/**
+ * rxrpc_kernel_data_consumed - Record consumption of data message
+ * @call: The call to which the message pertains.
+ * @skb: Message holding data
+ *
+ * Record the consumption of a data message and generate an ACK if appropriate.
+ * The call state is shifted if this was the final packet.  The caller must be
+ * in process context with no spinlocks held.
+ *
+ * TODO: Actually generate the ACK here rather than punting this to the
+ * workqueue.
+ */
+void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
+
+       ASSERTCMP(sp->call, ==, call);
+       ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
+
+       /* TODO: Fix the sequence number tracking */
+       ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
+       ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
+       ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
+
+       call->rx_data_recv = sp->hdr.seq;
+       rxrpc_hard_ACK_data(call, sp);
+}
+EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
+
 /*
- * destroy a packet that has an RxRPC control buffer
- * - advance the hard-ACK state of the parent call (done here in case something
- *   in the kernel bypasses recvmsg() and steals the packet directly off of the
- *   socket receive queue)
+ * Destroy a packet that has an RxRPC control buffer
  */
 void rxrpc_packet_destructor(struct sk_buff *skb)
 {
@@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb)
        _enter("%p{%p}", skb, call);
 
        if (call) {
-               /* send the final ACK on a client call */
-               if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
-                       rxrpc_hard_ACK_data(call, sp);
+               if (atomic_dec_return(&call->skb_count) < 0)
+                       BUG();
                rxrpc_put_call(call);
                sp->call = NULL;
        }
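
Every site that attaches a call to an skb now pairs rxrpc_get_call() with an increment of the new call->skb_count, and the packet destructor decrements it, so the final put can warn if packets are still outstanding and the destructor can BUG on underflow (a sign of a double free). A user-space sketch of the debug shadow counter riding alongside the real refcount (types simplified):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct call {
        atomic_int usage;       /* real refcount */
        atomic_int skb_count;   /* debug: packets currently holding a ref */
};

static void attach_skb(struct call *c)
{
        atomic_fetch_add(&c->usage, 1);
        atomic_fetch_add(&c->skb_count, 1);
}

static void skb_destructor(struct call *c)
{
        /* underflow here would mean a double free */
        assert(atomic_fetch_sub(&c->skb_count, 1) > 0);
        atomic_fetch_sub(&c->usage, 1);
}

int main(void)
{
        struct call c = { .usage = 1, .skb_count = 0 };

        attach_skb(&c);
        skb_destructor(&c);
        /* on the final put, no skbs may remain outstanding */
        assert(atomic_load(&c.skb_count) == 0);
        puts("balanced");
        return 0;
}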
index e4a5f2607ffa2edb6c9c3206c96b578946edfa57..d09d0687594b0b5ae04ad54a2b937044522bfbea 100644 (file)
@@ -64,7 +64,6 @@ int __tcf_hash_release(struct tc_action *p, bool bind, bool strict)
                if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
                        if (p->ops->cleanup)
                                p->ops->cleanup(p, bind);
-                       list_del(&p->list);
                        tcf_hash_destroy(p->hinfo, p);
                        ret = ACT_P_DELETED;
                }
@@ -421,18 +420,19 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
        return res;
 }
 
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
-                   struct tcf_result *res)
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+                   int nr_actions, struct tcf_result *res)
 {
-       const struct tc_action *a;
-       int ret = -1;
+       int ret = -1, i;
 
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
                ret = TC_ACT_OK;
                goto exec_done;
        }
-       list_for_each_entry(a, actions, list) {
+       for (i = 0; i < nr_actions; i++) {
+               const struct tc_action *a = actions[i];
+
 repeat:
                ret = a->ops->act(skb, a, res);
                if (ret == TC_ACT_REPEAT)
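
tcf_action_exec() now walks a flat array of action pointers instead of a linked list threaded through the actions themselves, which is what lets the later hunks drop the list-splicing cleanup helpers. The control flow, including the TC_ACT_REPEAT re-dispatch of the same slot, in miniature (toy action type, not the tc API):

#include <stdio.h>

enum { ACT_OK, ACT_REPEAT, ACT_SHOT };

struct action {
        int (*act)(struct action *a);
        int fired;
};

static int repeat_once(struct action *a)
{
        return a->fired++ ? ACT_OK : ACT_REPEAT;   /* ask to be re-run once */
}

static int exec_actions(struct action **actions, int nr_actions)
{
        int ret = ACT_OK;
        int i;

        for (i = 0; i < nr_actions; i++) {
                struct action *a = actions[i];
repeat:
                ret = a->act(a);
                if (ret == ACT_REPEAT)
                        goto repeat;               /* same action, same slot */
                if (ret == ACT_SHOT)
                        break;                     /* terminal verdict */
        }
        return ret;
}

int main(void)
{
        struct action a = { .act = repeat_once };
        struct action *list[] = { &a };

        printf("verdict %d after %d invocations\n",
               exec_actions(list, 1), a.fired);
        return 0;
}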
@@ -754,16 +754,6 @@ err_out:
        return ERR_PTR(err);
 }
 
-static void cleanup_a(struct list_head *actions)
-{
-       struct tc_action *a, *tmp;
-
-       list_for_each_entry_safe(a, tmp, actions, list) {
-               list_del(&a->list);
-               kfree(a);
-       }
-}
-
 static int tca_action_flush(struct net *net, struct nlattr *nla,
                            struct nlmsghdr *n, u32 portid)
 {
@@ -905,7 +895,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                return ret;
        }
 err:
-       cleanup_a(&actions);
+       tcf_action_destroy(&actions, 0);
        return ret;
 }
 
@@ -942,15 +932,9 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
        ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
        if (ret)
-               goto done;
+               return ret;
 
-       /* dump then free all the actions after update; inserted policy
-        * stays intact
-        */
-       ret = tcf_add_notify(net, n, &actions, portid);
-       cleanup_a(&actions);
-done:
-       return ret;
+       return tcf_add_notify(net, n, &actions, portid);
 }
 
 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
index 141a06eeb1e502ec8273de1897302cef11c07e0c..e87cd81315e1340869d81a6535230ceb2648521a 100644 (file)
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
        u32 *tlv = (u32 *)(skbdata);
        u16 totlen = nla_total_size(dlen);      /*alignment + hdr */
        char *dptr = (char *)tlv + NLA_HDRLEN;
-       u32 htlv = attrtype << 16 | totlen;
+       u32 htlv = attrtype << 16 | dlen;
 
        *tlv = htonl(htlv);
        memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ife_release_meta_gen);
 
 int ife_validate_meta_u32(void *val, int len)
 {
-       if (len == 4)
+       if (len == sizeof(u32))
                return 0;
 
        return -EINVAL;
@@ -144,8 +144,8 @@ EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
 
 int ife_validate_meta_u16(void *val, int len)
 {
-       /* length will include padding */
-       if (len == NLA_ALIGN(2))
+       /* length will not include padding */
+       if (len == sizeof(u16))
                return 0;
 
        return -EINVAL;
@@ -652,12 +652,14 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                u8 *tlvdata = (u8 *)tlv;
                u16 mtype = tlv->type;
                u16 mlen = tlv->len;
+               u16 alen;
 
                mtype = ntohs(mtype);
                mlen = ntohs(mlen);
+               alen = NLA_ALIGN(mlen);
 
-               if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
-                                      (void *)(tlvdata + 4))) {
+               if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
+                                      (void *)(tlvdata + NLA_HDRLEN))) {
                        /* abuse overlimits to count when we receive metadata
                         * but don't have an ops for it
                         */
@@ -666,8 +668,8 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                        ife->tcf_qstats.overlimits++;
                }
 
-               tlvdata += mlen;
-               ifehdrln -= mlen;
+               tlvdata += alen;
+               ifehdrln -= alen;
                tlv = (struct meta_tlvhdr *)tlvdata;
        }
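
Two paired fixes: the encoder now stores the raw data length in the TLV header instead of the padded total (the removed totlen), and the decoder advances by the NLA-aligned length while slicing the payload from the unpadded one. In NLA-style TLVs the length field covers header plus payload without padding; only the stride to the next attribute is rounded up. A worked example of the arithmetic (NLA_HDRLEN = 4 and 4-byte alignment, as in the uapi netlink header):

#include <stdio.h>

#define NLA_HDRLEN      4
#define NLA_ALIGN(x)    (((x) + 3u) & ~3u)

int main(void)
{
        unsigned int dlen = 6;                  /* raw metadata bytes */
        unsigned int mlen = NLA_HDRLEN + dlen;  /* unpadded header + payload */
        unsigned int stride = NLA_ALIGN(mlen);  /* where the next TLV starts */

        printf("payload %u -> unpadded len %u, stride %u, payload slice %u\n",
               dlen, mlen, stride, mlen - NLA_HDRLEN);
        /* decoder: payload = mlen - NLA_HDRLEN; next TLV at NLA_ALIGN(mlen).
         * Stepping by the unpadded mlen (10 here) would land the next header
         * off the 4-byte boundary, which is the decode bug being fixed. */
        return 0;
}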
 
index b3c7e975fc9e255a412d704c6c08dd6d6df0d1ea..8a3be1d99775b0589bcb09f97e7bf347a4017868 100644 (file)
@@ -63,49 +63,8 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
                                 const struct tc_action_ops *ops)
 {
        struct tc_action_net *tn = net_generic(net, police_net_id);
-       struct tcf_hashinfo *hinfo = tn->hinfo;
-       int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
-       struct nlattr *nest;
-
-       spin_lock_bh(&hinfo->lock);
-
-       s_i = cb->args[0];
-
-       for (i = 0; i < (POL_TAB_MASK + 1); i++) {
-               struct hlist_head *head;
-               struct tc_action *p;
-
-               head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
-
-               hlist_for_each_entry_rcu(p, head, tcfa_head) {
-                       index++;
-                       if (index < s_i)
-                               continue;
-                       nest = nla_nest_start(skb, index);
-                       if (nest == NULL)
-                               goto nla_put_failure;
-                       if (type == RTM_DELACTION)
-                               err = tcf_action_dump_1(skb, p, 0, 1);
-                       else
-                               err = tcf_action_dump_1(skb, p, 0, 0);
-                       if (err < 0) {
-                               index--;
-                               nla_nest_cancel(skb, nest);
-                               goto done;
-                       }
-                       nla_nest_end(skb, nest);
-                       n_i++;
-               }
-       }
-done:
-       spin_unlock_bh(&hinfo->lock);
-       if (n_i)
-               cb->args[0] += n_i;
-       return n_i;
 
-nla_put_failure:
-       nla_nest_cancel(skb, nest);
-       goto done;
+       return tcf_generic_walker(tn, skb, cb, type, ops);
 }
 
 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@ -125,6 +84,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tc_action_net *tn = net_generic(net, police_net_id);
+       bool exists = false;
        int size;
 
        if (nla == NULL)
@@ -139,24 +99,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
        size = nla_len(tb[TCA_POLICE_TBF]);
        if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
                return -EINVAL;
+
        parm = nla_data(tb[TCA_POLICE_TBF]);
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
 
-       if (parm->index) {
-               if (tcf_hash_check(tn, parm->index, a, bind)) {
-                       if (ovr)
-                               goto override;
-                       /* not replacing */
-                       return -EEXIST;
-               }
-       } else {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, NULL, a,
                                      &act_police_ops, bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
+       } else {
+               tcf_hash_release(*a, bind);
+               if (!ovr)
+                       return -EEXIST;
        }
 
-override:
        police = to_police(*a);
        if (parm->rate.rate) {
                err = -ENOMEM;
index 843a716a4303e71cb39c7ad8d95106a6d5c5e872..a7c5645373afb02a90d2ad76595e6dfcf14fe6b7 100644 (file)
@@ -541,8 +541,12 @@ out:
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
-       INIT_LIST_HEAD(&exts->actions);
+       LIST_HEAD(actions);
+
+       tcf_exts_to_list(exts, &actions);
+       tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+       kfree(exts->actions);
+       exts->nr_actions = 0;
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_destroy);
@@ -554,7 +558,6 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
        {
                struct tc_action *act;
 
-               INIT_LIST_HEAD(&exts->actions);
                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
                                                "police", ovr,
@@ -563,14 +566,20 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                                return PTR_ERR(act);
 
                        act->type = exts->type = TCA_OLD_COMPAT;
-                       list_add(&act->list, &exts->actions);
+                       exts->actions[0] = act;
+                       exts->nr_actions = 1;
                } else if (exts->action && tb[exts->action]) {
-                       int err;
+                       LIST_HEAD(actions);
+                       int err, i = 0;
+
                        err = tcf_action_init(net, tb[exts->action], rate_tlv,
                                              NULL, ovr,
-                                             TCA_ACT_BIND, &exts->actions);
+                                             TCA_ACT_BIND, &actions);
                        if (err)
                                return err;
+                       list_for_each_entry(act, &actions, list)
+                               exts->actions[i++] = act;
+                       exts->nr_actions = i;
                }
        }
 #else
@@ -587,37 +596,49 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       LIST_HEAD(tmp);
+       struct tcf_exts old = *dst;
+
        tcf_tree_lock(tp);
-       list_splice_init(&dst->actions, &tmp);
-       list_splice(&src->actions, &dst->actions);
+       dst->nr_actions = src->nr_actions;
+       dst->actions = src->actions;
        dst->type = src->type;
        tcf_tree_unlock(tp);
-       tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
+
+       tcf_exts_destroy(&old);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-#define tcf_exts_first_act(ext)                                        \
-       list_first_entry_or_null(&(exts)->actions,              \
-                                struct tc_action, list)
+#ifdef CONFIG_NET_CLS_ACT
+static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
+{
+       if (exts->nr_actions == 0)
+               return NULL;
+       else
+               return exts->actions[0];
+}
+#endif
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
        struct nlattr *nest;
 
-       if (exts->action && !list_empty(&exts->actions)) {
+       if (exts->action && exts->nr_actions) {
                /*
                 * again for backward compatible mode - we want
                 * to work with both old and new modes of entering
                 * tc data even if iproute2  was newer - jhs
                 */
                if (exts->type != TCA_OLD_COMPAT) {
+                       LIST_HEAD(actions);
+
                        nest = nla_nest_start(skb, exts->action);
                        if (nest == NULL)
                                goto nla_put_failure;
-                       if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
+
+                       tcf_exts_to_list(exts, &actions);
+                       if (tcf_action_dump(skb, &actions, 0, 0) < 0)
                                goto nla_put_failure;
                        nla_nest_end(skb, nest);
                } else if (exts->police) {
index e95b67cd571844798de98dc4a97743ed89e0d37b..657c13362b19ca3726ab972a8d51b22ccd846c18 100644 (file)
@@ -643,18 +643,19 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
        struct Qdisc *sch;
 
        if (!try_module_get(ops->owner))
-               goto errout;
+               return NULL;
 
        sch = qdisc_alloc(dev_queue, ops);
-       if (IS_ERR(sch))
-               goto errout;
+       if (IS_ERR(sch)) {
+               module_put(ops->owner);
+               return NULL;
+       }
        sch->parent = parentid;
 
        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;
 
        qdisc_destroy(sch);
-errout:
        return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
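
qdisc_create_dflt() takes a module reference with try_module_get() before allocating, so every failure exit after that point must drop it; the old errout path returned with the reference still held, pinning the qdisc module. The symmetric acquire/release rule in a runnable sketch (stub refcount, illustrative only):

#include <stdio.h>

static int refs;

static int  module_get(void) { refs++; return 1; }
static void module_put(void) { refs--; }

static void *create(int fail_alloc)
{
        void *obj;

        if (!module_get())
                return NULL;

        obj = fail_alloc ? NULL : &refs;        /* stands in for qdisc_alloc() */
        if (!obj) {
                module_put();                   /* the release the old code missed */
                return NULL;
        }
        return obj;
}

int main(void)
{
        create(1);
        printf("refs after failed create: %d\n", refs);   /* 0, not 1 */
        return 0;
}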
index c182db7d691ff44a52923fb36c9170e49c141c04..1555fb8c68e0ed753e67d9c0c857a7d62be71743 100644 (file)
@@ -119,7 +119,13 @@ int sctp_rcv(struct sk_buff *skb)
                       skb_transport_offset(skb))
                goto discard_it;
 
-       if (!pskb_may_pull(skb, sizeof(struct sctphdr)))
+       /* If the packet is fragmented and we need to do CRC checking,
+        * it's better to just linearize it; otherwise computing the
+        * CRC takes longer.
+        */
+       if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+            skb_linearize(skb)) ||
+           !pskb_may_pull(skb, sizeof(struct sctphdr)))
                goto discard_it;
 
        /* Pull up the IP header. */
@@ -790,27 +796,34 @@ struct sctp_hash_cmp_arg {
 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
 {
+       struct sctp_transport *t = (struct sctp_transport *)ptr;
        const struct sctp_hash_cmp_arg *x = arg->key;
-       const struct sctp_transport *t = ptr;
-       struct sctp_association *asoc = t->asoc;
-       const struct net *net = x->net;
+       struct sctp_association *asoc;
+       int err = 1;
 
        if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
-               return 1;
-       if (!net_eq(sock_net(asoc->base.sk), net))
-               return 1;
+               return err;
+       if (!sctp_transport_hold(t))
+               return err;
+
+       asoc = t->asoc;
+       if (!net_eq(sock_net(asoc->base.sk), x->net))
+               goto out;
        if (x->ep) {
                if (x->ep != asoc->ep)
-                       return 1;
+                       goto out;
        } else {
                if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
-                       return 1;
+                       goto out;
                if (!sctp_bind_addr_match(&asoc->base.bind_addr,
                                          x->laddr, sctp_sk(asoc->base.sk)))
-                       return 1;
+                       goto out;
        }
 
-       return 0;
+       err = 0;
+out:
+       sctp_transport_put(t);
+       return err;
 }
 
 static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
@@ -1177,9 +1190,6 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
        if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
                return NULL;
 
-       if (skb_linearize(skb))
-               return NULL;
-
        ch = (sctp_chunkhdr_t *) skb->data;
 
        /* The code below will attempt to walk the chunk and extract
index c30ddb0f31907f57c5ce85b00dbe04260ca1cb2e..6437aa97cfd79f14c633499c2b131389204c435b 100644 (file)
@@ -170,19 +170,6 @@ next_chunk:
 
                chunk = list_entry(entry, struct sctp_chunk, list);
 
-               /* Linearize if it's not GSO */
-               if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
-                   skb_is_nonlinear(chunk->skb)) {
-                       if (skb_linearize(chunk->skb)) {
-                               __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
-                               sctp_chunk_free(chunk);
-                               goto next_chunk;
-                       }
-
-                       /* Update sctp_hdr as it probably changed */
-                       chunk->sctp_hdr = sctp_hdr(chunk->skb);
-               }
-
                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
                        /* GSO-marked skbs but without frags, handle
                         * them normally
index 1f1682b9a6a82cc18d2571cf8287cfd194c7840d..31b7bc35895d578d37fee79a8a56a074f43932fe 100644 (file)
@@ -878,7 +878,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
                                        u16 chunk_len)
 {
-       size_t psize, pmtu;
+       size_t psize, pmtu, maxsize;
        sctp_xmit_t retval = SCTP_XMIT_OK;
 
        psize = packet->size;
@@ -906,6 +906,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                        goto out;
                }
 
+               /* Similarly, if this chunk was built before a PMTU
+                * reduction, we have to fragment it at IP level now. So
+                * if the packet already contains something, we need to
+                * flush.
+                */
+               maxsize = pmtu - packet->overhead;
+               if (packet->auth)
+                       maxsize -= WORD_ROUND(packet->auth->skb->len);
+               if (chunk_len > maxsize)
+                       retval = SCTP_XMIT_PMTU_FULL;
+
                /* It is also okay to fragment if the chunk we are
                 * adding is a control chunk, but only if current packet
                 * is not a GSO one otherwise it causes fragmentation of
index 4cb5aedfe3ee2cf188385ef1f18cd63908a558ba..ef8ba77a5beace906ac3e83b3c0824a2134e6a63 100644 (file)
@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
                return ERR_PTR(err);
        }
 
+       iter->start_fail = 0;
        return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
index f69edcf219e514d864c4af2d57eb0429e8f4938a..f3508aa758154c7214a83c42f19821556cbc5fb4 100644 (file)
@@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
 {
        union sctp_addr laddr, paddr;
        struct dst_entry *dst;
+       struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
 
        laddr = list_entry(asoc->base.bind_addr.address_list.next,
                           struct sctp_sockaddr_entry, list)->a;
@@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
        }
 
        r->idiag_state = asoc->state;
-       r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
-       r->idiag_retrans = asoc->rtx_data_chunks;
-       r->idiag_expires = jiffies_to_msecs(
-               asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
+       if (timer_pending(t3_rtx)) {
+               r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+               r->idiag_retrans = asoc->rtx_data_chunks;
+               r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
+       } else {
+               r->idiag_timer = 0;
+               r->idiag_retrans = 0;
+               r->idiag_expires = 0;
+       }
 }
 
 static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
@@ -350,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
        if (cb->args[4] < cb->args[1])
                goto next;
 
-       if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+       if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
                goto next;
 
        if (r->sdiag_family != AF_UNSPEC &&
@@ -418,11 +424,13 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
                paddr.v4.sin_family = AF_INET;
        } else {
                laddr.v6.sin6_port = req->id.idiag_sport;
-               memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64);
+               memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
+                      sizeof(laddr.v6.sin6_addr));
                laddr.v6.sin6_family = AF_INET6;
 
                paddr.v6.sin6_port = req->id.idiag_dport;
-               memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64);
+               memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
+                      sizeof(paddr.v6.sin6_addr));
                paddr.v6.sin6_family = AF_INET6;
        }
 
@@ -465,7 +473,7 @@ skip:
         * 3 : to mark if we have dumped the ep info of the current asoc
         * 4 : to work as a temporary variable to traversal list
         */
-       if (!(idiag_states & ~TCPF_LISTEN))
+       if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;
        sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
 done:
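
Both changes fix state-mask tests: an endpoint that already has associations is reported only when the dump actually requested listening sockets, and the expensive transport walk is skipped when the filter asks for nothing beyond LISTEN and CLOSE. The mask logic, using the kernel's TCPF_* convention of one bit per TCP state:

#include <stdio.h>

/* TCPF_* are 1 << TCP_<state>, values as in the kernel's tcp_states.h */
#define TCPF_ESTABLISHED        (1 << 1)
#define TCPF_CLOSE              (1 << 7)
#define TCPF_LISTEN             (1 << 10)

static void classify(unsigned int states)
{
        printf("states %#05x: report busy endpoints: %s, walk transports: %s\n",
               states,
               states & TCPF_LISTEN ? "yes" : "no",
               states & ~(TCPF_LISTEN | TCPF_CLOSE) ? "yes" : "no");
}

int main(void)
{
        classify(TCPF_LISTEN);                    /* old code also walked transports here */
        classify(TCPF_ESTABLISHED | TCPF_CLOSE);
        return 0;
}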
index 1bc4f71aaba860776a0a6e4b664ef1c3a0f8af37..d85b803da11d21202d876c95811bcab4e1fb507e 100644 (file)
@@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         */
        sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
 
-       sctp_ulpevent_receive_data(event, asoc);
-
        /* And hold the chunk as we need it for getting the IP headers
         * later in recvmsg
         */
        sctp_chunk_hold(chunk);
        event->chunk = chunk;
 
+       sctp_ulpevent_receive_data(event, asoc);
+
        event->stream = ntohs(chunk->subh.data_hdr->stream);
        event->ssn = ntohs(chunk->subh.data_hdr->ssn);
        event->ppid = chunk->subh.data_hdr->ppid;
index 23c8e7c3965651ad5ee03ee617ad92d06646802f..976c7812bbd520e51d34eb542b15f0e4730034b9 100644 (file)
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
 }
 
 static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
 {
        struct gss_upcall_msg *pos;
        list_for_each_entry(pos, &pipe->in_downcall, list) {
                if (!uid_eq(pos->uid, uid))
                        continue;
+               if (auth && pos->auth->service != auth->service)
+                       continue;
                atomic_inc(&pos->count);
                dprintk("RPC:       %s found msg %p\n", __func__, pos);
                return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
        struct gss_upcall_msg *old;
 
        spin_lock(&pipe->lock);
-       old = __gss_find_upcall(pipe, gss_msg->uid);
+       old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
        if (old == NULL) {
                atomic_inc(&gss_msg->count);
                list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        err = -ENOENT;
        /* Find a matching upcall */
        spin_lock(&pipe->lock);
-       gss_msg = __gss_find_upcall(pipe, uid);
+       gss_msg = __gss_find_upcall(pipe, uid, NULL);
        if (gss_msg == NULL) {
                spin_unlock(&pipe->lock);
                goto err_put_ctx;
index 1d281816f2bf14e34a71932863e5579e4a5caa35..d8582028b34600dc20e4447a99aeb5384b9ecb54 100644 (file)
@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
        struct rsc *found;
 
        memset(&rsci, 0, sizeof(rsci));
-       rsci.handle.data = handle->data;
-       rsci.handle.len = handle->len;
+       if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
+               return NULL;
        found = rsc_lookup(cd, &rsci);
+       rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(cd, &found->h, NULL))
index cb49898a5a58aacfadceda27a07ceb45eb88a8d3..66f23b376fa04a91134eddf6d8ee22a2f5de5808 100644 (file)
@@ -453,7 +453,7 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
        struct rpc_xprt_switch *xps;
 
        if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xps = args->bc_xprt->xpt_bc_xps;
                xprt_switch_get(xps);
        } else {
@@ -520,7 +520,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        char servername[48];
 
        if (args->bc_xprt) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xprt = args->bc_xprt->xpt_bc_xprt;
                if (xprt) {
                        xprt_get(xprt);
@@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
 {
        struct rpc_xprt_switch *xps;
        struct rpc_xprt *xprt;
+       unsigned long reconnect_timeout;
        unsigned char resvport;
        int ret = 0;
 
@@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
                return -EAGAIN;
        }
        resvport = xprt->resvport;
+       reconnect_timeout = xprt->max_reconnect_timeout;
        rcu_read_unlock();
 
        xprt = xprt_create_transport(xprtargs);
@@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
                goto out_put_switch;
        }
        xprt->resvport = resvport;
+       xprt->max_reconnect_timeout = reconnect_timeout;
 
        rpc_xprt_switch_set_roundrobin(xps);
        if (setup) {
@@ -2673,6 +2676,27 @@ out_put_switch:
 }
 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
 
+static int
+rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+               struct rpc_xprt *xprt,
+               void *data)
+{
+       unsigned long timeout = *((unsigned long *)data);
+
+       if (timeout < xprt->max_reconnect_timeout)
+               xprt->max_reconnect_timeout = timeout;
+       return 0;
+}
+
+void
+rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
+{
+       rpc_clnt_iterate_for_each_xprt(clnt,
+                       rpc_xprt_cap_max_reconnect_timeout,
+                       &timeo);
+}
+EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static void rpc_show_header(void)
 {
index 8313960cac524dd36d220f9b55d124435400f25a..ea244b29138b0b86cf7860ce5c1e4605ade86a2a 100644 (file)
@@ -680,6 +680,20 @@ out:
        spin_unlock_bh(&xprt->transport_lock);
 }
 
+static bool
+xprt_has_timer(const struct rpc_xprt *xprt)
+{
+       return xprt->idle_timeout != 0;
+}
+
+static void
+xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
+       __must_hold(&xprt->transport_lock)
+{
+       if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+               mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
+}
+
 static void
 xprt_init_autodisconnect(unsigned long data)
 {
@@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data)
        spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv))
                goto out_abort;
+       /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
+       xprt->last_used = jiffies;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
        spin_unlock(&xprt->transport_lock);
@@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
                goto out;
        xprt->snd_task =NULL;
        xprt->ops->release_xprt(xprt, NULL);
+       xprt_schedule_autodisconnect(xprt);
 out:
        spin_unlock_bh(&xprt->transport_lock);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
@@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task)
        spin_unlock_bh(&xprt->transport_lock);
 }
 
-static inline int xprt_has_timer(struct rpc_xprt *xprt)
-{
-       return xprt->idle_timeout != 0;
-}
-
 /**
  * xprt_prepare_transmit - reserve the transport before sending a request
  * @task: RPC task about to send a request
@@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task)
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
-       if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
-               mod_timer(&xprt->timer,
-                               xprt->last_used + xprt->idle_timeout);
+       xprt_schedule_autodisconnect(xprt);
        spin_unlock_bh(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(req->rq_buffer);
index 536d0be3f61bdd3995f95a5ae3893e9cf9d84997..799cce6cbe45e89f4030d4cd1a17a15b56cf3cf2 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/svc_rdma.h>
 #include <asm/bitops.h>
 #include <linux/module.h> /* try_module_get()/module_put() */
 
@@ -923,7 +924,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
        }
 
        INIT_LIST_HEAD(&buf->rb_recv_bufs);
-       for (i = 0; i < buf->rb_max_requests; i++) {
+       for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
                struct rpcrdma_rep *rep;
 
                rep = rpcrdma_create_rep(r_xprt);
@@ -1018,6 +1019,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                rep = rpcrdma_buffer_get_rep_locked(buf);
                rpcrdma_destroy_rep(ia, rep);
        }
+       buf->rb_send_count = 0;
 
        spin_lock(&buf->rb_reqslock);
        while (!list_empty(&buf->rb_allreqs)) {
@@ -1032,6 +1034,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                spin_lock(&buf->rb_reqslock);
        }
        spin_unlock(&buf->rb_reqslock);
+       buf->rb_recv_count = 0;
 
        rpcrdma_destroy_mrs(buf);
 }
@@ -1074,8 +1077,27 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
        spin_unlock(&buf->rb_mwlock);
 }
 
+static struct rpcrdma_rep *
+rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
+{
+       /* If an RPC previously completed without a reply (say, due to a
+        * credential problem or a soft timeout), hold off
+        * on supplying more Receive buffers until the number of new
+        * pending RPCs catches up to the number of posted Receives.
+        */
+       if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
+               return NULL;
+
+       if (unlikely(list_empty(&buffers->rb_recv_bufs)))
+               return NULL;
+       buffers->rb_recv_count++;
+       return rpcrdma_buffer_get_rep_locked(buffers);
+}
+
 /*
  * Get a set of request/reply buffers.
+ *
+ * Reply buffer (if available) is attached to send buffer upon return.
  */
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
@@ -1085,21 +1107,15 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
        spin_lock(&buffers->rb_lock);
        if (list_empty(&buffers->rb_send_bufs))
                goto out_reqbuf;
+       buffers->rb_send_count++;
        req = rpcrdma_buffer_get_req_locked(buffers);
-       if (list_empty(&buffers->rb_recv_bufs))
-               goto out_repbuf;
-       req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+       req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
        return req;
 
 out_reqbuf:
        spin_unlock(&buffers->rb_lock);
-       pr_warn("rpcrdma: out of request buffers (%p)\n", buffers);
-       return NULL;
-out_repbuf:
-       list_add(&req->rl_free, &buffers->rb_send_bufs);
-       spin_unlock(&buffers->rb_lock);
-       pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
+       pr_warn("RPC:       %s: out of request buffers\n", __func__);
        return NULL;
 }
 
@@ -1117,9 +1133,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
        req->rl_reply = NULL;
 
        spin_lock(&buffers->rb_lock);
+       buffers->rb_send_count--;
        list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
-       if (rep)
+       if (rep) {
+               buffers->rb_recv_count--;
                list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
+       }
        spin_unlock(&buffers->rb_lock);
 }
 
@@ -1133,8 +1152,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
        struct rpcrdma_buffer *buffers = req->rl_buffer;
 
        spin_lock(&buffers->rb_lock);
-       if (!list_empty(&buffers->rb_recv_bufs))
-               req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+       req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
 }
 
@@ -1148,6 +1166,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
 
        spin_lock(&buffers->rb_lock);
+       buffers->rb_recv_count--;
        list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        spin_unlock(&buffers->rb_lock);
 }
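
The new rb_send_count/rb_recv_count pair implements a simple credit scheme: a request is handed a reply buffer only once the number of active sends has caught up with the number of receives already outstanding, so RPCs that complete without a reply (the case called out in the comment above) cannot slowly drain the receive pool. In miniature:

#include <stdio.h>

struct pool {
        int send_count;   /* request buffers handed out */
        int recv_count;   /* receive buffers handed out */
        int free_reps;    /* receive buffers on the free list */
};

/* Hand out a receive buffer only if sends have caught up with receives. */
static int get_rep(struct pool *p)
{
        if (p->send_count < p->recv_count)
                return 0;               /* hold off: receives still outstanding */
        if (p->free_reps == 0)
                return 0;
        p->free_reps--;
        p->recv_count++;
        return 1;
}

static void get_req(struct pool *p)
{
        p->send_count++;
        printf("send %d, rep attached: %d\n", p->send_count, get_rep(p));
}

int main(void)
{
        struct pool p = { .free_reps = 4, .recv_count = 2 };  /* two stale receives */

        get_req(&p);    /* send 1 < recv 2: held off */
        get_req(&p);    /* send 2 == recv 2: allowed */
        return 0;
}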
index 670fad57153a109b5f8d14ac9cd3733e874c81d3..a71b0f5897d8721ee8edaed291388c34f61d3cee 100644 (file)
@@ -321,6 +321,7 @@ struct rpcrdma_buffer {
        char                    *rb_pool;
 
        spinlock_t              rb_lock;        /* protect buf lists */
+       int                     rb_send_count, rb_recv_count;
        struct list_head        rb_send_bufs;
        struct list_head        rb_recv_bufs;
        u32                     rb_max_requests;
index 111767ab124aa4037dfe8c7040866d7196343292..bf168838a0296e9387de33ad5afabb3bdf3b35f0 100644 (file)
@@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = {
  * increase over time if the server is down or not responding.
  */
 #define XS_TCP_INIT_REEST_TO   (3U * HZ)
-#define XS_TCP_MAX_REEST_TO    (5U * 60 * HZ)
 
 /*
  * TCP idle timeout; client drops the transport socket if it is idle
@@ -1075,7 +1074,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
                skb = skb_recv_datagram(sk, 0, 1, &err);
                if (skb != NULL) {
                        xs_udp_data_read_skb(&transport->xprt, sk, skb);
-                       skb_free_datagram(sk, skb);
+                       skb_free_datagram_locked(sk, skb);
                        continue;
                }
                if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
@@ -2173,6 +2172,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                write_unlock_bh(&sk->sk_callback_lock);
        }
        xs_udp_do_set_buffer_size(xprt);
+
+       xprt->stat.connect_start = jiffies;
 }
 
 static void xs_udp_setup_socket(struct work_struct *work)
@@ -2236,6 +2237,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                unsigned int keepcnt = xprt->timeout->to_retries + 1;
                unsigned int opt_on = 1;
                unsigned int timeo;
+               unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
 
                /* TCP Keepalive options */
                kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2247,6 +2249,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
                                (char *)&keepcnt, sizeof(keepcnt));
 
+               /* Avoid temporary addresses; they are bad for long-lived
+                * connections such as NFS mounts.
+                * RFC4941, section 3.6 suggests that:
+                *    Individual applications, which have specific
+                *    knowledge about the normal duration of connections,
+                *    MAY override this as appropriate.
+                */
+               kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
+                               (char *)&addr_pref, sizeof(addr_pref));
+
                /* TCP user timeout (see RFC5482) */
                timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
                        (xprt->timeout->to_retries + 1);
@@ -2295,6 +2307,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                /* SYN_SENT! */
                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+               break;
+       case -EADDRNOTAVAIL:
+               /* Source port number is unavailable. Try a new one! */
+               transport->srcport = 0;
        }
 out:
        return ret;
@@ -2369,6 +2385,25 @@ out:
        xprt_wake_pending_tasks(xprt, status);
 }
 
+static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
+{
+       unsigned long start, now = jiffies;
+
+       start = xprt->stat.connect_start + xprt->reestablish_timeout;
+       if (time_after(start, now))
+               return start - now;
+       return 0;
+}
+
+static void xs_reconnect_backoff(struct rpc_xprt *xprt)
+{
+       xprt->reestablish_timeout <<= 1;
+       if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
+               xprt->reestablish_timeout = xprt->max_reconnect_timeout;
+       if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+               xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+}
+
 /**
  * xs_connect - connect a socket to a remote endpoint
  * @xprt: pointer to transport structure
@@ -2386,6 +2421,7 @@ out:
 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       unsigned long delay = 0;
 
        WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
 
@@ -2397,19 +2433,15 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
                /* Start by resetting any existing state */
                xs_reset_transport(transport);
 
-               queue_delayed_work(xprtiod_workqueue,
-                                  &transport->connect_worker,
-                                  xprt->reestablish_timeout);
-               xprt->reestablish_timeout <<= 1;
-               if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
-                       xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
-               if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
-                       xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
-       } else {
+               delay = xs_reconnect_delay(xprt);
+               xs_reconnect_backoff(xprt);
+
+       } else
                dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
-               queue_delayed_work(xprtiod_workqueue,
-                                  &transport->connect_worker, 0);
-       }
+
+       queue_delayed_work(xprtiod_workqueue,
+                       &transport->connect_worker,
+                       delay);
 }
 
 /**
@@ -2961,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
        xprt->ops = &xs_tcp_ops;
        xprt->timeout = &xs_tcp_default_timeout;
 
+       xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
+
        INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
        INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
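
The two helpers above replace the old hard-coded XS_TCP_MAX_REEST_TO cap with the per-transport xprt->max_reconnect_timeout set in xs_setup_tcp(). A minimal user-space sketch of the resulting schedule; HZ = 100 and the five-minute maximum are assumptions for illustration, not values from this diff:

/* Stand-alone model of xs_reconnect_backoff(); compile with any C99 cc. */
#include <stdio.h>

#define HZ 100
#define XS_TCP_INIT_REEST_TO (3U * HZ)

int main(void)
{
	unsigned long reestablish_timeout = XS_TCP_INIT_REEST_TO;
	unsigned long max_reconnect_timeout = 300U * HZ; /* assumed to_maxval */

	for (int attempt = 1; attempt <= 9; attempt++) {
		printf("attempt %d: wait up to %lu jiffies\n",
		       attempt, reestablish_timeout);
		/* double, then clamp to [INIT, max], as in the hunk above */
		reestablish_timeout <<= 1;
		if (reestablish_timeout > max_reconnect_timeout)
			reestablish_timeout = max_reconnect_timeout;
		if (reestablish_timeout < XS_TCP_INIT_REEST_TO)
			reestablish_timeout = XS_TCP_INIT_REEST_TO;
	}
	return 0;
}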
 
index b62caa1c770c042fa7449f401bcb333b3defa485..ed97a5876ebef128937906d4115d3c1db6d16998 100644 (file)
@@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
                             u32 bearer_id, u32 *prev_node)
 {
        struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-       struct tipc_peer *peer = mon->self;
+       struct tipc_peer *peer;
 
        if (!mon)
                return -EINVAL;
 
        read_lock_bh(&mon->lock);
+       peer = mon->self;
        do {
                if (*prev_node) {
                        if (peer->addr == *prev_node)
index 6b626a64b5179e9b7d8fc909be203bdf6facfbdd..a04fe9be1c60e2a7c1cb2f90c80731e08dcc910d 100644 (file)
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 
 /**
  * named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
  */
 static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
-                       ITEM_SIZE;
-       uint msg_rem = msg_dsz;
+       u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+                       ITEM_SIZE) * ITEM_SIZE;
+       u32 msg_rem = msg_dsz;
 
        list_for_each_entry(publ, pls, local_list) {
                /* Prepare next buffer: */
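
The msg_dsz change above reserves the INT_H_SIZE header that named_prepare_buf() documents before rounding the remaining MTU down to a whole number of items. A worked example; INT_H_SIZE = 40 and ITEM_SIZE = 20 are assumed values for illustration only:

/* Old vs. new payload sizing for a 1500-byte MTU (assumed sizes). */
#include <stdio.h>

#define INT_H_SIZE 40	/* assumption */
#define ITEM_SIZE  20	/* assumption */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int old_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE;
	unsigned int new_dsz = ((mtu - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;

	/* old: 1500 bytes of items left no room for the 40-byte header */
	printf("old %u, new %u (%u items)\n",
	       old_dsz, new_dsz, new_dsz / ITEM_SIZE);
	return 0;
}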
index c49b8df438cbeee021bbedf3c96631d82fb16670..f9f5f3c3dab530c0b798d314873800500ccc30b5 100644 (file)
@@ -2180,7 +2180,8 @@ restart:
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
                                              0, dnode, onode, dport, oport,
                                              TIPC_CONN_SHUTDOWN);
-                       tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+                       if (skb)
+                               tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
index b016c011970b7e5089b277c5bcd09a8fa31a3af0..ae7e14cae0857933c6c140a4382d9ab054ce353f 100644 (file)
@@ -396,10 +396,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
        tuncfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
 
-       if (enable_mcast(ub, remote))
+       err = enable_mcast(ub, remote);
+       if (err)
                goto err;
        return 0;
 err:
+       if (ub->ubsock)
+               udp_tunnel_sock_release(ub->ubsock);
        kfree(ub);
        return err;
 }
index f1dffe84f0d5e56b0c57271831a427ee137a37c4..8309687a56b04f750560245ace8ed4bfbf969a1e 100644 (file)
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
        struct unix_sock *u = unix_sk(sk);
 
-       if (mutex_lock_interruptible(&u->readlock))
+       if (mutex_lock_interruptible(&u->iolock))
                return -EINTR;
 
        sk->sk_peek_off = val;
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 
        return 0;
 }
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
-       mutex_init(&u->readlock); /* single task reading lock */
+       mutex_init(&u->iolock); /* single task reading lock */
+       mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
        int err;
        unsigned int retries = 0;
 
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                return err;
 
@@ -895,7 +896,7 @@ retry:
        spin_unlock(&unix_table_lock);
        err = 0;
 
-out:   mutex_unlock(&u->readlock);
+out:   mutex_unlock(&u->bindlock);
        return err;
 }
 
@@ -954,20 +955,32 @@ fail:
        return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
-                     struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-       int err;
+       struct dentry *dentry;
+       struct path path;
+       int err = 0;
+       /*
+        * Get the parent directory and calculate the hash for the
+        * last component.
+        */
+       dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+       err = PTR_ERR(dentry);
+       if (IS_ERR(dentry))
+               return err;
 
-       err = security_path_mknod(path, dentry, mode, 0);
+       /*
+        * All right, let's create it.
+        */
+       err = security_path_mknod(&path, dentry, mode, 0);
        if (!err) {
-               err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+               err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
                if (!err) {
-                       res->mnt = mntget(path->mnt);
+                       res->mnt = mntget(path.mnt);
                        res->dentry = dget(dentry);
                }
        }
-
+       done_path_create(&path, dentry);
        return err;
 }
 
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
-       int err, name_err;
+       int err;
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
-       struct path path;
-       struct dentry *dentry;
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
-       name_err = 0;
-       dentry = NULL;
-       if (sun_path[0]) {
-               /* Get the parent directory, calculate the hash for last
-                * component.
-                */
-               dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
-               if (IS_ERR(dentry)) {
-                       /* delay report until after 'already bound' check */
-                       name_err = PTR_ERR(dentry);
-                       dentry = NULL;
-               }
-       }
-
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
-               goto out_path;
+               goto out;
 
        err = -EINVAL;
        if (u->addr)
                goto out_up;
 
-       if (name_err) {
-               err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-               goto out_up;
-       }
-
        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);
 
-       if (dentry) {
-               struct path u_path;
+       if (sun_path[0]) {
+               struct path path;
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(dentry, &path, mode, &u_path);
+               err = unix_mknod(sun_path, mode, &path);
                if (err) {
                        if (err == -EEXIST)
                                err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
-               u->path = u_path;
+               u->path = path;
                list = &unix_socket_table[hash];
        } else {
                spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
        spin_unlock(&unix_table_lock);
 out_up:
-       mutex_unlock(&u->readlock);
-out_path:
-       if (dentry)
-               done_path_create(&path, dentry);
-
+       mutex_unlock(&u->bindlock);
 out:
        return err;
 }
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
        if (false) {
 alloc_skb:
                unix_state_unlock(other);
-               mutex_unlock(&unix_sk(other)->readlock);
+               mutex_unlock(&unix_sk(other)->iolock);
                newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                              &err, 0);
                if (!newskb)
                        goto err;
        }
 
-       /* we must acquire readlock as we modify already present
+       /* we must acquire iolock as we modify already present
         * skbs in the sk_receive_queue and mess with skb->len
         */
-       err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+       err = mutex_lock_interruptible(&unix_sk(other)->iolock);
        if (err) {
                err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
                goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
        }
 
        unix_state_unlock(other);
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 
        other->sk_data_ready(other);
        scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
 err_state_unlock:
        unix_state_unlock(other);
 err_unlock:
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 err:
        kfree_skb(newskb);
        if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
        do {
-               mutex_lock(&u->readlock);
+               mutex_lock(&u->iolock);
 
                skip = sk_peek_offset(sk, flags);
                skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                if (skb)
                        break;
 
-               mutex_unlock(&u->readlock);
+               mutex_unlock(&u->iolock);
 
                if (err != -EAGAIN)
                        break;
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
 
-       if (!skb) { /* implies readlock unlocked */
+       if (!skb) { /* implies iolock unlocked */
                unix_state_lock(sk);
                /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
                if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 
 out_free:
        skb_free_datagram(sk, skb);
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 out:
        return err;
 }
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
         */
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        if (flags & MSG_PEEK)
                skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
                                break;
                        }
 
-                       mutex_unlock(&u->readlock);
+                       mutex_unlock(&u->iolock);
 
                        timeo = unix_stream_data_wait(sk, timeo, last,
                                                      last_len);
@@ -2351,7 +2338,7 @@ again:
                                goto out;
                        }
 
-                       mutex_lock(&u->readlock);
+                       mutex_lock(&u->iolock);
                        goto redo;
 unlock:
                        unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
                }
        } while (size);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        if (state->msg)
                scm_recv(sock, state->msg, &scm, flags);
        else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
        int ret;
        struct unix_sock *u = unix_sk(sk);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        ret = splice_to_pipe(pipe, spd);
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        return ret;
 }
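
The readlock split above untangles a lock-order inversion: bind used to take u->readlock and then filesystem locks through kern_path_create(), while the sendpage/splice path acquires them in roughly the opposite order. A small pthread model of that inversion; the names and the exact kernel lock chain are simplified stand-ins:

/* Runs the two orderings sequentially so the demo terminates; run
 * concurrently, they can deadlock, which is what the bindlock/iolock
 * split avoids by never holding the bind lock across path creation. */
#include <pthread.h>

static pthread_mutex_t readlock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static void *old_bind(void *arg)	/* pre-patch bind ordering */
{
	pthread_mutex_lock(&readlock);
	pthread_mutex_lock(&inode_lock);	/* kern_path_create() side */
	pthread_mutex_unlock(&inode_lock);
	pthread_mutex_unlock(&readlock);
	return arg;
}

static void *splice_side(void *arg)	/* opposite ordering */
{
	pthread_mutex_lock(&inode_lock);
	pthread_mutex_lock(&readlock);		/* the inversion */
	pthread_mutex_unlock(&readlock);
	pthread_mutex_unlock(&inode_lock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, old_bind, NULL);
	pthread_join(t, NULL);
	pthread_create(&t, NULL, splice_side, NULL);
	pthread_join(t, NULL);
	return 0;
}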
index 699dfabdbccd5e6af0f4910cdf2467b36bab3b60..936d7eee62d03efbac1e278272ca8fd917a40622 100644 (file)
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 
        vq = vsock->vqs[VSOCK_VQ_TX];
 
-       /* Avoid unnecessary interrupts while we're processing the ring */
-       virtqueue_disable_cb(vq);
-
        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct scatterlist hdr, buf, *sgs[2];
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
-                       virtqueue_enable_cb(vq);
                        break;
                }
 
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work)
                }
 
                ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+               /* Usually this means that there is no more space available in
+                * the vq
+                */
                if (ret < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
-
-                       if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
-                               continue; /* retry now that we have more space */
                        break;
                }
 
index b0e11b6dc994fcfd6fdf53a94bb09fbe22f89192..0f506220a3bde0e4b73c043fa9ea6a7f6b404ed0 100644 (file)
@@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
                r = cfg80211_get_chans_dfs_available(wiphy,
                                                     chandef->center_freq2,
                                                     width);
+               break;
        default:
                WARN_ON(chandef->center_freq2);
                break;
index 46417f9cce6810675f8d79f153260e61a689a140..4809f4d2cdcc2fe09c07ce8ddfa936a4ff6033b5 100644 (file)
@@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
 {
        struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
        u32 mask = 0;
+       u16 ht_opmode;
 
 #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
 do {                                                                       \
@@ -5471,9 +5472,36 @@ do {                                                                         \
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
                                  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
                                  nl80211_check_s32);
-       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
-                                 mask, NL80211_MESHCONF_HT_OPMODE,
-                                 nl80211_check_u16);
+       /*
+        * Check HT operation mode based on
+        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        */
+       if (tb[NL80211_MESHCONF_HT_OPMODE]) {
+               ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
+
+               if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION |
+                                 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+                                 IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                       return -EINVAL;
+
+               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
+                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                       return -EINVAL;
+
+               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
+                               return -EINVAL;
+                       break;
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                               return -EINVAL;
+                       break;
+               }
+               cfg->ht_opmode = ht_opmode;
+       }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
                                  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
@@ -6950,7 +6978,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 
                params.n_counter_offsets_presp = len / sizeof(u16);
                if (rdev->wiphy.max_num_csa_counters &&
-                   (params.n_counter_offsets_beacon >
+                   (params.n_counter_offsets_presp >
                     rdev->wiphy.max_num_csa_counters))
                        return -EINVAL;
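
As a quick reference for the HT opmode validation added above, these are the combinations it accepts and rejects (a sketch; the constants come from include/linux/ieee80211.h and the results mirror the new checks):

/* No protection, but a non-HT station flagged present: rejected. */
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE |
	    IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;		/* -EINVAL */
/* Mixed protection requires the non-HT station flag: */
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;	/* -EINVAL */
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
	    IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;		/* accepted */
/* Greenfield and non-HT station flags never appear together: */
ht_opmode = IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
	    IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;		/* -EINVAL */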
 
index dbb2738e356ad83785caa748a0b84352a189bb8b..6250b1cfcde58758bb480758d1c61217d37a7cd1 100644 (file)
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl) {
-#ifdef CONFIG_COMPAT
-               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
-                       int ret = 0;
-                       struct iwreq iwr_lcl;
-                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
-
-                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
-                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
-                       iwr_lcl.u.data.length = iwp_compat->length;
-                       iwr_lcl.u.data.flags = iwp_compat->flags;
-
-                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
-
-                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
-                       iwp_compat->length = iwr_lcl.u.data.length;
-                       iwp_compat->flags = iwr_lcl.u.data.flags;
-
-                       return ret;
-               } else
-#endif
-                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
-       }
+       if (dev->netdev_ops->ndo_do_ioctl)
+               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
 
index 1c4ad477ce935eb9f217fe6aebbd61551b0c1eea..6e3f0254d8a11bcc5075915abf953fcc21008aec 100644 (file)
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        family = XFRM_SPI_SKB_CB(skb)->family;
 
        /* if tunnel is present override skb->mark value with tunnel i_key */
-       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
-               switch (family) {
-               case AF_INET:
+       switch (family) {
+       case AF_INET:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-                       break;
-               case AF_INET6:
+               break;
+       case AF_INET6:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-                       break;
-               }
+               break;
        }
 
        /* Allocate new secpath or COW existing one. */
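
The reordering above matters because the tunnel field is a union: the old code tested tunnel.ip4 even for AF_INET6 packets, inspecting the ip6 pointer through the wrong member, and a dereference through the wrong member would read i_key from the wrong offset. A stand-alone illustration; the struct layouts here are invented stand-ins, not the kernel's:

#include <stdio.h>

/* Stand-ins that keep i_key at different offsets, like the real structs. */
struct ip_tunnel { unsigned int i_key; };
struct ip6_tnl   { char pad[16]; unsigned int i_key; };

union tunnel_cb {
	struct ip_tunnel *ip4;
	struct ip6_tnl   *ip6;
};

int main(void)
{
	struct ip6_tnl t6 = { .i_key = 42 };
	union tunnel_cb cb = { .ip6 = &t6 };

	printf("ip6->i_key = %u\n", cb.ip6->i_key);	/* 42, correct */
	printf("ip4->i_key = %u\n", cb.ip4->i_key);	/* wrong offset: 0 */
	return 0;
}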
index b5e665b3cfb05f4d08970387802fee8aec8243dd..45f9cf97ea25d98bea83a7dd48365357fb8f4bee 100644 (file)
@@ -626,6 +626,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
        /* re-insert all policies by order of creation */
        list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+               if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+                       /* skip socket policies */
+                       continue;
+               }
                newpos = NULL;
                chain = policy_hash_bysel(net, &policy->selector,
                                          policy->family,
index 9895a8c56d8c2675393e1aad3a1a438e16d4d47c..a30f898dc1c5a82fb6221248799f5adbb07f2236 100644 (file)
@@ -332,6 +332,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
        tasklet_hrtimer_cancel(&x->mtimer);
        del_timer_sync(&x->rtimer);
+       kfree(x->aead);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
index d516845e16e30b69d2d47160ef9f45e6a4587a6b..08892091cfe3a65c13994b2743586f5137c89fd9 100644 (file)
@@ -581,9 +581,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
        if (err)
                goto error;
 
-       if (attrs[XFRMA_SEC_CTX] &&
-           security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
-               goto error;
+       if (attrs[XFRMA_SEC_CTX]) {
+               err = security_xfrm_state_alloc(x,
+                                               nla_data(attrs[XFRMA_SEC_CTX]));
+               if (err)
+                       goto error;
+       }
 
        if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
                                               attrs[XFRMA_REPLAY_ESN_VAL])))
@@ -896,7 +899,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
        struct sock *sk = cb->skb->sk;
        struct net *net = sock_net(sk);
 
-       xfrm_state_walk_done(walk, net);
+       if (cb->args[0])
+               xfrm_state_walk_done(walk, net);
        return 0;
 }
 
@@ -921,8 +925,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                u8 proto = 0;
                int err;
 
-               cb->args[0] = 1;
-
                err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
                                  xfrma_policy);
                if (err < 0)
@@ -939,6 +941,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                        proto = nla_get_u8(attrs[XFRMA_PROTO]);
 
                xfrm_state_walk_init(walk, proto, filter);
+               cb->args[0] = 1;
        }
 
        (void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2054,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (up->hard) {
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_policy_delete(xp, 1, true);
-       } else {
-               // reset the timers here?
-               WARN(1, "Don't know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
@@ -2117,7 +2117,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = verify_newpolicy_info(&ua->policy);
        if (err)
-               goto bad_policy;
+               goto free_state;
 
        /*   build an XP */
        xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2149,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        return 0;
 
-bad_policy:
-       WARN(1, "BAD policy passed\n");
 free_state:
        kfree(x);
 nomem:
index 217c8d507f2e8d6a6fae5a958c1edc1d3be3e99d..7927a090fa0d03241ef379db6ab91c5330c5f44b 100644 (file)
@@ -72,8 +72,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
        (void *) BPF_FUNC_l3_csum_replace;
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
        (void *) BPF_FUNC_l4_csum_replace;
-static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) =
-       (void *) BPF_FUNC_skb_in_cgroup;
+static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
+       (void *) BPF_FUNC_skb_under_cgroup;
 
 #if defined(__x86_64__)
 
index 2732c37c8d5be1e09364f1d4d97ae2126d40195e..10ff73404e3a80fe8bab464188335317ee71515a 100644 (file)
@@ -57,7 +57,7 @@ int handle_egress(struct __sk_buff *skb)
                bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
                                 eth->h_proto, ip6h->nexthdr);
                return TC_ACT_OK;
-       } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
+       } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
                bpf_trace_printk(pass_msg, sizeof(pass_msg));
                return TC_ACT_OK;
        } else {
index 47bf0858f9e47d1b85a908ab94518c46b11ffecd..cce2b59751ebcbd632926e492bb08bafa8711ed5 100644 (file)
@@ -68,7 +68,16 @@ static void test_hashmap_sanity(int i, void *data)
        assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);
 
+       /* update existing element, though the map is full */
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+       key = 2;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
        /* check that key = 0 doesn't exist */
+       key = 0;
        assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
 
        /* iterate over two elements */
@@ -413,10 +422,12 @@ static void do_work(int fn, void *data)
 
        for (i = fn; i < MAP_SIZE; i += TASKS) {
                key = value = i;
-               if (do_update)
+               if (do_update) {
                        assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
-               else
+                       assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+               } else {
                        assert(bpf_delete_elem(map_fd, &key) == 0);
+               }
        }
 }
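
The new assertions pin down the update-flag semantics on a full map. A compact restatement using the sample's bpf_update_elem() wrapper (a sketch; assumes the map is full and key 1 exists, as in the test above):

long long key = 1, value = 1234;

/* create-only fails on an existing key ... */
assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
       errno == EEXIST);
/* ... but update-only and create-or-update still succeed, even though
 * the map has no free slots left. */
assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);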
 
index 15b196fc2f49b35f6663b6b359493f6a27564797..179219845dfcdfbeb586d12c5ec1296095d9fbf4 100644 (file)
@@ -108,16 +108,20 @@ as-option = $(call try-run,\
 as-instr = $(call try-run,\
        printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
 cc-option = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-option-align
 # Prefix align with either -falign or -malign
@@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-name
 # Expands to either gcc or clang
index 5e22b60589c1ea7b5155daf92ff9d59cf33019b3..61f0e6db909bbf0a7016d60f4b58a4c4bd3c0dba 100644 (file)
@@ -19,25 +19,42 @@ ifdef CONFIG_GCC_PLUGINS
     endif
   endif
 
-  GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y))
+  GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
 
-  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN
+  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
 
+  ifneq ($(PLUGINCC),)
+    # SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
+    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
+  endif
+
+  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+  GCC_PLUGIN := $(gcc-plugin-y)
+  GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+endif
+
+# If plugins aren't supported, abort the build before hard-to-read compiler
+# errors start getting spewed by the main build.
+PHONY += gcc-plugins-check
+gcc-plugins-check: FORCE
+ifdef CONFIG_GCC_PLUGINS
   ifeq ($(PLUGINCC),)
     ifneq ($(GCC_PLUGINS_CFLAGS),)
       ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-        PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)")
-        $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?)
+       $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
       else
-        $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least)
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
       endif
     endif
-  else
-    # SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
-    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
   endif
+endif
+       @:
 
-  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-  GCC_PLUGIN := $(gcc-plugin-y)
-
+# Actually do the build, if requested.
+PHONY += gcc-plugins
+gcc-plugins: scripts_basic gcc-plugins-check
+ifdef CONFIG_GCC_PLUGINS
+       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
 endif
+       @:
index 4de3cc42fc50dca7a3d620ccca99cb4a55b685f5..206a6b346a8dbc3d38b1771093671fa37f1daf27 100755 (executable)
@@ -3570,15 +3570,6 @@ sub process {
                        }
                }
 
-# check for uses of DEFINE_PCI_DEVICE_TABLE
-               if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
-                       if (WARN("DEFINE_PCI_DEVICE_TABLE",
-                                "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
-                           $fix) {
-                               $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
-                       }
-               }
-
 # check for new typedefs, only function parameters and sparse annotations
 # make sense.
                if ($line =~ /\btypedef\s/ &&
diff --git a/scripts/faddr2line b/scripts/faddr2line
new file mode 100755 (executable)
index 0000000..450b332
--- /dev/null
@@ -0,0 +1,177 @@
+#!/bin/bash
+#
+# Translate stack dump function offsets.
+#
+# addr2line doesn't work with KASLR addresses.  This works similarly to
+# addr2line, but instead takes the 'func+0x123' format as input:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux meminfo_proc_show+0x5/0x568
+#   meminfo_proc_show+0x5/0x568:
+#   meminfo_proc_show at fs/proc/meminfo.c:27
+#
+# If the address is part of an inlined function, the full inline call chain is
+# printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux native_write_msr+0x6/0x27
+#   native_write_msr+0x6/0x27:
+#   arch_static_branch at arch/x86/include/asm/msr.h:121
+#    (inlined by) static_key_false at include/linux/jump_label.h:125
+#    (inlined by) native_write_msr at arch/x86/include/asm/msr.h:125
+#
+# The function size after the '/' in the input is optional, but recommended.
+# It's used to help disambiguate any duplicate symbol names, which can occur
+# rarely.  If the size is omitted for a duplicate symbol then it's possible for
+# multiple code sites to be printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux raw_ioctl+0x5
+#   raw_ioctl+0x5/0x20:
+#   raw_ioctl at drivers/char/raw.c:122
+#
+#   raw_ioctl+0x5/0xb1:
+#   raw_ioctl at net/ipv4/raw.c:876
+#
+# Multiple addresses can be specified on a single command line:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux type_show+0x10/45 free_reserved_area+0x90
+#   type_show+0x10/0x2d:
+#   type_show at drivers/video/backlight/backlight.c:213
+#
+#   free_reserved_area+0x90/0x123:
+#   free_reserved_area at mm/page_alloc.c:6429 (discriminator 2)
+
+
+set -o errexit
+set -o nounset
+
+command -v awk >/dev/null 2>&1 || die "awk isn't installed"
+command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
+command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+
+usage() {
+       echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
+       exit 1
+}
+
+warn() {
+       echo "$1" >&2
+}
+
+die() {
+       echo "ERROR: $1" >&2
+       exit 1
+}
+
+# Try to figure out the source directory prefix so we can remove it from the
+# addr2line output.  HACK ALERT: This assumes that start_kernel() is in
+# init/main.c!  This only works for vmlinux.  Otherwise it falls back to
+# printing the absolute path.
+find_dir_prefix() {
+       local objfile=$1
+
+       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       [[ -z $start_kernel_addr ]] && return
+
+       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       [[ -z $file_line ]] && return
+
+       local prefix=${file_line%init/main.c:*}
+       if [[ -z $prefix ]] || [[ $prefix = $file_line ]]; then
+               return
+       fi
+
+       DIR_PREFIX=$prefix
+       return 0
+}
+
+__faddr2line() {
+       local objfile=$1
+       local func_addr=$2
+       local dir_prefix=$3
+       local print_warnings=$4
+
+       local func=${func_addr%+*}
+       local offset=${func_addr#*+}
+       offset=${offset%/*}
+       local size=
+       [[ $func_addr =~ "/" ]] && size=${func_addr#*/}
+
+       if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then
+               warn "bad func+offset $func_addr"
+               DONE=1
+               return
+       fi
+
+       # Go through each of the object's symbols which match the func name.
+       # In rare cases there might be duplicates.
+       while read symbol; do
+               local fields=($symbol)
+               local sym_base=0x${fields[1]}
+               local sym_size=${fields[2]}
+               local sym_type=${fields[3]}
+
+               # calculate the address
+               local addr=$(($sym_base + $offset))
+               if [[ -z $addr ]] || [[ $addr = 0 ]]; then
+                       warn "bad address: $sym_base + $offset"
+                       DONE=1
+                       return
+               fi
+               local hexaddr=0x$(printf %x $addr)
+
+               # weed out non-function symbols
+               if [[ $sym_type != "FUNC" ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to non-function symbol"
+                       continue
+               fi
+
+               # if the user provided a size, make sure it matches the symbol's size
+               if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($size != $sym_size)"
+                       continue;
+               fi
+
+               # make sure the provided offset is within the symbol's range
+               if [[ $offset -gt $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($offset > $sym_size)"
+                       continue
+               fi
+
+               # separate multiple entries with a blank line
+               [[ $FIRST = 0 ]] && echo
+               FIRST=0
+
+               local hexsize=0x$(printf %x $sym_size)
+               echo "$func+$offset/$hexsize:"
+               addr2line -fpie $objfile $hexaddr | sed "s; $dir_prefix\(\./\)*; ;"
+               DONE=1
+
+       done < <(readelf -sW $objfile | awk -v f=$func '$8 == f {print}')
+}
+
+[[ $# -lt 2 ]] && usage
+
+objfile=$1
+[[ ! -f $objfile ]] && die "can't find objfile $objfile"
+shift
+
+DIR_PREFIX=supercalifragilisticexpialidocious
+find_dir_prefix $objfile
+
+FIRST=1
+while [[ $# -gt 0 ]]; do
+       func_addr=$1
+       shift
+
+       # print any matches found
+       DONE=0
+       __faddr2line $objfile $func_addr $DIR_PREFIX 0
+
+       # if no match was found, print warnings
+       if [[ $DONE = 0 ]]; then
+               __faddr2line $objfile $func_addr $DIR_PREFIX 1
+               warn "no match for $func_addr"
+       fi
+done
index fb92075654711393cd19af2c40fb37dcf7605dd0..b65224bfb847302db905ed2ac33dc5d598b9b48b 100755 (executable)
@@ -1,5 +1,12 @@
 #!/bin/sh
 srctree=$(dirname "$0")
+
+SHOW_ERROR=
+if [ "$1" = "--show-error" ] ; then
+       SHOW_ERROR=1
+       shift || true
+fi
+
 gccplugins_dir=$($3 -print-file-name=plugin)
 plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
 #include "gcc-common.h"
@@ -13,6 +20,9 @@ EOF
 
 if [ $? -ne 0 ]
 then
+       if [ -n "$SHOW_ERROR" ] ; then
+               echo "${plugincc}" >&2
+       fi
        exit 1
 fi
 
@@ -48,4 +58,8 @@ then
        echo "$2"
        exit 0
 fi
+
+if [ -n "$SHOW_ERROR" ] ; then
+       echo "${plugincc}" >&2
+fi
 exit 1
index 88c8ec47232b1c8595992109fbdd8a503cc6cc09..8b29dc17c73cad2730531464d3528c27973cb659 100644 (file)
@@ -12,16 +12,18 @@ else
   export HOST_EXTRACXXFLAGS
 endif
 
-export GCCPLUGINS_DIR HOSTLIBS
-
 ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
   GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
 endif
 
-$(HOSTLIBS)-y := $(GCC_PLUGIN)
+export HOSTLIBS
+
+$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
 always := $($(HOSTLIBS)-y)
 
-cyc_complexity_plugin-objs := cyc_complexity_plugin.o
-sancov_plugin-objs := sancov_plugin.o
+$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
+
+subdir-y := $(GCC_PLUGIN_SUBDIR)
+subdir-  += $(GCC_PLUGIN_SUBDIR)
 
 clean-files += *.so
index 122fcdaf42c86cec7a5fbce08cc3b406f692f6c5..aed4511f0304e4922ff6770a3d573777b4f235ed 100755 (executable)
@@ -432,7 +432,7 @@ foreach my $file (@ARGV) {
            die "$P: file '${file}' not found\n";
        }
     }
-    if ($from_filename || vcs_file_exists($file)) {
+    if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) {
        $file =~ s/^\Q${cur_path}\E//;  #strip any absolute path
        $file =~ s/^\Q${lk_path}\E//;   #or the path to the lk tree
        push(@files, $file);
@@ -2136,9 +2136,11 @@ sub vcs_file_exists {
 
     my $cmd = $VCS_cmds{"file_exists_cmd"};
     $cmd =~ s/(\$\w+)/$1/eeg;          # interpolate $cmd
-
+    $cmd .= " 2>&1";
     $exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
 
+    return 0 if ($? != 0);
+
     return $exists;
 }
 
index e1c09e2f9be7ebc88406676b3b1e46ecdd729ac0..8ea9fd2b65736c42b055791ee88b9151a573a02d 100755 (executable)
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
        (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
 fi
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+       (cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+fi
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
index ed7eef24ef89946b9fc4ffb2098ff769d0531f93..b3775a9604eac9ff7d7e4b667003969408d3d491 100755 (executable)
@@ -206,7 +206,6 @@ regex_c=(
        '/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
-       '/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
        '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
        '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
        '/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'
index 176758cdfa577f4c25e3d4afdea4f6292a0be0c3..118f4549404ef2ed0241e86faceb03f2d3646d79 100644 (file)
@@ -118,6 +118,46 @@ config LSM_MMAP_MIN_ADDR
          this low address space will need the permission specific to the
          systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+       bool
+       help
+         The heap allocator implements __check_heap_object() for
+         validating memory ranges against heap object sizes in
+         support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+       bool
+       help
+         The architecture supports CONFIG_HARDENED_USERCOPY by
+         calling check_object_size() just before performing the
+         userspace copies in the low level implementation of
+         copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+       bool "Harden memory copies between kernel and userspace"
+       depends on HAVE_ARCH_HARDENED_USERCOPY
+       depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+       select BUG
+       help
+         This option checks for obviously wrong memory regions when
+         copying memory to/from the kernel (via copy_to_user() and
+         copy_from_user() functions) by rejecting memory ranges that
+         are larger than the specified heap object, span multiple
+         separately allocated pages, are not on the process stack,
+         or are part of the kernel text. This kills entire classes
+         of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_PAGESPAN
+       bool "Refuse to copy allocations that span multiple pages"
+       depends on HARDENED_USERCOPY
+       depends on EXPERT
+       help
+         When a multi-page allocation is done without __GFP_COMP,
+         hardened usercopy will reject attempts to copy it. There are,
+         however, several cases of this in the kernel that have not all
+         been removed. This config is intended to be used only while
+         trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
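
For the two HAVE_ symbols above, the contract on the architecture side is a check_object_size() call immediately before the raw user copy. A hedged sketch of what an arch hook looks like; the wrapper and raw-copy names are placeholders, not real kernel symbols:

#include <linux/thread_info.h>	/* check_object_size() */

/* Placeholder wrapper; real arches call this from copy_to_user(). */
static inline unsigned long
arch_copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	/* Rejects ranges that span heap objects, leave the stack, or
	 * overlap kernel text, per the help text above. */
	check_object_size(from, n, true);
	return __raw_copy_to_user_placeholder(to, from, n);
}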
index 5adbfc32242f81b0f6396fd7d0656c5aa4eca6ff..17a06105ccb616ea91aaa5635341136dda1fc915 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
+#include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        unsigned int padlen;
        char pad[16];
        int ret;
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_encrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        char pad[16];
        int ret;
 
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                   epayload->decrypted_datalen);
        sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_decrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
index 795437b1008200cd534f9a46ad0272f3dbc20ca4..b450a27588c8e679cca28fe4384850b031c93168 100644 (file)
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                return -EBUSY;
        }
        list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+       mutex_unlock(&register_mutex);
        err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
                                  rmidi->card, rmidi->device,
                                  &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
        if (err < 0) {
                rmidi_err(rmidi, "unable to register\n");
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
        if (rmidi->ops && rmidi->ops->dev_register &&
            (err = rmidi->ops->dev_register(rmidi)) < 0) {
                snd_unregister_device(&rmidi->dev);
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                }
        }
 #endif /* CONFIG_SND_OSSEMUL */
-       mutex_unlock(&register_mutex);
        sprintf(name, "midi%d", rmidi->device);
        entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
        if (entry) {
index 9a6157ea6881703310586bea76f5e1c61fcfb2a5..fc144f43faa67c177dba17a40c246a2af3edef7c 100644 (file)
@@ -35,6 +35,9 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
+/* internal flags */
+#define SNDRV_TIMER_IFLG_PAUSED                0x00010000
+
 #if IS_ENABLED(CONFIG_SND_HRTIMER)
 #define DEFAULT_TIMER_LIMIT 4
 #else
@@ -294,8 +297,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
                get_device(&timer->card->card_dev);
        timeri->slave_class = tid->dev_sclass;
        timeri->slave_id = slave_id;
-       if (list_empty(&timer->open_list_head) && timer->hw.open)
-               timer->hw.open(timer);
+
+       if (list_empty(&timer->open_list_head) && timer->hw.open) {
+               int err = timer->hw.open(timer);
+               if (err) {
+                       kfree(timeri->owner);
+                       kfree(timeri);
+
+                       if (timer->card)
+                               put_device(&timer->card->card_dev);
+                       module_put(timer->module);
+                       mutex_unlock(&register_mutex);
+                       return err;
+               }
+       }
+
        list_add_tail(&timeri->open_list, &timer->open_list_head);
        snd_timer_check_master(timeri);
        mutex_unlock(&register_mutex);
@@ -526,6 +542,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
                }
        }
        timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+       if (stop)
+               timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
+       else
+               timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
        snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
                          SNDRV_TIMER_EVENT_CONTINUE);
  unlock:
@@ -587,6 +607,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
  */
 int snd_timer_continue(struct snd_timer_instance *timeri)
 {
+       /* timer can continue only after pause */
+       if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return -EINVAL;
+
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
                return snd_timer_start_slave(timeri, false);
        else
@@ -813,6 +837,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
        timer->tmr_subdevice = tid->subdevice;
        if (id)
                strlcpy(timer->id, id, sizeof(timer->id));
+       timer->sticks = 1;
        INIT_LIST_HEAD(&timer->device_list);
        INIT_LIST_HEAD(&timer->open_list_head);
        INIT_LIST_HEAD(&timer->active_list_head);
@@ -1817,6 +1842,9 @@ static int snd_timer_user_continue(struct file *file)
        tu = file->private_data;
        if (!tu->timeri)
                return -EBADFD;
+       /* start the timer instead of continuing if it was not used before */
+       if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return snd_timer_user_start(file);
        tu->timeri->lost = 0;
        return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
 }
@@ -1958,6 +1986,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
+               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -1967,6 +1996,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
+               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
index 03ed35237e2bb89af027b18fddbc334f5e4620b7..d73c12b8753da276da380672c3c4771c87c33086 100644 (file)
@@ -108,7 +108,6 @@ struct snd_efw {
        u8 *resp_buf;
        u8 *pull_ptr;
        u8 *push_ptr;
-       unsigned int resp_queues;
 };
 
 int snd_efw_transaction_cmd(struct fw_unit *unit,
index 33df8655fe81f28872e01bbecd817ba7c37f8ede..2e1d9a23920c0c3ebf4b6edda5019b672dab4e82 100644 (file)
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
        unsigned int length, till_end, type;
        struct snd_efw_transaction *t;
+       u8 *pull_ptr;
        long count = 0;
 
        if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
        buf += sizeof(type);
 
        /* write into buffer as many responses as possible */
-       while (efw->resp_queues > 0) {
-               t = (struct snd_efw_transaction *)(efw->pull_ptr);
+       spin_lock_irq(&efw->lock);
+
+       /*
+        * When another task reaches here during this task's access to user
+        * space, it picks up the current position in the buffer and can
+        * read the same series of responses.
+        */
+       pull_ptr = efw->pull_ptr;
+
+       while (efw->push_ptr != pull_ptr) {
+               t = (struct snd_efw_transaction *)(pull_ptr);
                length = be32_to_cpu(t->length) * sizeof(__be32);
 
                /* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
                /* copy from ring buffer to user buffer */
                while (length > 0) {
                        till_end = snd_efw_resp_buf_size -
-                               (unsigned int)(efw->pull_ptr - efw->resp_buf);
+                               (unsigned int)(pull_ptr - efw->resp_buf);
                        till_end = min_t(unsigned int, length, till_end);
 
-                       if (copy_to_user(buf, efw->pull_ptr, till_end))
+                       spin_unlock_irq(&efw->lock);
+
+                       if (copy_to_user(buf, pull_ptr, till_end))
                                return -EFAULT;
 
-                       efw->pull_ptr += till_end;
-                       if (efw->pull_ptr >= efw->resp_buf +
-                                            snd_efw_resp_buf_size)
-                               efw->pull_ptr -= snd_efw_resp_buf_size;
+                       spin_lock_irq(&efw->lock);
+
+                       pull_ptr += till_end;
+                       if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+                               pull_ptr -= snd_efw_resp_buf_size;
 
                        length -= till_end;
                        buf += till_end;
                        count += till_end;
                        remained -= till_end;
                }
-
-               efw->resp_queues--;
        }
 
+       /*
+        * All tasks can read from the buffer nearly simultaneously, but the
+        * last position for each task differs depending on the length of the
+        * given buffer. Here, for simplicity, the buffer position is set by
+        * the latest task. It's better for a listening application to allow
+        * only one thread to read; otherwise each task can read a different
+        * sequence of responses depending on the buffer length.
+        */
+       efw->pull_ptr = pull_ptr;
+
+       spin_unlock_irq(&efw->lock);
+
        return count;
 }
 
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
                  loff_t *offset)
 {
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
-       memset(&event, 0, sizeof(event));
+       spin_lock_irq(&efw->lock);
 
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
        event.lock_status.status = (efw->dev_lock_count > 0);
        efw->dev_lock_changed = false;
 
+       spin_unlock_irq(&efw->lock);
+
        count = min_t(long, count, sizeof(event.lock_status));
 
        if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
        struct snd_efw *efw = hwdep->private_data;
        DEFINE_WAIT(wait);
+       bool dev_lock_changed;
+       bool queued;
 
        spin_lock_irq(&efw->lock);
 
-       while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+       dev_lock_changed = efw->dev_lock_changed;
+       queued = efw->push_ptr != efw->pull_ptr;
+
+       while (!dev_lock_changed && !queued) {
                prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
                spin_unlock_irq(&efw->lock);
                schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                if (signal_pending(current))
                        return -ERESTARTSYS;
                spin_lock_irq(&efw->lock);
+               dev_lock_changed = efw->dev_lock_changed;
+               queued = efw->push_ptr != efw->pull_ptr;
        }
 
-       if (efw->dev_lock_changed)
+       spin_unlock_irq(&efw->lock);
+
+       if (dev_lock_changed)
                count = hwdep_read_locked(efw, buf, count, offset);
-       else if (efw->resp_queues > 0)
+       else if (queued)
                count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-       spin_unlock_irq(&efw->lock);
-
        return count;
 }
 
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
        poll_wait(file, &efw->hwdep_wait, wait);
 
        spin_lock_irq(&efw->lock);
-       if (efw->dev_lock_changed || (efw->resp_queues > 0))
+       if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
                events = POLLIN | POLLRDNORM;
        else
                events = 0;
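The hwdep rework above is a general pattern worth noting: snapshot the ring-buffer read pointer under the lock, drop the IRQ spinlock across copy_to_user() (which may sleep on a page fault), then retake the lock and commit the pointer once at the end. A minimal sketch of the idea, using hypothetical names rather than the driver's actual struct snd_efw:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>

    /* hypothetical ring; the driver keeps equivalent state in struct snd_efw */
    struct ring {
            spinlock_t lock;
            u8 *buf, *push_ptr, *pull_ptr;
            size_t size;
    };

    static long ring_read(struct ring *r, char __user *ubuf, size_t len)
    {
            long copied = 0;
            u8 *pos;

            spin_lock_irq(&r->lock);
            pos = r->pull_ptr;                      /* private snapshot */

            while (len > 0) {
                    size_t avail, chunk;

                    if (r->push_ptr >= pos)
                            avail = r->push_ptr - pos;      /* contiguous run */
                    else
                            avail = r->buf + r->size - pos; /* up to the wrap */
                    if (!avail)
                            break;
                    chunk = min_t(size_t, len, avail);

                    /* never fault into user space with a spinlock held */
                    spin_unlock_irq(&r->lock);
                    if (copy_to_user(ubuf + copied, pos, chunk))
                            return -EFAULT;
                    spin_lock_irq(&r->lock);

                    pos += chunk;
                    if (pos >= r->buf + r->size)
                            pos -= r->size;
                    copied += chunk;
                    len -= chunk;
            }

            r->pull_ptr = pos;                      /* commit once; latest reader wins */
            spin_unlock_irq(&r->lock);

            return copied;
    }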
index 0639dcb13f7df76ab2199259716bd22ba2e308de..beb0a0ffee57c4cfbb72845dd16a6f27b1c165ba 100644 (file)
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
        else
                consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
 
-       snd_iprintf(buffer, "%d %d/%d\n",
-                   efw->resp_queues, consumed, snd_efw_resp_buf_size);
+       snd_iprintf(buffer, "%d/%d\n",
+                   consumed, snd_efw_resp_buf_size);
 }
 
 static void
index f550808d178416cfd8c67cb9b78722687b2001e5..36a08ba51ec793ddd520ef5be9259b8fffaa0ed2 100644 (file)
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        size_t capacity, till_end;
        struct snd_efw_transaction *t;
 
-       spin_lock_irq(&efw->lock);
-
        t = (struct snd_efw_transaction *)data;
        length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
+       spin_lock_irq(&efw->lock);
+
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
        else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        }
 
        /* for hwdep */
-       efw->resp_queues++;
        wake_up(&efw->hwdep_wait);
 
        *rcode = RCODE_COMPLETE;
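On the producer side, the free space left in such a ring follows from the same two pointers, which is what the hunk above computes before writing; the hunk also narrows the critical section by parsing the incoming transaction length before taking the lock. A hedged sketch, reusing the hypothetical struct ring from the previous example:

    /* free space left for the producer; mirrors the capacity logic above */
    static size_t ring_capacity(const struct ring *r)
    {
            if (r->push_ptr < r->pull_ptr)
                    return r->pull_ptr - r->push_ptr;
            return r->size - (r->push_ptr - r->pull_ptr);
    }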
index 131267c3a04254fac3bd00767810abbb5fa9c7a3..106406cbfaa3926f294ad0831180472a1e50523d 100644 (file)
 
 #include "tascam.h"
 
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
-                             long count)
-{
-       union snd_firewire_event event;
-
-       memset(&event, 0, sizeof(event));
-
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
-       event.lock_status.status = (tscm->dev_lock_count > 0);
-       tscm->dev_lock_changed = false;
-
-       count = min_t(long, count, sizeof(event.lock_status));
-
-       if (copy_to_user(buf, &event, count))
-               return -EFAULT;
-
-       return count;
-}
-
 static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                       loff_t *offset)
 {
        struct snd_tscm *tscm = hwdep->private_data;
        DEFINE_WAIT(wait);
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
        spin_lock_irq(&tscm->lock);
 
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                spin_lock_irq(&tscm->lock);
        }
 
-       memset(&event, 0, sizeof(event));
-       count = hwdep_read_locked(tscm, buf, count);
+       event.lock_status.status = (tscm->dev_lock_count > 0);
+       tscm->dev_lock_changed = false;
+
        spin_unlock_irq(&tscm->lock);
 
+       count = min_t(long, count, sizeof(event.lock_status));
+
+       if (copy_to_user(buf, &event, count))
+               return -EFAULT;
+
        return count;
 }
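Both hwdep conversions above replace a memset() plus field assignment with a designated initializer. Partially initializing an aggregate zero-initializes the remaining named fields of the initialized member, so the explicit memset() becomes redundant for the part that is actually copied out. A standalone sketch (generic names, not the ALSA types):

    #include <string.h>

    union ev {
            struct { int type; int status; } lock_status;
            unsigned char raw[32];
    };

    void demo(void)
    {
            /* .type set, .status implicitly zeroed; bytes beyond lock_status
             * are unspecified, so only lock_status may be copied out, which
             * is exactly what the driver does. */
            union ev a = { .lock_status.type = 1 };

            /* the older equivalent, spelled out */
            union ev b;
            memset(&b, 0, sizeof(b));
            b.lock_status.type = 1;
            (void)a; (void)b;
    }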
 
index 89dacf9b4e6cbcdd7caaed257ce77a510083c3c4..160c7f71372289034f87953de4650fb08498298a 100644 (file)
@@ -906,20 +906,23 @@ static int azx_resume(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
        struct hda_intel *hda;
+       struct hdac_bus *bus;
 
        if (!card)
                return 0;
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
+       bus = azx_bus(chip);
        if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
-               && hda->need_i915_power) {
-               snd_hdac_display_power(azx_bus(chip), true);
-               snd_hdac_i915_set_bclk(azx_bus(chip));
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+               snd_hdac_display_power(bus, true);
+               if (hda->need_i915_power)
+                       snd_hdac_i915_set_bclk(bus);
        }
+
        if (chip->msi)
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
@@ -929,6 +932,11 @@ static int azx_resume(struct device *dev)
 
        hda_intel_init_chip(chip, true);
 
+       /* power down again for link-controlled chips */
+       if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+           !hda->need_i915_power)
+               snd_hdac_display_power(bus, false);
+
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
@@ -1008,6 +1016,7 @@ static int azx_runtime_resume(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
+       bus = azx_bus(chip);
        if (chip->disabled || hda->init_failed)
                return 0;
 
@@ -1015,15 +1024,9 @@ static int azx_runtime_resume(struct device *dev)
                return 0;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
-               bus = azx_bus(chip);
-               if (hda->need_i915_power) {
-                       snd_hdac_display_power(bus, true);
+               snd_hdac_display_power(bus, true);
+               if (hda->need_i915_power)
                        snd_hdac_i915_set_bclk(bus);
-               } else {
-                       /* toggle codec wakeup bit for STATESTS read */
-                       snd_hdac_set_codec_wakeup(bus, true);
-                       snd_hdac_set_codec_wakeup(bus, false);
-               }
        }
 
        /* Read STATESTS before controller reset */
@@ -1043,6 +1046,11 @@ static int azx_runtime_resume(struct device *dev)
        azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
                        ~STATESTS_INT_MASK);
 
+       /* power down again for link-controlled chips */
+       if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+           !hda->need_i915_power)
+               snd_hdac_display_power(bus, false);
+
        trace_azx_runtime_resume(chip);
        return 0;
 }
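Taken together, the two resume-path hunks change the power sequencing: the i915 display power well is now raised unconditionally while the controller link is brought up (snd_hdac_i915_set_bclk() needs it powered), and the reference is dropped again after init for codecs that do not keep i915 power. Condensed from the hunks above, not a complete function:

    if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
            snd_hdac_display_power(bus, true);      /* hold power across link init */
            if (hda->need_i915_power)
                    snd_hdac_i915_set_bclk(bus);    /* requires the power well up */
    }

    hda_intel_init_chip(chip, true);

    /* power down again for link-controlled chips */
    if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && !hda->need_i915_power)
            snd_hdac_display_power(bus, false);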
index 574b1b48996f1d9199bd1349ca08b0f108199762..575cefd8cc4a6c59f1a0dc3b198fedcae84afed2 100644 (file)
@@ -4828,7 +4828,7 @@ enum {
        ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC292_FIXUP_TPT440_DOCK,
        ALC292_FIXUP_TPT440,
-       ALC283_FIXUP_BXBT2807_MIC,
+       ALC283_FIXUP_HEADSET_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
        ALC280_FIXUP_HP_GPIO4,
@@ -4855,6 +4855,7 @@ enum {
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
        ALC298_FIXUP_SPK_VOLUME,
+       ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5321,7 +5322,7 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC292_FIXUP_TPT440_DOCK,
        },
-       [ALC283_FIXUP_BXBT2807_MIC] = {
+       [ALC283_FIXUP_HEADSET_MIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x04a110f0 },
@@ -5516,6 +5517,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
        },
+       [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x90170151 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5560,6 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5651,7 +5662,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
-       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
+       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -5894,6 +5906,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60170},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
index 54c09acd3fed06f4fba7ef602d0bd7466825c100..16e459aedffe4c3a4af7733b9ea6cf22087c7fa2 100644 (file)
@@ -299,8 +299,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
        clk_enable(ssc_p->ssc->clk);
        ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
 
-       /* Reset the SSC to keep it at a clean status */
-       ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+       /* Reset the SSC unless initialized to keep it in a clean state */
+       if (!ssc_p->initialized)
+               ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                dir = 0;
index e5527bc570ae550394807f8062092af3e8f0a6e1..bcf1834c564812a6af86d547eb880cd93e83214e 100644 (file)
@@ -1247,8 +1247,8 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       /* By default only 32 BCLK per WCLK is supported */
-       dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_32;
+       /* By default only 64 BCLK per WCLK is supported */
+       dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64;
 
        snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode);
        snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK,
index cf0a39bb631aac9ae9bccd6c0b5163e65d8178b1..02352ed8961c47ccb270ef9ff8f7aff3784bd4ca 100644 (file)
@@ -412,6 +412,7 @@ static int max98371_i2c_remove(struct i2c_client *client)
 
 static const struct i2c_device_id max98371_i2c_id[] = {
        { "max98371", 0 },
+       { }
 };
 
 MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
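The one-line fix above adds the terminating entry that the I2C core relies on when it walks an ID table; without it the walk runs off the end of the array. The minimal shape of a correctly terminated table, with a hypothetical driver name:

    #include <linux/i2c.h>
    #include <linux/module.h>

    static const struct i2c_device_id foo_i2c_id[] = {
            { "foo", 0 },
            { }     /* sentinel: the core stops at this all-zero entry */
    };
    MODULE_DEVICE_TABLE(i2c, foo_i2c_id);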
index 5c9707ac4bbff20e1240d9961ea28bcc8d5af82f..2e59a85e360b67cb89ffed8fca86148db8d0a6c1 100644 (file)
@@ -212,31 +212,6 @@ static const unsigned short logtable[256] = {
        0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
 };
 
-static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
-       struct snd_soc_component *component = &codec->component;
-       struct snd_soc_dai *codec_dai, *_dai;
-
-       list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
-               if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
-                       strlen(NUVOTON_CODEC_DAI)))
-                       return codec_dai;
-       }
-       return NULL;
-}
-
-static bool nau8825_dai_is_active(struct nau8825 *nau8825)
-{
-       struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
-
-       if (codec_dai) {
-               if (codec_dai->playback_active || codec_dai->capture_active)
-                       return true;
-       }
-       return false;
-}
-
 /**
  * nau8825_sema_acquire - acquire the semaphore of nau88l25
  * @nau8825:  component to register the codec private data with
@@ -250,19 +225,26 @@ static bool nau8825_dai_is_active(struct nau8825 *nau8825)
  * Acquires the semaphore without jiffies. If no more tasks are allowed
  * to acquire the semaphore, calling this function will put the task to
  * sleep until the semaphore is released.
- * It returns if the semaphore was acquired.
+ * If the semaphore is not released within the specified number of jiffies,
+ * this function returns -ETIME.
+ * If the sleep is interrupted by a signal, this function will return -EINTR.
+ * It returns 0 if the semaphore was acquired successfully.
  */
-static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
+static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
 {
        int ret;
 
-       if (timeout)
+       if (timeout) {
                ret = down_timeout(&nau8825->xtalk_sem, timeout);
-       else
+               if (ret < 0)
+                       dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
+       } else {
                ret = down_interruptible(&nau8825->xtalk_sem);
+               if (ret < 0)
+                       dev_warn(nau8825->dev, "Acquire semaphore fail\n");
+       }
 
-       if (ret < 0)
-               dev_warn(nau8825->dev, "Acquire semaphone fail\n");
+       return ret;
 }
 
 /**
@@ -1205,6 +1187,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
        unsigned int val_len = 0;
 
+       nau8825_sema_acquire(nau8825, 2 * HZ);
+
        switch (params_width(params)) {
        case 16:
                val_len |= NAU8825_I2S_DL_16;
@@ -1225,6 +1209,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
                NAU8825_I2S_DL_MASK, val_len);
 
+       /* Release the semaphore. */
+       nau8825_sema_release(nau8825);
+
        return 0;
 }
 
@@ -1234,6 +1221,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
        unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
+       nau8825_sema_acquire(nau8825, 2 * HZ);
+
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
                ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1282,6 +1271,9 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
                NAU8825_I2S_MS_MASK, ctrl2_val);
 
+       /* Release the semaphore. */
+       nau8825_sema_release(nau8825);
+
        return 0;
 }
 
@@ -1611,8 +1603,11 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
                                         * cess and restore changes if process
                                         * is ongoing when ejection.
                                         */
+                                       int ret;
                                        nau8825->xtalk_protect = true;
-                                       nau8825_sema_acquire(nau8825, 0);
+                                       ret = nau8825_sema_acquire(nau8825, 0);
+                                       if (ret < 0)
+                                               nau8825->xtalk_protect = false;
                                }
                                /* Startup cross talk detection process */
                                nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
@@ -2238,23 +2233,14 @@ static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
 static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
 {
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+       int ret;
 
        regcache_cache_only(nau8825->regmap, false);
        regcache_sync(nau8825->regmap);
-       if (nau8825_is_jack_inserted(nau8825->regmap)) {
-               /* If the jack is inserted, we need to check whether the play-
-                * back is active before suspend. If active, the driver has to
-                * raise the protection for cross talk function to avoid the
-                * playback recovers before cross talk process finish. Other-
-                * wise, the playback will be interfered by cross talk func-
-                * tion. It is better to apply hardware related parameters
-                * before starting playback or record.
-                */
-               if (nau8825_dai_is_active(nau8825)) {
-                       nau8825->xtalk_protect = true;
-                       nau8825_sema_acquire(nau8825, 0);
-               }
-       }
+       nau8825->xtalk_protect = true;
+       ret = nau8825_sema_acquire(nau8825, 0);
+       if (ret < 0)
+               nau8825->xtalk_protect = false;
        enable_irq(nau8825->irq);
 
        return 0;
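nau8825_sema_acquire() now propagates the semaphore result instead of swallowing it, so callers such as the jack-insertion and resume paths can back out (clearing xtalk_protect) when the acquire fails. The core of the pattern, as a hedged standalone sketch:

    #include <linux/errno.h>
    #include <linux/semaphore.h>

    /* timeout in jiffies; 0 means block until success or a signal */
    static int sema_acquire(struct semaphore *sem, long timeout)
    {
            if (timeout)
                    return down_timeout(sem, timeout);      /* -ETIME on expiry */
            return down_interruptible(sem);                 /* -EINTR on signal */
    }

Caller side, matching the hunks above: arm the protection first, then disarm it again if the acquire reported failure, so the cross-talk state never claims a semaphore it does not hold.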
index a67ea10f41a1adce51e3625986e1170904429837..f2664396be6fd752c3fa62f398a6e2e1857eef91 100644 (file)
@@ -581,7 +581,7 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000,
        if (anc_transitions[i].dest == ANC_OFF)
                clk_disable_unprepare(wm2000->mclk);
 
-       return ret;
+       return 0;
 }
 
 static int wm2000_anc_set_mode(struct wm2000_priv *wm2000)
index 45602ca8536ea7935cf0a019eb399d2dd432cb5f..2d53c8d70705b2688a748967bbd0517335138fe2 100644 (file)
@@ -1,5 +1,5 @@
-obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) := simple-card-utils.o
-
+snd-soc-simple-card-utils-objs := simple-card-utils.o
 snd-soc-simple-card-objs       := simple-card.o
 
-obj-$(CONFIG_SND_SIMPLE_CARD)  += snd-soc-simple-card.o
+obj-$(CONFIG_SND_SIMPLE_CARD_UTILS)    += snd-soc-simple-card-utils.o
+obj-$(CONFIG_SND_SIMPLE_CARD)          += snd-soc-simple-card.o
index d89a9a1b2471554042df5d372b8220f7a54a7cf2..9599de69a880eb2a59ecb3fe142eb84a7ddacbcc 100644 (file)
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/module.h>
 #include <linux/of.h>
 #include <sound/simple_card_utils.h>
 
@@ -95,3 +96,8 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
        return 0;
 }
 EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
+
+/* Module information */
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
+MODULE_LICENSE("GPL v2");
index 25fcb796bd86414c49a08df1af445b1b4127a7a2..ddcb52a5185481057fc40b406bf3f09077b51984 100644 (file)
@@ -123,6 +123,11 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
 
        uuid_mod = (uuid_le *)uuid;
 
+       if (list_empty(&ctx->uuid_list)) {
+               dev_err(ctx->dev, "Module list is empty\n");
+               return -EINVAL;
+       }
+
        list_for_each_entry(module, &ctx->uuid_list, list) {
                if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
                        dfw_config->module_id = module->id;
index cd59536a761dd84bcca2fb3afb44dc82adf1b417..e3e7641677657ecffb828ec94c865d0e80201dbf 100644 (file)
@@ -672,8 +672,10 @@ static int skl_probe(struct pci_dev *pci,
 
        skl->nhlt = skl_nhlt_init(bus->dev);
 
-       if (skl->nhlt == NULL)
+       if (skl->nhlt == NULL) {
+               err = -ENODEV;
                goto out_free;
+       }
 
        skl_nhlt_update_topology_bin(skl);
 
index 0843a68f277c2cffb0bdffa9a5a200b86544950f..f61b3b58083b9c7cf6d8848de3318b00902ce61b 100644 (file)
 struct abe_twl6040 {
        int     jack_detection; /* board can detect jack events */
        int     mclk_freq;      /* MCLK frequency speed for twl6040 */
-
-       struct platform_device *dmic_codec_dev;
 };
 
+struct platform_device *dmic_codec_dev;
+
 static int omap_abe_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params)
 {
@@ -258,8 +258,6 @@ static int omap_abe_probe(struct platform_device *pdev)
        if (priv == NULL)
                return -ENOMEM;
 
-       priv->dmic_codec_dev = ERR_PTR(-EINVAL);
-
        if (snd_soc_of_parse_card_name(card, "ti,model")) {
                dev_err(&pdev->dev, "Card name is not provided\n");
                return -ENODEV;
@@ -284,13 +282,6 @@ static int omap_abe_probe(struct platform_device *pdev)
                num_links = 2;
                abe_twl6040_dai_links[1].cpu_of_node = dai_node;
                abe_twl6040_dai_links[1].platform_of_node = dai_node;
-
-               priv->dmic_codec_dev = platform_device_register_simple(
-                                               "dmic-codec", -1, NULL, 0);
-               if (IS_ERR(priv->dmic_codec_dev)) {
-                       dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
-                       return PTR_ERR(priv->dmic_codec_dev);
-               }
        } else {
                num_links = 1;
        }
@@ -299,16 +290,14 @@ static int omap_abe_probe(struct platform_device *pdev)
        of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
        if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency not provided\n");
-               ret = -EINVAL;
-               goto err_unregister;
+               return -EINVAL;
        }
 
        card->fully_routed = 1;
 
        if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency missing\n");
-               ret = -ENODEV;
-               goto err_unregister;
+               return -ENODEV;
        }
 
        card->dai_link = abe_twl6040_dai_links;
@@ -317,17 +306,9 @@ static int omap_abe_probe(struct platform_device *pdev)
        snd_soc_card_set_drvdata(card, priv);
 
        ret = snd_soc_register_card(card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               goto err_unregister;
-       }
-
-       return 0;
-
-err_unregister:
-       if (!IS_ERR(priv->dmic_codec_dev))
-               platform_device_unregister(priv->dmic_codec_dev);
 
        return ret;
 }
@@ -335,13 +316,9 @@ err_unregister:
 static int omap_abe_remove(struct platform_device *pdev)
 {
        struct snd_soc_card *card = platform_get_drvdata(pdev);
-       struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
 
        snd_soc_unregister_card(card);
 
-       if (!IS_ERR(priv->dmic_codec_dev))
-               platform_device_unregister(priv->dmic_codec_dev);
-
        return 0;
 }
 
@@ -361,7 +338,33 @@ static struct platform_driver omap_abe_driver = {
        .remove = omap_abe_remove,
 };
 
-module_platform_driver(omap_abe_driver);
+static int __init omap_abe_init(void)
+{
+       int ret;
+
+       dmic_codec_dev = platform_device_register_simple("dmic-codec", -1, NULL,
+                                                        0);
+       if (IS_ERR(dmic_codec_dev)) {
+               pr_err("%s: dmic-codec device registration failed\n", __func__);
+               return PTR_ERR(dmic_codec_dev);
+       }
+
+       ret = platform_driver_register(&omap_abe_driver);
+       if (ret) {
+               pr_err("%s: platform driver registration failed\n", __func__);
+               platform_device_unregister(dmic_codec_dev);
+       }
+
+       return ret;
+}
+module_init(omap_abe_init);
+
+static void __exit omap_abe_exit(void)
+{
+       platform_driver_unregister(&omap_abe_driver);
+       platform_device_unregister(dmic_codec_dev);
+}
+module_exit(omap_abe_exit);
 
 MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
 MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec");
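The omap-abe rework moves the dmic-codec registration out of the probe path entirely: the auxiliary device now exists for the lifetime of the module, which removes all of the per-probe error unwinding that the old code needed. The skeleton of that init/exit pairing, with a hypothetical driver name:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device *aux_dev;
    static struct platform_driver foo_driver;       /* assume defined elsewhere */

    static int __init foo_init(void)
    {
            int ret;

            aux_dev = platform_device_register_simple("aux-codec", -1, NULL, 0);
            if (IS_ERR(aux_dev))
                    return PTR_ERR(aux_dev);

            ret = platform_driver_register(&foo_driver);
            if (ret)
                    platform_device_unregister(aux_dev);    /* undo on failure */
            return ret;
    }
    module_init(foo_init);

    static void __exit foo_exit(void)
    {
            platform_driver_unregister(&foo_driver);
            platform_device_unregister(aux_dev);
    }
    module_exit(foo_exit);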
index e7cdc51fd806587a9ec1f8b4c8103bbffb5858d8..64609c77a79d1b875ac577801beadd5029a6fcac 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
-#include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/of_device.h>
@@ -55,7 +54,6 @@ struct omap_mcpdm {
        unsigned long phys_base;
        void __iomem *io_base;
        int irq;
-       struct clk *pdmclk;
 
        struct mutex mutex;
 
@@ -390,15 +388,14 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
        int ret;
 
-       clk_prepare_enable(mcpdm->pdmclk);
        pm_runtime_enable(mcpdm->dev);
 
        /* Disable lines while request is ongoing */
        pm_runtime_get_sync(mcpdm->dev);
        omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
 
-       ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler,
-                               0, "McPDM", (void *)mcpdm);
+       ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
+                         (void *)mcpdm);
 
        pm_runtime_put_sync(mcpdm->dev);
 
@@ -423,9 +420,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
 
+       free_irq(mcpdm->irq, (void *)mcpdm);
        pm_runtime_disable(mcpdm->dev);
 
-       clk_disable_unprepare(mcpdm->pdmclk);
        return 0;
 }
 
@@ -445,8 +442,6 @@ static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
                mcpdm->pm_active_count++;
        }
 
-       clk_disable_unprepare(mcpdm->pdmclk);
-
        return 0;
 }
 
@@ -454,8 +449,6 @@ static int omap_mcpdm_resume(struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
 
-       clk_prepare_enable(mcpdm->pdmclk);
-
        if (mcpdm->pm_active_count) {
                while (mcpdm->pm_active_count--)
                        pm_runtime_get_sync(mcpdm->dev);
@@ -549,15 +542,6 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
 
        mcpdm->dev = &pdev->dev;
 
-       mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
-       if (IS_ERR(mcpdm->pdmclk)) {
-               if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
-                        PTR_ERR(mcpdm->pdmclk));
-               mcpdm->pdmclk = NULL;
-       }
-
        ret =  devm_snd_soc_register_component(&pdev->dev,
                                               &omap_mcpdm_component,
                                               &omap_mcpdm_dai, 1);
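Switching from devm_request_irq() to explicit request_irq()/free_irq() changes when the handler is released: a devm-managed IRQ lives as long as the underlying device, while the DAI probe/remove pair here can run more than once over that lifetime, so (most likely) a devm request would accumulate or outlive the DAI. The explicit pairing, sketched from the hunks:

    /* in the DAI probe */
    ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM", mcpdm);
    if (ret)
            return ret;

    /* in the matching DAI remove: handler gone before runtime PM is disabled */
    free_irq(mcpdm->irq, mcpdm);
    pm_runtime_disable(mcpdm->dev);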
index 50849e137fc0edf2455a7d643b17c936f2e63794..92e88bca386e8199d4316898d27195e64251a626 100644 (file)
@@ -58,10 +58,12 @@ static struct platform_device *s3c24xx_uda134x_snd_device;
 
 static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
 {
-       int ret = 0;
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 #ifdef ENFORCE_RATES
        struct snd_pcm_runtime *runtime = substream->runtime;
 #endif
+       int ret = 0;
 
        mutex_lock(&clk_lock);
        pr_debug("%s %d\n", __func__, clk_users);
@@ -71,8 +73,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
                        printk(KERN_ERR "%s cannot get xtal\n", __func__);
                        ret = PTR_ERR(xtal);
                } else {
-                       pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
-                                      "pclk");
+                       pclk = clk_get(cpu_dai->dev, "iis");
                        if (IS_ERR(pclk)) {
                                printk(KERN_ERR "%s cannot get pclk\n",
                                       __func__);
index e39f916d0f2fa811b1faece1d212b0f8fe1fc5d8..969a5169de255a2fe66202ffc89808c89bfe98ec 100644 (file)
@@ -226,8 +226,12 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        ifscr = 0;
        fsrate = 0;
        if (fin != fout) {
+               u64 n;
+
                ifscr = 1;
-               fsrate = 0x0400000 / fout * fin;
+               n = (u64)0x0400000 * fin;
+               do_div(n, fout);
+               fsrate = n;
        }
 
        /*
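The SRC fix above replaces a 32-bit `0x0400000 / fout * fin`, which both truncates before multiplying and can overflow, with a widen-then-divide using do_div(), the kernel's 64-by-32 division helper that also works on 32-bit architectures where plain u64 division is unavailable. Sketch:

    #include <linux/types.h>
    #include <asm/div64.h>

    static u32 scale_ratio(u32 fin, u32 fout)
    {
            u64 n = (u64)0x0400000 * fin;   /* widen before multiplying */

            do_div(n, fout);                /* divides in place, returns remainder */
            return (u32)n;                  /* n now holds the quotient */
    }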
index d2df46c14c6823599c6210b3a31c9b1aa60ce72e..bf7b52fce597422cc2a2c171b9d6ac97c4e0c1e2 100644 (file)
@@ -121,7 +121,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
                dpcm_be_disconnect(fe, stream);
                fe->dpcm[stream].runtime = NULL;
-               goto fe_err;
+               goto path_err;
        }
 
        dpcm_clear_pending_state(fe, stream);
@@ -136,6 +136,8 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
        return 0;
 
+path_err:
+       dpcm_path_put(&list);
 fe_err:
        if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
                fe->dai_link->compr_ops->shutdown(cstream);
index 16369cad480388c326f2463273e17ebef0324924..4afa8dba5e982f664b1a214226688c50dfee1212 100644 (file)
@@ -1056,7 +1056,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
        if (!rtd->platform) {
                dev_err(card->dev, "ASoC: platform %s not registered\n",
                        dai_link->platform_name);
-               return -EPROBE_DEFER;
+               goto _err_defer;
        }
 
        soc_add_pcm_runtime(card, rtd);
@@ -2083,14 +2083,13 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        /* remove auxiliary devices */
        soc_remove_aux_devices(card);
 
+       snd_soc_dapm_free(&card->dapm);
        soc_cleanup_card_debugfs(card);
 
        /* remove the card */
        if (card->remove)
                card->remove(card);
 
-       snd_soc_dapm_free(&card->dapm);
-
        snd_card_free(card->snd_card);
        return 0;
 
index 8698c26773b3c8d43e52a5610155348d6261a2e5..d908ff8f97554cacdf6658288222744209287aee 100644 (file)
@@ -3493,6 +3493,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_pcm_stream *config = w->params + w->params_select;
        struct snd_pcm_substream substream;
        struct snd_pcm_hw_params *params = NULL;
+       struct snd_pcm_runtime *runtime = NULL;
        u64 fmt;
        int ret;
 
@@ -3541,6 +3542,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
 
        memset(&substream, 0, sizeof(substream));
 
+       /* Allocate a dummy snd_pcm_runtime for startup() and other ops() */
+       runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+       if (!runtime) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       substream.runtime = runtime;
+
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
                substream.stream = SNDRV_PCM_STREAM_CAPTURE;
@@ -3606,6 +3615,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
        }
 
 out:
+       kfree(runtime);
        kfree(params);
        return ret;
 }
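snd_soc_dai_link_event() now allocates a throwaway snd_pcm_runtime so the DAI startup() ops have a valid runtime to touch, and frees it on the shared exit path. Because kfree(NULL) is a no-op, one label can release both allocations regardless of which one succeeded. The shape of it, as a fragment rather than a full function:

    params = kzalloc(sizeof(*params), GFP_KERNEL);
    if (!params)
            return -ENOMEM;

    runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
    if (!runtime) {
            ret = -ENOMEM;
            goto out;
    }
    substream.runtime = runtime;

    /* ... run the DAI ops against the dummy runtime ... */

    out:
            kfree(runtime);         /* kfree(NULL) is a no-op */
            kfree(params);
            return ret;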
index 204cc074adb96f8f99ebd9d69bd7e1e4cf91f98e..41aa3355e920499830aeefdaf961659dfcabdba3 100644 (file)
@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
                err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
                if (err < 0) {
                        line6pcm->impulse_volume = 0;
-                       line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
                        return err;
                }
        } else {
@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
        spin_lock_irqsave(&pstr->lock, flags);
        clear_bit(type, &pstr->running);
        if (!pstr->running) {
+               spin_unlock_irqrestore(&pstr->lock, flags);
                line6_unlink_audio_urbs(line6pcm, pstr);
+               spin_lock_irqsave(&pstr->lock, flags);
                if (direction == SNDRV_PCM_STREAM_CAPTURE) {
                        line6pcm->prev_fbuf = NULL;
                        line6pcm->prev_fsize = 0;
index daf81d169a42043f265a48df58afcc5490d40f97..45dd34874f43a7f00fc481b25d7b5bf80ffb6c02 100644 (file)
@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
 static ssize_t serial_number_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%u\n", pod->serial_number);
 }
@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
 static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
                       pod->firmware_version % 100);
@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
 static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%d\n", pod->device_id);
 }
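All three attribute fixes above make the same change: these sysfs files now hang off the sound card device, so the dev passed to the show() callback is the card's struct device, and the driver data must come from dev_to_snd_card(dev)->private_data rather than usb_get_intfdata() on a USB interface. A hedged sketch of one such attribute, with a hypothetical private struct:

    #include <linux/device.h>
    #include <sound/core.h>

    struct my_pod { unsigned int serial_number; };  /* hypothetical */

    static ssize_t serial_number_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
    {
            struct snd_card *card = dev_to_snd_card(dev);
            struct my_pod *pod = card->private_data;

            return sprintf(buf, "%u\n", pod->serial_number);
    }
    static DEVICE_ATTR_RO(serial_number);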
index 6adde457b602e08aedd1806e8b79863d65e006cc..152292e5ee2b2246ab649a65a920ce4b1f7e1669 100644 (file)
@@ -1128,6 +1128,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 {
        /* devices which do not support reading the sample rate. */
        switch (chip->usb_id) {
+       case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
        case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
@@ -1138,7 +1139,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+       case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
        case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
        case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
index f209ea151dca8ab8559243dffd8662008844b27c..3051f86a9b5f4ab976568b266b41f47313f86b82 100644 (file)
@@ -87,9 +87,11 @@ struct kvm_regs {
 /* Supported VGICv3 address types  */
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
+#define KVM_VGIC_ITS_ADDR_TYPE         4
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE           (2 * SZ_64K)
 
 #define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_EL1_32BIT         1 /* CPU running a 32bit VM */
index 3b8e99ef9d58d44dc37ca64ced292cc7efcd6f39..a2ffec4139ad1cb8cebe816a9f0b3e261cd97d42 100644 (file)
@@ -93,6 +93,47 @@ struct kvm_s390_vm_cpu_machine {
        __u64 fac_list[256];
 };
 
+#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2
+#define KVM_S390_VM_CPU_MACHINE_FEAT   3
+
+#define KVM_S390_VM_CPU_FEAT_NR_BITS   1024
+#define KVM_S390_VM_CPU_FEAT_ESOP      0
+#define KVM_S390_VM_CPU_FEAT_SIEF2     1
+#define KVM_S390_VM_CPU_FEAT_64BSCAO   2
+#define KVM_S390_VM_CPU_FEAT_SIIF      3
+#define KVM_S390_VM_CPU_FEAT_GPERE     4
+#define KVM_S390_VM_CPU_FEAT_GSLS      5
+#define KVM_S390_VM_CPU_FEAT_IB                6
+#define KVM_S390_VM_CPU_FEAT_CEI       7
+#define KVM_S390_VM_CPU_FEAT_IBS       8
+#define KVM_S390_VM_CPU_FEAT_SKEY      9
+#define KVM_S390_VM_CPU_FEAT_CMMA      10
+#define KVM_S390_VM_CPU_FEAT_PFMFI     11
+#define KVM_S390_VM_CPU_FEAT_SIGPIF    12
+struct kvm_s390_vm_cpu_feat {
+       __u64 feat[16];
+};
+
+#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC      4
+#define KVM_S390_VM_CPU_MACHINE_SUBFUNC                5
+/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */
+struct kvm_s390_vm_cpu_subfunc {
+       __u8 plo[32];           /* always */
+       __u8 ptff[16];          /* with TOD-clock steering */
+       __u8 kmac[16];          /* with MSA */
+       __u8 kmc[16];           /* with MSA */
+       __u8 km[16];            /* with MSA */
+       __u8 kimd[16];          /* with MSA */
+       __u8 klmd[16];          /* with MSA */
+       __u8 pckmo[16];         /* with MSA3 */
+       __u8 kmctr[16];         /* with MSA4 */
+       __u8 kmf[16];           /* with MSA4 */
+       __u8 kmo[16];           /* with MSA4 */
+       __u8 pcc[16];           /* with MSA4 */
+       __u8 ppno[16];          /* with MSA5 */
+       __u8 reserved[1824];
+};
+
 /* kvm attributes for crypto */
 #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW       0
 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW       1
index 8fb5d4a6dd25bccfae4e2f3beef5ed81725adb60..3ac6343689394d0b128907a3f338958ec0f32471 100644 (file)
        exit_code_ipa0(0xB2, 0x4c, "TAR"),      \
        exit_code_ipa0(0xB2, 0x50, "CSP"),      \
        exit_code_ipa0(0xB2, 0x54, "MVPG"),     \
+       exit_code_ipa0(0xB2, 0x56, "STHYI"),    \
        exit_code_ipa0(0xB2, 0x58, "BSG"),      \
        exit_code_ipa0(0xB2, 0x5a, "BSA"),      \
        exit_code_ipa0(0xB2, 0x5f, "CHSC"),     \
index 4a413485f9eb8ef58ec71c77ff2594f4300c8ea6..92a8308b96f64cb6ce845a8379ca06cb9a6a00d6 100644 (file)
 #define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT    ( 9*32+22) /* PCOMMIT instruction */
 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
 #define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  */
 #define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
-
+#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
 #endif /* _ASM_X86_CPUFEATURES_H */
index 911e9358ceb184b6b0b0f38b9c7b853fc4506fbe..85599ad4d0247863cef655d02b9a4b3f83c77fb7 100644 (file)
@@ -56,5 +56,7 @@
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
 #define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17        0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
index 4916144e3c42668a3e07af33859b4a1af3f2985b..fac9a5c0abe94b233b72b35bca8c7a665847b694 100644 (file)
@@ -99,5 +99,7 @@
 #define REQUIRED_MASK14        0
 #define REQUIRED_MASK15        0
 #define REQUIRED_MASK16        0
+#define REQUIRED_MASK17        0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
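Both feature-mask headers gain a compile-time tripwire: BUILD_BUG_ON_ZERO(NCAPINTS != 18) breaks the build as soon as someone adds a capability word without adding the matching DISABLED_MASK*/REQUIRED_MASK* entry. The idiom behind it is roughly the following (hedged; the kernel's exact definition has moved around over time):

    /* -!!(cond) is 0 when cond is false and -1 when true; a negative
     * bit-field width is a compile error, so this only compiles when
     * cond is false, and then evaluates to 0 (size_t) with GCC. */
    #define MY_BUILD_BUG_ON_ZERO(cond) (sizeof(struct { int: -!!(cond); }))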
index 5b15d94a33f818d04ee7ae2a0f5685125bd89a40..37fee272618f1de348a7d5961f1792debba72991 100644 (file)
@@ -78,7 +78,6 @@
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
-#define EXIT_REASON_PCOMMIT             65
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_INVVPID,               "INVVPID" }, \
        { EXIT_REASON_INVPCID,               "INVPCID" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
-       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
-       { EXIT_REASON_PCOMMIT,               "PCOMMIT" }
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
index 448ed96b3b4f88edde1bdcad5d7750edf73099e6..1c14c25951583611167fe0d48ab00479d108232a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * gpio-hammer - example swiss army knife to shake GPIO lines on a system
+ * gpio-event-mon - monitor GPIO line events from userspace
  *
  * Copyright (C) 2016 Linus Walleij
  *
index 0e8a1f7a292d13edba366a9ad60b91dd2b47747c..f39c0e9c0d5c92205384279fbc2996aaa2ba8619 100644 (file)
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
        int notrigger = 0;
        char *dummy;
 
-       struct iio_channel_info *channels;
+       struct iio_channel_info *channels = NULL;
 
        register_cleanup();
 
@@ -456,7 +456,7 @@ int main(int argc, char **argv)
 
        if (notrigger) {
                printf("trigger-less mode selected\n");
-       } if (trig_num >= 0) {
+       } else if (trig_num >= 0) {
                char *trig_dev_name;
                ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
                if (ret < 0) {
index b968794773116527208be6eb4b7dfccff00edd2f..f436d2420a18575095e66b7bfbf15b0abbe83fe8 100644 (file)
@@ -8,7 +8,11 @@ void *memdup(const void *src, size_t len);
 
 int strtobool(const char *s, bool *res);
 
-#ifdef __GLIBC__
+/*
+ * glibc-based builds need the extern declaration, while uClibc doesn't.
+ * However, uClibc headers also define __GLIBC__, hence the check below.
+ */
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
 extern size_t strlcpy(char *dest, const char *src, size_t size);
 #endif
 
index 406459b935a27c4f9b518426a4fd60493432221d..da218fec605657ee415f8ad71a95d8851330a9de 100644 (file)
@@ -84,6 +84,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_PERCPU_HASH,
        BPF_MAP_TYPE_PERCPU_ARRAY,
        BPF_MAP_TYPE_STACK_TRACE,
+       BPF_MAP_TYPE_CGROUP_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -93,6 +94,7 @@ enum bpf_prog_type {
        BPF_PROG_TYPE_SCHED_CLS,
        BPF_PROG_TYPE_SCHED_ACT,
        BPF_PROG_TYPE_TRACEPOINT,
+       BPF_PROG_TYPE_XDP,
 };
 
 #define BPF_PSEUDO_MAP_FD      1
@@ -313,6 +315,66 @@ enum bpf_func_id {
         */
        BPF_FUNC_skb_get_tunnel_opt,
        BPF_FUNC_skb_set_tunnel_opt,
+
+       /**
+        * bpf_skb_change_proto(skb, proto, flags)
+        * Change protocol of the skb. Currently supported is
+        * v4 -> v6, v6 -> v4 transitions. The helper will also
+        * resize the skb. The eBPF program is expected to fill in
+        * the new headers via skb_store_bytes and lX_csum_replace.
+        * @skb: pointer to skb
+        * @proto: new skb->protocol type
+        * @flags: reserved
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_skb_change_proto,
+
+       /**
+        * bpf_skb_change_type(skb, type)
+        * Change packet type of skb.
+        * @skb: pointer to skb
+        * @type: new skb->pkt_type type
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_skb_change_type,
+
+       /**
+        * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+        * @skb: pointer to skb
+        * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+        * @index: index of the cgroup in the bpf_map
+        * Return:
+        *   == 0 skb failed the cgroup2 descendant test
+        *   == 1 skb succeeded the cgroup2 descendant test
+        *    < 0 error
+        */
+       BPF_FUNC_skb_in_cgroup,
+
+       /**
+        * bpf_get_hash_recalc(skb)
+        * Retrieve and possibly recalculate skb->hash.
+        * @skb: pointer to skb
+        * Return: hash
+        */
+       BPF_FUNC_get_hash_recalc,
+
+       /**
+        * u64 bpf_get_current_task(void)
+        * Returns current task_struct
+        * Return: current
+        */
+       BPF_FUNC_get_current_task,
+
+       /**
+        * bpf_probe_write_user(void *dst, void *src, int len)
+        * safely attempt to write to a location
+        * @dst: destination address in userspace
+        * @src: source address on stack
+        * @len: number of bytes to copy
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_probe_write_user,
+
        __BPF_FUNC_MAX_ID,
 };
 
@@ -347,9 +409,11 @@ enum bpf_func_id {
 #define BPF_F_ZERO_CSUM_TX             (1ULL << 1)
 #define BPF_F_DONT_FRAGMENT            (1ULL << 2)
 
-/* BPF_FUNC_perf_event_output flags. */
+/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
 #define BPF_F_INDEX_MASK               0xffffffffULL
 #define BPF_F_CURRENT_CPU              BPF_F_INDEX_MASK
+/* BPF_FUNC_perf_event_output for sk_buff input context. */
+#define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
@@ -386,4 +450,24 @@ struct bpf_tunnel_key {
        __u32 tunnel_label;
 };
 
+/* User return codes for XDP prog type.
+ * A valid XDP program must return one of these defined values. All other
+ * return codes are reserved for future use. Unknown return codes will result
+ * in packet drop.
+ */
+enum xdp_action {
+       XDP_ABORTED = 0,
+       XDP_DROP,
+       XDP_PASS,
+       XDP_TX,
+};
+
+/* user accessible metadata for XDP packet hook
+ * new fields must be added to the end of this structure
+ */
+struct xdp_md {
+       __u32 data;
+       __u32 data_end;
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
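The new BPF_PROG_TYPE_XDP and struct xdp_md above define the earliest packet hook: the program sees raw packet bounds in data/data_end and must return one of the xdp_action codes, with anything else treated as a drop. A minimal pass-through sketch against this UAPI (hypothetical section name; built with clang targeting bpf):

    #include <linux/bpf.h>

    /* the loader finds the program by its ELF section name */
    __attribute__((section("xdp"), used))
    int xdp_pass_all(struct xdp_md *ctx)
    {
            void *data = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;

            /* verifier-friendly bounds check before touching the packet */
            if (data + 14 > data_end)       /* shorter than an Ethernet header */
                    return XDP_DROP;

            return XDP_PASS;
    }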
index d9836c5eb694c48c30a6d07827ee36ed086b4309..11c8d9bc762ef0c4bde99dec4292e84ff00d3477 100644 (file)
@@ -3266,6 +3266,9 @@ int main(int argc, char *argv[])
                }
        }
 
+       /* If we exit via err(), this kills all the threads, restores tty. */
+       atexit(cleanup_devices);
+
        /* We always have a console device, and it's always device 1. */
        setup_console();
 
@@ -3369,9 +3372,6 @@ int main(int argc, char *argv[])
        /* Ensure that we terminate if a device-servicing child dies. */
        signal(SIGCHLD, kill_launcher);
 
-       /* If we exit via err(), this kills all the threads, restores tty. */
-       atexit(cleanup_devices);
-
        /* If requested, chroot to a directory */
        if (chroot_path) {
                if (chroot(chroot_path) != 0)
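Moving the atexit(cleanup_devices) registration up matters because err() exits through the atexit chain: any failure between device setup and the old registration point used to leave launcher threads running and the tty unrestored. The general ordering rule, in a small userspace sketch:

    #include <stdlib.h>

    static char *buf;               /* static, so NULL until allocated */

    static void cleanup(void)
    {
            free(buf);              /* free(NULL) is defined as a no-op */
    }

    int main(void)
    {
            /* register before any allocation or exit path that needs it */
            atexit(cleanup);

            buf = malloc(64);
            if (!buf)
                    exit(1);        /* cleanup still runs safely */

            return 0;
    }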
index 736da44596e451fa1a14d9a045f7feda269a0779..b303bcdd8ed15fb9d140e0e7369388bc714aaace 100644 (file)
@@ -176,10 +176,18 @@ Each probe argument follows below syntax.
 
 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
 '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters.
-'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for details)
 
 On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
 
+TYPES
+-----
+Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. The 's' and 'u' prefixes mean those types are signed and unsigned, respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only the signedness and leave the size to be auto-detected by perf probe.
+String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+Bitfield is another special type, which takes 3 parameters: bit-width, bit-offset, and container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
 LINE SYNTAX
 -----------
 Line range is described by following syntax.
index 1f6c70594f0f79e378163c6430b9028642eee45d..053bbbd84ece30c673afe7e176328f8390a6bda9 100644 (file)
@@ -116,8 +116,8 @@ OPTIONS
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-       srcline, period, iregs, brstack, brstacksym, flags.
-        Field list can be prepended with the type, trace, sw or hw,
+        srcline, period, iregs, brstack, brstacksym, flags, bpf-output,
+        callindent. Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -F sw:comm,tid,time,ip,sym  and -F trace:time,cpu,trace
 
index c6d0f91731a14732333af62d0a40a3ea43fb4c99..35745a733100e70f27c6c95ce97ce4ef5a6fabfa 100644 (file)
@@ -54,10 +54,6 @@ int arch__compare_symbol_names(const char *namea, const char *nameb)
 #endif
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
-bool arch__prefers_symtab(void)
-{
-       return true;
-}
 
 #ifdef HAVE_LIBELF_SUPPORT
 void arch__sym_update(struct symbol *s, GElf_Sym *sym)
@@ -100,4 +96,29 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
                        tev->point.offset += lep_offset;
        }
 }
+
+#ifdef HAVE_LIBELF_SUPPORT
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          int ntevs)
+{
+       struct probe_trace_event *tev;
+       struct map *map;
+       struct symbol *sym = NULL;
+       struct rb_node *tmp;
+       int i = 0;
+
+       map = get_target_map(pev->target, pev->uprobes);
+       if (!map || map__load(map, NULL) < 0)
+               return;
+
+       for (i = 0; i < ntevs; i++) {
+               tev = &pev->tevs[i];
+               map__for_each_symbol(map, sym, tmp) {
+                       if (map->unmap_ip(map, sym->start) == tev->point.address)
+                               arch__fix_tev_from_maps(pev, tev, map, sym);
+               }
+       }
+}
+#endif /* HAVE_LIBELF_SUPPORT */
+
 #endif
index fb51457ba338f8c1a3dc549582213eaf5446ec56..a2412e9d883b5246e8ebe1f0b356bbbebe01adcf 100644 (file)
@@ -501,7 +501,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
-       bool have_timing_info;
+       bool have_timing_info, need_immediate = false;
        struct perf_evsel *evsel, *intel_pt_evsel = NULL;
        const struct cpu_map *cpus = evlist->cpus;
        bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
@@ -655,6 +655,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                                ptr->have_sched_switch = 3;
                        } else {
                                opts->record_switch_events = true;
+                               need_immediate = true;
                                if (cpu_wide)
                                        ptr->have_sched_switch = 3;
                                else
@@ -700,6 +701,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                tracking_evsel->attr.freq = 0;
                tracking_evsel->attr.sample_period = 1;
 
+               if (need_immediate)
+                       tracking_evsel->immediate = true;
+
                /* In per-cpu case, always need the time of mmap events etc */
                if (!cpu_map__empty(cpus)) {
                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
index d608a2c9e48cd219e82697bdfa9331a477e9eeed..d1ce29be560e5e7dad2a2faca9a68c2246510a11 100644 (file)
@@ -88,6 +88,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
        if (mem->operation & MEM_OPERATION_LOAD)
                perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
 
+       if (mem->operation & MEM_OPERATION_STORE)
+               perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+
        if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
                rec_argv[i++] = "-W";
 
index 971ff91b16cb3be52702cca780c3df818d52c51a..c859e59dfe3e7efae711fa056967c910a987d989 100644 (file)
@@ -371,14 +371,16 @@ static int perf_session__check_output_opt(struct perf_session *session)
 
        if (!no_callchain) {
                bool use_callchain = false;
+               bool not_pipe = false;
 
                evlist__for_each_entry(session->evlist, evsel) {
+                       not_pipe = true;
                        if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                                use_callchain = true;
                                break;
                        }
                }
-               if (!use_callchain)
+               if (not_pipe && !use_callchain)
                        symbol_conf.use_callchain = false;
        }
 
@@ -1690,8 +1692,13 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
        snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
 
        scripts_dir = opendir(scripts_path);
-       if (!scripts_dir)
-               return -1;
+       if (!scripts_dir) {
+               fprintf(stdout,
+                       "open(%s) failed.\n"
+                       "Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
+                       scripts_path);
+               exit(-1);
+       }
 
        for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
@@ -2116,7 +2123,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                     "Valid types: hw,sw,trace,raw. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
                     "addr,symoff,period,iregs,brstack,brstacksym,flags,"
-                    "callindent", parse_output_fields),
+                    "bpf-output,callindent", parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
index 0c16d20d7e32fa2eb7377247c2e5542d2a5d076a..3c7452b39f57649b05d675db3d19395fb765df2d 100644 (file)
@@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
-static void read_counters(bool close_counters)
+static void read_counters(void)
 {
        struct perf_evsel *counter;
 
@@ -341,11 +341,6 @@ static void read_counters(bool close_counters)
 
                if (perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);
-
-               if (close_counters) {
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-                                            thread_map__nr(evsel_list->threads));
-               }
        }
 }
 
@@ -353,7 +348,7 @@ static void process_interval(void)
 {
        struct timespec ts, rs;
 
-       read_counters(false);
+       read_counters();
 
        clock_gettime(CLOCK_MONOTONIC, &ts);
        diff_timespec(&rs, &ts, &ref_time);
@@ -380,6 +375,17 @@ static void enable_counters(void)
                perf_evlist__enable(evsel_list);
 }
 
+static void disable_counters(void)
+{
+       /*
+        * If we don't have a tracee (when attaching to an existing task or
+        * CPU), counters may still be running. To get accurate group
+        * ratios, we must stop groups from counting before reading their
+        * constituent counters.
+        */
+       if (!target__none(&target))
+               perf_evlist__disable(evsel_list);
+}
+
 static volatile int workload_exec_errno;
 
 /*
@@ -657,11 +663,20 @@ try_again:
                }
        }
 
+       disable_counters();
+
        t1 = rdclock();
 
        update_stats(&walltime_nsecs_stats, t1 - t0);
 
-       read_counters(true);
+       /*
+        * Closing a group leader splits the group, and as we only disable
+        * group leaders, this results in the remaining events becoming
+        * enabled. To avoid arbitrary skew, we must read all counters
+        * before closing any group leader.
+        */
+       read_counters();
+       perf_evlist__close(evsel_list);
 
        return WEXITSTATUS(status);
 }
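
The two comments above encode an ordering invariant: groups must stop
counting before any member is read, and every counter must be read
before any group leader is closed, since closing a leader splits the
group and re-enables the remaining members. A runnable toy sketch of
that lifecycle (stand-in stubs, not perf's real API):

    #include <stdio.h>

    static void enable_counters(void)   { puts("enable groups");  }
    static void run_workload(void)      { puts("workload runs");  }
    static void disable_counters(void)  { puts("disable groups"); }
    static void read_counters(void)     { puts("read members");   }
    static void close_counters(void)    { puts("close leaders");  }

    int main(void)
    {
            enable_counters();
            run_workload();
            disable_counters();  /* stop groups before reading them     */
            read_counters();     /* read all members while grouped      */
            close_counters();    /* splitting the group is now harmless */
            return 0;
    }
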
index d9b80ef881cde43a1e134b89c6a752f2ae06d108..21fd573106edd90ee4c33b3a0a9b46e43ecd052b 100644 (file)
@@ -507,17 +507,17 @@ static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
        u8 op, result, type = (config >>  0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";
 
-       if (type > PERF_COUNT_HW_CACHE_MAX)
+       if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;
 
        op = (config >>  8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
-       if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+       if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;
 
        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
-       if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+       if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;
 
        err = "invalid-cache";
index 9c8f15da86ce8ad8f735815cb3c0f55ca10d7167..8ff6c6a61291f9bdfbb8c50e9e0018eb887a8665 100644 (file)
@@ -123,8 +123,6 @@ struct intel_pt_decoder {
        bool have_calc_cyc_to_tsc;
        int exec_mode;
        unsigned int insn_bytes;
-       uint64_t sign_bit;
-       uint64_t sign_bits;
        uint64_t period;
        enum intel_pt_period_type period_type;
        uint64_t tot_insn_cnt;
@@ -191,9 +189,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
        decoder->data               = params->data;
        decoder->return_compression = params->return_compression;
 
-       decoder->sign_bit           = (uint64_t)1 << 47;
-       decoder->sign_bits          = ~(((uint64_t)1 << 48) - 1);
-
        decoder->period             = params->period;
        decoder->period_type        = params->period_type;
 
@@ -362,21 +357,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen)
        return 0;
 }
 
-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
-                                const struct intel_pt_pkt *packet,
+static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
                                 uint64_t last_ip)
 {
        uint64_t ip;
 
        switch (packet->count) {
-       case 2:
+       case 1:
                ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
                     packet->payload;
                break;
-       case 4:
+       case 2:
                ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
                     packet->payload;
                break;
+       case 3:
+               ip = packet->payload;
+               /* Sign-extend 6-byte ip */
+               if (ip & (uint64_t)0x800000000000ULL)
+                       ip |= (uint64_t)0xffff000000000000ULL;
+               break;
+       case 4:
+               ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
+                    packet->payload;
+               break;
        case 6:
                ip = packet->payload;
                break;
@@ -384,16 +388,12 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
                return 0;
        }
 
-       if (ip & decoder->sign_bit)
-               return ip | decoder->sign_bits;
-
        return ip;
 }
 
 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
 {
-       decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
-                                           decoder->last_ip);
+       decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
 }
 
 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -1657,6 +1657,12 @@ next:
        }
 }
 
+static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
+{
+       return decoder->last_ip || decoder->packet.count == 0 ||
+              decoder->packet.count == 3 || decoder->packet.count == 6;
+}
+
 /* Walk PSB+ packets to get in sync. */
 static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
 {
@@ -1677,8 +1683,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
 
                case INTEL_PT_FUP:
                        decoder->pge = true;
-                       if (decoder->last_ip || decoder->packet.count == 6 ||
-                           decoder->packet.count == 0) {
+                       if (intel_pt_have_ip(decoder)) {
                                uint64_t current_ip = decoder->ip;
 
                                intel_pt_set_ip(decoder);
@@ -1767,8 +1772,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
                case INTEL_PT_TIP_PGE:
                case INTEL_PT_TIP:
                        decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
-                       if (decoder->last_ip || decoder->packet.count == 6 ||
-                           decoder->packet.count == 0)
+                       if (intel_pt_have_ip(decoder))
                                intel_pt_set_ip(decoder);
                        if (decoder->ip)
                                return 0;
@@ -1776,9 +1780,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 
                case INTEL_PT_FUP:
                        if (decoder->overflow) {
-                               if (decoder->last_ip ||
-                                   decoder->packet.count == 6 ||
-                                   decoder->packet.count == 0)
+                               if (intel_pt_have_ip(decoder))
                                        intel_pt_set_ip(decoder);
                                if (decoder->ip)
                                        return 0;
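
The new case 3 reconstructs a 6-byte IP payload by sign-extending bit 47
into the top 16 bits, which is what lets the per-decoder sign_bit and
sign_bits fields be deleted above. The extension in isolation, as a
runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend a 48-bit value to 64 bits. */
    static uint64_t sext48(uint64_t ip)
    {
            if (ip & 0x800000000000ULL)
                    ip |= 0xffff000000000000ULL;
            return ip;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)sext48(0xffffabcd1234ULL));
            /* prints 0xffffffffabcd1234 */
            return 0;
    }
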
index b1257c816310fefe0bed0e963fc106273c019da1..4f7b32020487011a6bb04ec8d339e7d7c4dfe7cc 100644 (file)
@@ -292,36 +292,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
                           const unsigned char *buf, size_t len,
                           struct intel_pt_pkt *packet)
 {
-       switch (byte >> 5) {
+       int ip_len;
+
+       packet->count = byte >> 5;
+
+       switch (packet->count) {
        case 0:
-               packet->count = 0;
+               ip_len = 0;
                break;
        case 1:
                if (len < 3)
                        return INTEL_PT_NEED_MORE_BYTES;
-               packet->count = 2;
+               ip_len = 2;
                packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
                break;
        case 2:
                if (len < 5)
                        return INTEL_PT_NEED_MORE_BYTES;
-               packet->count = 4;
+               ip_len = 4;
                packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
                break;
        case 3:
-       case 6:
+       case 4:
                if (len < 7)
                        return INTEL_PT_NEED_MORE_BYTES;
-               packet->count = 6;
+               ip_len = 6;
                memcpy_le64(&packet->payload, buf + 1, 6);
                break;
+       case 6:
+               if (len < 9)
+                       return INTEL_PT_NEED_MORE_BYTES;
+               ip_len = 8;
+               packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+               break;
        default:
                return INTEL_PT_BAD_PACKET;
        }
 
        packet->type = type;
 
-       return packet->count + 1;
+       return ip_len + 1;
 }
 
 static int intel_pt_get_mode(const unsigned char *buf, size_t len,
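
With this change packet->count carries the raw 3-bit IPBytes field and
the payload length is tracked separately. Condensing the two switch
statements above into one table (a sketch, with -1 marking the reserved
encodings that decode as bad packets):

    /* IPBytes value -> payload length in bytes */
    static const int ip_payload_len[8] = {
            [0] = 0,    /* IP suppressed                        */
            [1] = 2,    /* update low 16 bits of last IP        */
            [2] = 4,    /* update low 32 bits of last IP        */
            [3] = 6,    /* 48 bits, sign-extended               */
            [4] = 6,    /* 48 bits, top 16 taken from last IP   */
            [5] = -1,
            [6] = 8,    /* full 64-bit IP                       */
            [7] = -1,
    };
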
index 9f3305f6b6d5871ebde2c27f4ca1b2ac59953340..95f0884aae0286078681ecf19546546f9d6fb2a9 100644 (file)
@@ -1,3 +1,4 @@
+#include <sys/sysmacros.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
index 953dc1ab2ed7bd0be442c4491c9c8a4862aa3386..28733962cd80a63e1376b5b71f4771ab8f8857f7 100644 (file)
@@ -170,15 +170,17 @@ static struct map *kernel_get_module_map(const char *module)
                module = "kernel";
 
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+               /* short_name is "[module]" */
                if (strncmp(pos->dso->short_name + 1, module,
-                           pos->dso->short_name_len - 2) == 0) {
+                           pos->dso->short_name_len - 2) == 0 &&
+                   module[pos->dso->short_name_len - 2] == '\0') {
                        return pos;
                }
        }
        return NULL;
 }
 
-static struct map *get_target_map(const char *target, bool user)
+struct map *get_target_map(const char *target, bool user)
 {
        /* Init maps of given executable or kernel */
        if (user)
@@ -385,7 +387,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
                if (uprobes)
                        address = sym->start;
                else
-                       address = map->unmap_ip(map, sym->start);
+                       address = map->unmap_ip(map, sym->start) - map->reloc;
                break;
        }
        if (!address) {
@@ -664,22 +666,14 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-/* Post processing the probe events */
-static int post_process_probe_trace_events(struct probe_trace_event *tevs,
-                                          int ntevs, const char *module,
-                                          bool uprobe)
+static int
+post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
+                                      int ntevs)
 {
        struct ref_reloc_sym *reloc_sym;
        char *tmp;
        int i, skipped = 0;
 
-       if (uprobe)
-               return add_exec_to_probe_trace_events(tevs, ntevs, module);
-
-       /* Note that currently ref_reloc_sym based probe is not for drivers */
-       if (module)
-               return add_module_to_probe_trace_events(tevs, ntevs, module);
-
        reloc_sym = kernel_get_ref_reloc_sym();
        if (!reloc_sym) {
                pr_warning("Relocated base symbol is not found!\n");
@@ -711,6 +705,34 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
        return skipped;
 }
 
+void __weak
+arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused,
+                                     int ntevs __maybe_unused)
+{
+}
+
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          struct probe_trace_event *tevs,
+                                          int ntevs, const char *module,
+                                          bool uprobe)
+{
+       int ret;
+
+       if (uprobe)
+               ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
+       else if (module)
+               /* Currently, ref_reloc_sym-based probes are not used for drivers */
+               ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+       else
+               ret = post_process_kernel_probe_trace_events(tevs, ntevs);
+
+       if (ret >= 0)
+               arch__post_process_probe_trace_events(pev, ntevs);
+
+       return ret;
+}
+
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs)
@@ -749,7 +771,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
-               ret = post_process_probe_trace_events(*tevs, ntevs,
+               ret = post_process_probe_trace_events(pev, *tevs, ntevs,
                                                pev->target, pev->uprobes);
                if (ret < 0 || ret == ntevs) {
                        clear_probe_trace_events(*tevs, ntevs);
@@ -2936,8 +2958,6 @@ errout:
        return err;
 }
 
-bool __weak arch__prefers_symtab(void) { return false; }
-
 /* Concatenate two arrays */
 static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
 {
@@ -3158,12 +3178,6 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        if (ret > 0 || pev->sdt)        /* SDT can be found only in the cache */
                return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
 
-       if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
-               ret = find_probe_trace_events_from_map(pev, tevs);
-               if (ret > 0)
-                       return ret; /* Found in symbol table */
-       }
-
        /* Convert perf_probe_event with debuginfo */
        ret = try_to_find_probe_trace_events(pev, tevs);
        if (ret != 0)
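
The kernel_get_module_map() change above fixes a prefix-match bug:
short_name is "[module]", and comparing only short_name_len - 2 bytes
let a longer requested name match a shorter loaded module. The added
terminator check makes the match exact. Standalone illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* short_name is "[module]"; require an exact name match. */
    static bool module_matches(const char *short_name, size_t short_name_len,
                               const char *module)
    {
            return strncmp(short_name + 1, module, short_name_len - 2) == 0 &&
                   module[short_name_len - 2] == '\0';
    }

    int main(void)
    {
            /* "[nf]" must match "nf" but not a request for "nf_nat" */
            printf("%d %d\n",
                   module_matches("[nf]", 4, "nf"),       /* 1 */
                   module_matches("[nf]", 4, "nf_nat"));  /* 0 */
            return 0;
    }
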
index e18ea9fe63857cb7a9b382dac563fd2c8cdfbd85..f4f45db77c1c1ec59c3ee505f525f2b2561530ee 100644 (file)
@@ -158,7 +158,6 @@ int show_line_range(struct line_range *lr, const char *module, bool user);
 int show_available_vars(struct perf_probe_event *pevs, int npevs,
                        struct strfilter *filter);
 int show_available_funcs(const char *module, struct strfilter *filter, bool user);
-bool arch__prefers_symtab(void);
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
                             struct probe_trace_event *tev, struct map *map,
                             struct symbol *sym);
@@ -173,4 +172,9 @@ int e_snprintf(char *str, size_t size, const char *format, ...)
 int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
                            struct perf_probe_arg *pvar);
 
+struct map *get_target_map(const char *target, bool user);
+
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          int ntevs);
+
 #endif /*_PROBE_EVENT_H */
index 9aed9c332da656c69d91693e531df87fb03007b1..9c3b9ed5b3c3ec68ee175b6bec91d1c90211798b 100644 (file)
@@ -133,7 +133,7 @@ int probe_file__open_both(int *kfd, int *ufd, int flag)
 /* Get raw string list of current kprobe_events  or uprobe_events */
 struct strlist *probe_file__get_rawlist(int fd)
 {
-       int ret, idx;
+       int ret, idx, fddup;
        FILE *fp;
        char buf[MAX_CMDLEN];
        char *p;
@@ -143,8 +143,17 @@ struct strlist *probe_file__get_rawlist(int fd)
                return NULL;
 
        sl = strlist__new(NULL, NULL);
+       if (sl == NULL)
+               return NULL;
+
+       fddup = dup(fd);
+       if (fddup < 0)
+               goto out_free_sl;
+
+       fp = fdopen(fddup, "r");
+       if (!fp)
+               goto out_close_fddup;
 
-       fp = fdopen(dup(fd), "r");
        while (!feof(fp)) {
                p = fgets(buf, MAX_CMDLEN, fp);
                if (!p)
@@ -156,13 +165,21 @@ struct strlist *probe_file__get_rawlist(int fd)
                ret = strlist__add(sl, buf);
                if (ret < 0) {
                        pr_debug("strlist__add failed (%d)\n", ret);
-                       strlist__delete(sl);
-                       return NULL;
+                       goto out_close_fp;
                }
        }
        fclose(fp);
 
        return sl;
+
+out_close_fp:
+       fclose(fp);
+       goto out_free_sl;
+out_close_fddup:
+       close(fddup);
+out_free_sl:
+       strlist__delete(sl);
+       return NULL;
 }
 
 static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
@@ -447,12 +464,17 @@ static int probe_cache__load(struct probe_cache *pcache)
 {
        struct probe_cache_entry *entry = NULL;
        char buf[MAX_CMDLEN], *p;
-       int ret = 0;
+       int ret = 0, fddup;
        FILE *fp;
 
-       fp = fdopen(dup(pcache->fd), "r");
-       if (!fp)
+       fddup = dup(pcache->fd);
+       if (fddup < 0)
+               return -errno;
+       fp = fdopen(fddup, "r");
+       if (!fp) {
+               close(fddup);
                return -EINVAL;
+       }
 
        while (!feof(fp)) {
                if (!fgets(buf, MAX_CMDLEN, fp))
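
Both hunks in this file replace the unchecked fdopen(dup(fd), "r")
pattern: if dup() fails, fdopen() gets -1; if fdopen() fails, the
duplicated descriptor leaks and fp is NULL when dereferenced. The safe
shape of the pattern, as a minimal sketch:

    #include <stdio.h>
    #include <unistd.h>

    /* Open an independent stream on a dup of fd; returns NULL (and
     * leaks nothing) if either step fails. */
    static FILE *fdopen_dup(int fd)
    {
            FILE *fp;
            int fddup = dup(fd);

            if (fddup < 0)
                    return NULL;          /* errno set by dup()  */

            fp = fdopen(fddup, "r");
            if (!fp)
                    close(fddup);         /* don't leak the dup  */
            return fp;
    }
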
index f2d9ff064e2de720247e77f9645faf94b6b4ce57..5c290c682afe7176607fe01f4d29742b1821f1a9 100644 (file)
@@ -297,10 +297,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
        char sbuf[STRERR_BUFSIZE];
        int bsize, boffs, total;
        int ret;
+       char sign;
 
        /* TODO: check all types */
-       if (cast && strcmp(cast, "string") != 0) {
+       if (cast && strcmp(cast, "string") != 0 &&
+           strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
                /* Non-string types are OK, except bare signedness
                 * casts ("s"/"u"), which are resolved below */
                tvar->type = strdup(cast);
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
@@ -361,6 +364,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
 
+       if (cast && (strcmp(cast, "u") == 0))
+               sign = 'u';
+       else if (cast && (strcmp(cast, "s") == 0))
+               sign = 's';
+       else
+               sign = die_is_signed_type(&type) ? 's' : 'u';
+
        ret = dwarf_bytesize(&type);
        if (ret <= 0)
                /* No size ... try to use default type */
@@ -373,8 +383,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                        dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
                ret = MAX_BASIC_TYPE_BITS;
        }
-       ret = snprintf(buf, 16, "%c%d",
-                      die_is_signed_type(&type) ? 's' : 'u', ret);
+       ret = snprintf(buf, 16, "%c%d", sign, ret);
 
 formatted:
        if (ret < 0 || ret >= 16) {
index 947d21f3839838c433430b01fe52165522f87295..3d3cb8392c86029bb488f737564730e0cd8995bc 100644 (file)
@@ -588,7 +588,11 @@ static char *get_trace_output(struct hist_entry *he)
        } else {
                pevent_event_info(&seq, evsel->tp_format, &rec);
        }
-       return seq.buffer;
+       /*
+        * Trim the buffer; it starts at 4KB and we're not going
+        * to add anything more to it.
+        */
+       return realloc(seq.buffer, seq.len + 1);
 }
 
 static int64_t
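
trace_seq buffers start at 4KB; returning realloc(seq.buffer, seq.len + 1)
hands the caller a buffer trimmed to what was actually written plus the
terminating NUL. One hedged refinement worth noting: realloc() can in
principle return NULL even when shrinking, in which case the original
buffer is still valid, so a fully defensive version would be:

    #include <stdlib.h>

    /* Trim an over-sized buffer to its used length + NUL; fall back
     * to the original (still valid) buffer if realloc fails. */
    static char *trim_buffer(char *buf, size_t used)
    {
            char *p = realloc(buf, used + 1);

            return p ? p : buf;
    }
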
index a34321e9b44d8a42c1f4839b844b15e196a6417b..a811c13a74d663ac40efdf03333145450e7d7c19 100644 (file)
@@ -837,7 +837,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
        sec = syms_ss->symtab;
        shdr = syms_ss->symshdr;
 
-       if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
+       if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
+                               ".text", NULL))
                dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
 
        if (runtime_ss->opdsec)
index cf5e250bc78e1a4c82a08fc2ea50d5876bec1c69..783a53fb7a4ed41c2fef9e90de26355e6b1bcc7e 100644 (file)
@@ -66,7 +66,7 @@ static int entry(u64 ip, struct unwind_info *ui)
        if (__report_module(&al, ip, ui))
                return -1;
 
-       e->ip  = ip;
+       e->ip  = al.addr;
        e->map = al.map;
        e->sym = al.sym;
 
index 97c0f8fc55615961df9cd1a70f0c94125860d39a..20c2e5743903872771ae975b340b63aec521c3d3 100644 (file)
@@ -542,7 +542,7 @@ static int entry(u64 ip, struct thread *thread,
        thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
                                   MAP__FUNCTION, ip, &al);
 
-       e.ip = ip;
+       e.ip = al.addr;
        e.map = al.map;
        e.sym = al.sym;
 
index 5404efa578a3fcea18ce5bbab2a991e0c3d98b73..dd48f421844c7902773526d8c0845109ab9c5e55 100644 (file)
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 #include <linux/libnvdimm.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
@@ -1474,6 +1475,7 @@ static int nfit_test_probe(struct platform_device *pdev)
        if (nfit_test->setup != nfit_test0_setup)
                return 0;
 
+       flush_work(&acpi_desc->work);
        nfit_test->setup_hotplug = 1;
        nfit_test->setup(nfit_test);
 
index 3b530467148ec44d4b329e5bece6c5d9b9c056d4..9d0919ed52a4088d237ba47aa00219e1e1dee47b 100644 (file)
@@ -1,5 +1,5 @@
 
-CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
index 39d9b9568fe2291661e8ea49879670af5dc1a27d..05d7bc4889711d44d57d9b58224b21cd079b6c45 100644 (file)
@@ -124,6 +124,8 @@ static void multiorder_check(unsigned long index, int order)
        unsigned long i;
        unsigned long min = index & ~((1UL << order) - 1);
        unsigned long max = min + (1UL << order);
+       void **slot;
+       struct item *item2 = item_create(min);
        RADIX_TREE(tree, GFP_KERNEL);
 
        printf("Multiorder index %ld, order %d\n", index, order);
@@ -139,13 +141,19 @@ static void multiorder_check(unsigned long index, int order)
                item_check_absent(&tree, i);
        for (i = max; i < 2*max; i++)
                item_check_absent(&tree, i);
+       for (i = min; i < max; i++)
+               assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
+
+       slot = radix_tree_lookup_slot(&tree, index);
+       free(*slot);
+       radix_tree_replace_slot(slot, item2);
        for (i = min; i < max; i++) {
-               static void *entry = (void *)
-                                       (0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY);
-               assert(radix_tree_insert(&tree, i, entry) == -EEXIST);
+               struct item *item = item_lookup(&tree, i);
+               assert(item != 0);
+               assert(item->index == min);
        }
 
-       assert(item_delete(&tree, index) != 0);
+       assert(item_delete(&tree, min) != 0);
 
        for (i = 0; i < 2*max; i++)
                item_check_absent(&tree, i);
index 3c40c9d0e6c70a87b83c9d92a61f0fc0f2f4570c..1cc6d64c39b709dd28f104ced47829d7427c435f 100644 (file)
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc)
 
 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
 
-CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
 
 export CFLAGS
 
index 4f93af89ae165af7f82ced48e57c00460e6ce34d..18601f6689b9e13de81f4926423c7b88cdc44bdf 100644 (file)
@@ -14,4 +14,20 @@ enum dma_data_direction {
        DMA_NONE = 3,
 };
 
+#define dma_alloc_coherent(d, s, hp, f) ({ \
+       void *__dma_alloc_coherent_p = kmalloc((s), (f)); \
+       *(hp) = (unsigned long)__dma_alloc_coherent_p; \
+       __dma_alloc_coherent_p; \
+})
+
+#define dma_free_coherent(d, s, p, h) kfree(p)
+
+#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
+
+#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_mapping_error(...) (0)
+
+#define dma_unmap_single(...) do { } while (0)
+#define dma_unmap_page(...) do { } while (0)
+
 #endif
index 0338499482159883bb3e30452d5afc0f77af577e..d9554fc3f3403c2adef3d883fb767cabaa82015e 100644 (file)
@@ -20,7 +20,9 @@
 
 #define PAGE_SIZE getpagesize()
 #define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
 
+typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
 typedef unsigned int __wsum;
@@ -57,6 +59,11 @@ static inline void *kzalloc(size_t s, gfp_t gfp)
        return p;
 }
 
+static inline void *alloc_pages_exact(size_t s, gfp_t gfp)
+{
+       return kmalloc(s, gfp);
+}
+
 static inline void kfree(void *p)
 {
        if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
@@ -64,6 +71,11 @@ static inline void kfree(void *p)
        free(p);
 }
 
+static inline void free_pages_exact(void *p, size_t s)
+{
+       kfree(p);
+}
+
 static inline void *krealloc(void *p, size_t s, gfp_t gfp)
 {
        return realloc(p, s);
@@ -105,6 +117,8 @@ static inline void free_page(unsigned long addr)
 #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 
+#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+
 #define min(x, y) ({                           \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
index 81baeac8ae40249728e1bd40188da4d09bfc854b..7e1c1197d4390ede3a3ec6447256ee09ebf38600 100644 (file)
@@ -1,2 +1,6 @@
 #ifndef LINUX_SLAB_H
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define __GFP_NOWARN 0
+#define __GFP_ZERO 0
 #endif
index ee125e714053a91a76658d417a46232349927450..9377c8b4ac167723de43088e96e5b5f0effcf4b2 100644 (file)
@@ -3,8 +3,12 @@
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
 
+struct device {
+       void *parent;
+};
+
 struct virtio_device {
-       void *dev;
+       struct device dev;
        u64 features;
 };
 
index 57a6964a1e355b8daa154adff9bd007c93f8374f..9ba11815e0a16b93ec0e04735a3486ac9dc6bfe4 100644 (file)
@@ -40,6 +40,19 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
 #define virtio_has_feature(dev, feature) \
        (__virtio_test_bit((dev), feature))
 
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+       /*
+        * Note the reverse polarity of the quirk feature (compared to most
+        * other features); this is for compatibility with legacy systems.
+        */
+       return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
 {
        return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
index 68e4f9f0da3abe48ea6e91cb3c8ce215bbd118c5..bd2ad1d3b7a9ef88e28e1ad982dd37638841fa04 100644 (file)
@@ -13,6 +13,7 @@
 #define cache_line_size() SMP_CACHE_BYTES
 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
 #define unlikely(x)    (__builtin_expect(!!(x), 0))
+#define likely(x)    (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
 typedef pthread_spinlock_t  spinlock_t;
 
index 4fde8c7dfcfe18ce2a0d96ffb465d177ed590e86..77e6ccf149011b2f3b43ba01aa1454fd08889dc8 100644 (file)
@@ -33,6 +33,7 @@
 static struct timecounter *timecounter;
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_init_interrupt(void *info)
 {
-       enable_percpu_irq(host_vtimer_irq, 0);
+       enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
        }
        host_vtimer_irq = info->virtual_irq;
 
+       host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+       if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+           host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+               kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+                       host_vtimer_irq);
+               host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+       }
+
        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
index fb4b0a79a9502ba9bcaad7acfdbbf692c5ed5208..83777c1cbae0693c14e1d3f23df927c5cae25fef 100644 (file)
@@ -73,12 +73,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
 
-       mutex_lock(&kvm->lock);
-
-       if (irqchip_in_kernel(kvm)) {
-               ret = -EEXIST;
-               goto out;
-       }
+       if (irqchip_in_kernel(kvm))
+               return -EEXIST;
 
        /*
         * This function is also called by the KVM_CREATE_IRQCHIP handler,
@@ -87,10 +83,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
         * the proper checks already.
         */
        if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
-               !kvm_vgic_global_state.can_emulate_gicv2) {
-               ret = -ENODEV;
-               goto out;
-       }
+               !kvm_vgic_global_state.can_emulate_gicv2)
+               return -ENODEV;
 
        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
@@ -138,9 +132,6 @@ out_unlock:
                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&vcpu->mutex);
        }
-
-out:
-       mutex_unlock(&kvm->lock);
        return ret;
 }
 
index 07411cf967b98767bada3c6287fc1cc69c53a86a..4660a7d04eeaf9792b4df044a092054759977046 100644 (file)
@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
 
        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
        if (!irq)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  * Find the target VCPU and the LPI number for a given devid/eventid pair
  * and make this IRQ pending, possibly injecting it.
  * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for ITS-mapping-related
+ * errors, and a negative error value for generic errors.
  */
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-                                u32 devid, u32 eventid)
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+                               u32 devid, u32 eventid)
 {
+       struct kvm_vcpu *vcpu;
        struct its_itte *itte;
 
        if (!its->enabled)
-               return;
+               return -EBUSY;
 
        itte = find_itte(its, devid, eventid);
-       /* Triggering an unmapped IRQ gets silently dropped. */
-       if (itte && its_is_collection_mapped(itte->collection)) {
-               struct kvm_vcpu *vcpu;
-
-               vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
-               if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
-                       spin_lock(&itte->irq->irq_lock);
-                       itte->irq->pending = true;
-                       vgic_queue_irq_unlock(kvm, itte->irq);
-               }
-       }
+       if (!itte || !its_is_collection_mapped(itte->collection))
+               return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+       vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+       if (!vcpu)
+               return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+       if (!vcpu->arch.vgic_cpu.lpis_enabled)
+               return -EBUSY;
+
+       spin_lock(&itte->irq->irq_lock);
+       itte->irq->pending = true;
+       vgic_queue_irq_unlock(kvm, itte->irq);
+
+       return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+       struct vgic_io_device *iodev;
+
+       if (dev->ops != &kvm_io_gic_ops)
+               return NULL;
+
+       iodev = container_of(dev, struct vgic_io_device, dev);
+
+       if (iodev->iodev_type != IODEV_ITS)
+               return NULL;
+
+       return iodev;
 }
 
 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
  * We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description returns 1 on success.
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
        u64 address;
        struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;
+       int ret;
 
        if (!vgic_has_its(kvm))
                return -ENODEV;
@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 
        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
        if (!kvm_io_dev)
-               return -ENODEV;
+               return -EINVAL;
 
-       iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+       iodev = vgic_get_its_iodev(kvm_io_dev);
+       if (!iodev)
+               return -EINVAL;
 
        mutex_lock(&iodev->its->its_lock);
-       vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+       ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
        mutex_unlock(&iodev->its->its_lock);
 
-       return 0;
+       if (ret < 0)
+               return ret;
+
+       /*
+        * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+        * if the guest has blocked the MSI, so map any LPI-mapping
+        * error to the latter.
+        */
+       if (ret)
+               return 0;
+       else
+               return 1;
 }
 
 /* Requires the its_lock to be held. */
@@ -502,7 +539,8 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
        list_del(&itte->itte_list);
 
        /* This put matches the get in vgic_add_lpi. */
-       vgic_put_irq(kvm, itte->irq);
+       if (itte->irq)
+               vgic_put_irq(kvm, itte->irq);
 
        kfree(itte);
 }
@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
        struct its_device *device;
        struct its_collection *collection, *new_coll = NULL;
        int lpi_nr;
+       struct vgic_irq *irq;
 
        device = find_its_device(its, device_id);
        if (!device)
@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
                return E_ITS_MAPTI_PHYSICALID_OOR;
 
+       /* If there is an existing mapping, behavior is UNPREDICTABLE. */
+       if (find_itte(its, device_id, event_id))
+               return 0;
+
        collection = find_collection(its, coll_id);
        if (!collection) {
                int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -718,22 +761,28 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
                new_coll = collection;
        }
 
-       itte = find_itte(its, device_id, event_id);
+       itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
        if (!itte) {
-               itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-               if (!itte) {
-                       if (new_coll)
-                               vgic_its_free_collection(its, coll_id);
-                       return -ENOMEM;
-               }
-
-               itte->event_id  = event_id;
-               list_add_tail(&itte->itte_list, &device->itt_head);
+               if (new_coll)
+                       vgic_its_free_collection(its, coll_id);
+               return -ENOMEM;
        }
 
+       itte->event_id  = event_id;
+       list_add_tail(&itte->itte_list, &device->itt_head);
+
        itte->collection = collection;
        itte->lpi = lpi_nr;
-       itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+       irq = vgic_add_lpi(kvm, lpi_nr);
+       if (IS_ERR(irq)) {
+               if (new_coll)
+                       vgic_its_free_collection(its, coll_id);
+               its_free_itte(kvm, itte);
+               return PTR_ERR(irq);
+       }
+       itte->irq = irq;
+
        update_affinity_itte(kvm, itte);
 
        /*
@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
        u32 msi_data = its_cmd_get_id(its_cmd);
        u64 msi_devid = its_cmd_get_deviceid(its_cmd);
 
-       vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
-       return 0;
+       return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
 }
 
 /*
@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
                its_sync_lpi_pending_table(vcpu);
 }
 
-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
 {
        struct vgic_io_device *iodev = &its->iodev;
        int ret;
 
-       if (its->initialized)
-               return 0;
+       if (!its->initialized)
+               return -EBUSY;
 
        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
                return -ENXIO;
@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
        mutex_unlock(&kvm->slots_lock);
 
-       if (!ret)
-               its->initialized = true;
-
        return ret;
 }
 
@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
                if (type != KVM_VGIC_ITS_ADDR_TYPE)
                        return -ENODEV;
 
-               if (its->initialized)
-                       return -EBUSY;
-
                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;
 
@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
-                       return vgic_its_init_its(dev->kvm, its);
+                       its->initialized = true;
+
+                       return 0;
                }
                break;
        }
@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void)
        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
 }
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+       struct kvm_device *dev;
+       int ret = 0;
+
+       list_for_each_entry(dev, &kvm->devices, vm_node) {
+               if (dev->ops != &kvm_arm_vgic_its_ops)
+                       continue;
+
+               ret = vgic_register_its_iodev(kvm, dev->private);
+               if (ret)
+                       return ret;
+               /*
+                * We don't need to care about tearing down previously
+                * registered ITSes, as the kvm_io_bus framework removes
+                * them for us if the VM gets destroyed.
+                */
+       }
+
+       return ret;
+}
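
vgic_its_inject_msi() now distinguishes three outcomes from
vgic_its_trigger_msi(): a negative errno is propagated, a positive ITS
error such as E_ITS_INT_UNMAPPED_INTERRUPT means the guest blocked the
MSI (return 0 per the KVM_SIGNAL_MSI contract), and 0 means the MSI was
delivered (return 1). The mapping condensed into one helper (a sketch
mirroring the code above):

    /* Translate vgic_its_trigger_msi() results for KVM_SIGNAL_MSI. */
    static int msi_retval(int ret)
    {
            if (ret < 0)
                    return ret;   /* generic error: propagate       */
            if (ret > 0)
                    return 0;     /* ITS mapping error: MSI blocked */
            return 1;             /* delivered                      */
    }
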
index ff668e0dd586de477767b796e780597c6736eeca..90d81811fdda0e653e19fe92c640bede9053f617 100644 (file)
@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 {
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       u64 propbaser = dist->propbaser;
+       u64 old_propbaser, propbaser;
 
        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;
 
-       propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
-       propbaser = vgic_sanitise_propbaser(propbaser);
-
-       dist->propbaser = propbaser;
+       do {
+               old_propbaser = dist->propbaser;
+               propbaser = old_propbaser;
+               propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+               propbaser = vgic_sanitise_propbaser(propbaser);
+       } while (cmpxchg64(&dist->propbaser, old_propbaser,
+                          propbaser) != old_propbaser);
 }
 
 static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
                                     unsigned long val)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       u64 pendbaser = vgic_cpu->pendbaser;
+       u64 old_pendbaser, pendbaser;
 
        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;
 
-       pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
-       pendbaser = vgic_sanitise_pendbaser(pendbaser);
-
-       vgic_cpu->pendbaser = pendbaser;
+       do {
+               old_pendbaser = vgic_cpu->pendbaser;
+               pendbaser = old_pendbaser;
+               pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+               pendbaser = vgic_sanitise_pendbaser(pendbaser);
+       } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+                          pendbaser) != old_pendbaser);
 }
 
 /*
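
Because a guest can race 32-bit writes to the two halves of PROPBASER
and PENDBASER, both updates become cmpxchg64 retry loops: snapshot the
register, recompute the sanitised value, and commit only if nothing
changed underneath. A userspace analogue of the same loop using the
GCC/Clang __atomic builtins (sanitise() stands in for update_64bit_reg()
plus the vgic_sanitise_*() helpers):

    #include <stdint.h>

    static uint64_t sanitise(uint64_t v) { return v & ~0xfULL; /* example */ }

    static void update_reg(uint64_t *reg, uint64_t val)
    {
            uint64_t old, new;

            do {
                    old = __atomic_load_n(reg, __ATOMIC_RELAXED);
                    new = sanitise(old | val);
            } while (!__atomic_compare_exchange_n(reg, &old, new, false,
                                                  __ATOMIC_RELAXED,
                                                  __ATOMIC_RELAXED));
    }
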
index 0506543df38a7aeee6c24dbfc60b84d433e6c7d7..9f0dae397d9c818b1d0237076d0bc59020a05950 100644 (file)
@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
                goto out;
        }
 
+       if (vgic_has_its(kvm)) {
+               ret = vgic_register_its_iodevs(kvm);
+               if (ret) {
+                       kvm_err("Unable to register VGIC ITS MMIO regions\n");
+                       goto out;
+               }
+       }
+
        dist->ready = true;
 
 out:
index e7aeac719e09175514a6a0c4dd349ac6500039a4..e83b7fe4baaed1e1c5bc5f7fd520995002186b61 100644 (file)
@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref)
 
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
-       struct vgic_dist *dist;
+       struct vgic_dist *dist = &kvm->arch.vgic;
 
        if (irq->intid < VGIC_MIN_LPI)
                return;
 
-       if (!kref_put(&irq->refcount, vgic_irq_release))
+       spin_lock(&dist->lpi_list_lock);
+       if (!kref_put(&irq->refcount, vgic_irq_release)) {
+               spin_unlock(&dist->lpi_list_lock);
                return;
+       }
 
-       dist = &kvm->arch.vgic;
-
-       spin_lock(&dist->lpi_list_lock);
        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
        spin_unlock(&dist->lpi_list_lock);
index 1d8e21d5c13f589d3e8389829f8e6722b0b0c0ee..6c4625c4636843c551a23d26b6b1d849e26b52cd 100644 (file)
@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
 int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
        return -ENODEV;
 }
 
+static inline int vgic_register_its_iodevs(struct kvm *kvm)
+{
+       return -ENODEV;
+}
+
 static inline bool vgic_has_its(struct kvm *kvm)
 {
        return false;
index cc081ccfcaa3743ca5e0f46dd350bda36bc3ec72..195078225aa5d0c3b3214fd40e0b5f441c5d7518 100644 (file)
@@ -696,6 +696,11 @@ static void kvm_destroy_devices(struct kvm *kvm)
 {
        struct kvm_device *dev, *tmp;
 
+       /*
+        * We do not need to take the kvm->lock here, because nobody else
+        * has a reference to the struct kvm at this point and therefore
+        * cannot access the devices list anyhow.
+        */
        list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
                list_del(&dev->vm_node);
                dev->ops->destroy(dev);
@@ -2832,19 +2837,28 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->ops = ops;
        dev->kvm = kvm;
 
+       mutex_lock(&kvm->lock);
        ret = ops->create(dev, cd->type);
        if (ret < 0) {
+               mutex_unlock(&kvm->lock);
                kfree(dev);
                return ret;
        }
+       list_add(&dev->vm_node, &kvm->devices);
+       mutex_unlock(&kvm->lock);
+
+       if (ops->init)
+               ops->init(dev);
 
        ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
        if (ret < 0) {
                ops->destroy(dev);
+               mutex_lock(&kvm->lock);
+               list_del(&dev->vm_node);
+               mutex_unlock(&kvm->lock);
                return ret;
        }
 
-       list_add(&dev->vm_node, &kvm->devices);
        kvm_get_kvm(kvm);
        cd->fd = ret;
        return 0;
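
The kvm_ioctl_create_device() reordering makes device creation visible
atomically: ops->create() and the list_add() both run under kvm->lock,
ops->init() runs once the device is already on kvm->devices, and only
then is a file descriptor handed to userspace (with a locked list_del()
on failure). A stand-in sketch of that publish-before-expose ordering:

    #include <pthread.h>

    struct dev { struct dev *next; };

    static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dev *devices;

    static void create_device(struct dev *d)
    {
            pthread_mutex_lock(&kvm_lock);
            d->next = devices;           /* 1: create and publish under lock */
            devices = d;
            pthread_mutex_unlock(&kvm_lock);
            /* 2: init runs with the device already published */
            /* 3: only now hand a file descriptor to userspace;
             *    on failure, take the lock again and unlink d  */
    }
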